diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..de928aa --- /dev/null +++ b/Makefile @@ -0,0 +1,2 @@ +test: + find -name \*.tf -exec grep -r provider {} \; -print diff --git a/accounts/mdupont/Makefile b/accounts/mdupont/Makefile index 9d2b026..47e8dbb 100644 --- a/accounts/mdupont/Makefile +++ b/accounts/mdupont/Makefile @@ -1,3 +1,4 @@ make : - #tofu init + tofu init tofu plan + tofu apply diff --git a/accounts/mdupont/custom_style.css b/accounts/mdupont/custom_style.css new file mode 100644 index 0000000..7fb0c8b --- /dev/null +++ b/accounts/mdupont/custom_style.css @@ -0,0 +1 @@ +.label-customizable {font-weight: 400;} diff --git a/accounts/mdupont/data.tf b/accounts/mdupont/data.tf index d4e6bbb..fbb2afa 100644 --- a/accounts/mdupont/data.tf +++ b/accounts/mdupont/data.tf @@ -7,21 +7,20 @@ data "aws_iam_user" "example_user" { user_name = var.iam_user } -data "aws_dynamodb_table" "terraform_dynamo_table" { - name = var.table_name -} +#data "aws_dynamodb_table" "terraform_dynamo_table" { +# name = var.table_name +#} # Data resource for AWS call identity data "aws_caller_identity" "current" {} -data "aws_s3_bucket" "terraform_logging" { - bucket = "${var.project_name}-tf-state-log-${var.aws_region}" -} - -data "aws_s3_bucket" "terraform_state" { - bucket = "${var.project_name}-tf-state-${var.aws_region}" -} +#data "aws_s3_bucket" "terraform_logging" { +# bucket = "${var.project_name}-tf-state-log-${var.aws_region}" +#} +#data "aws_s3_bucket" "terraform_state" { +# bucket = "${var.project_name}-tf-state-${var.aws_region}" +#} -data "aws_cloudtrail_service_account" "main" {} +#data "aws_cloudtrail_service_account" "main" {} diff --git a/accounts/mdupont/logo.png b/accounts/mdupont/logo.png new file mode 100644 index 0000000..33d0f2d Binary files /dev/null and b/accounts/mdupont/logo.png differ diff --git a/accounts/mdupont/main.tf b/accounts/mdupont/main.tf new file mode 100644 index 0000000..8e621a9 --- /dev/null +++ b/accounts/mdupont/main.tf 
@@ -0,0 +1,24 @@ +variable "google_oauth_client_secret" {} +variable "google_oauth_client_id" {} + +module cognito { + aws_account =var.aws_account_id + myemail ="jmdupont" + mydomain ="introspector" + mydomain_suffix = "meme" + #../../../17/ + aws_region = var.aws_region + env={ + region = var.aws_region + profile = var.profile + } + source = "../../environments/swarms-aws-agent-api/dev/us-east-1/components/cognito_user_pool" + #source = "~/2024/12/17/cognito/terraform-aws-cognito-user-pool/examples/complete/" + #source = "git::https://github.com/meta-introspector/terraform-aws-cognito-user-pool.git?ref=feature/meta-meme" + google_oauth_client_secret=var.google_oauth_client_secret + google_oauth_client_id=var.google_oauth_client_id +} +output cognito{ + value = module.cognito + sensitive = true +} diff --git a/accounts/mdupont/output.tf b/accounts/mdupont/output.tf index a672016..ef3739e 100644 --- a/accounts/mdupont/output.tf +++ b/accounts/mdupont/output.tf @@ -6,9 +6,9 @@ output "iam_user" { value = var.iam_user } -output "table_name" { - value = var.table_name -} +#output "table_name" { +# value = var.table_name +#} output "project_name" { value = var.project_name @@ -19,15 +19,18 @@ output "aws_caller_identity_account_id" { } output "aws_s3_bucket_logging_name" { - value = data.aws_s3_bucket.terraform_logging.bucket + # value = data.aws_s3_bucket.terraform_logging.bucket + value = null } output "aws_s3_bucket_state_name" { - value = data.aws_s3_bucket.terraform_state.bucket + #value = data.aws_s3_bucket.terraform_state.bucket + value = null } output "aws_cloudtrail_service_account_id" { - value = data.aws_cloudtrail_service_account.main.id + #value = data.aws_cloudtrail_service_account.main.id + value = null } diff --git a/accounts/mdupont/variables.tf b/accounts/mdupont/variables.tf index 8560e4a..31b65a2 100644 --- a/accounts/mdupont/variables.tf +++ b/accounts/mdupont/variables.tf @@ -1,12 +1,17 @@ - variable "project_name" { - type = string - default = 
"swarms" - } +variable "project_name" { + type = string + default = "meta-meme" +} + +variable "profile" { + type = string + default = "default" +} variable "aws_region" { - type = string - default = "us-east-1" - } + type = string + default = "us-east-1" +} variable "aws_account_id" { type = string @@ -18,10 +23,10 @@ variable "aws_region" { default = "mdupont" } - variable "table_name" { - type = string - default = "swarms" - } +# variable "table_name" { +# type = string +# default = "meta-meme" +# } variable "lock_resource" { type = string diff --git a/accounts/swarms/Readme.md b/accounts/swarms/Readme.md new file mode 100644 index 0000000..db2cd84 --- /dev/null +++ b/accounts/swarms/Readme.md @@ -0,0 +1,60 @@ + +# credentials + +set up ~/.aws/credentials +``` +[swarms] +aws_access_key_id =${your key} +aws_secret_access_key=${your SECRET} +``` + +# install opentofu or terraform +# install aws cli +# install aws ssm plugin + +# create openai secret token + +TODO: +`aws ssm put-parameter --name "swarms_openai_key" --type SecureString --value "<your openai key>"` + +# tofu init +# tofu plan +# tofu apply +point the dns api.swarms.ai at the dns servers in godaddy + +`tofu state show module.swarms_api.module.alb.module.route53.data.aws_route53_zone.primary` + +```terraform +# module.swarms_api.module.alb.module.route53.data.aws_route53_zone.primary: +data "aws_route53_zone" "primary" { + arn = "arn:aws:route53:::hostedzone/Z04162952OP7P14Z97UWY" + caller_reference = "937599df-113d-4b02-8c75-4a20f8e6293e" + id = "Z04162952OP7P14Z97UWY" + name = "api.swarms.ai" + name_servers = [ + "ns-864.awsdns-44.net", + "ns-1595.awsdns-07.co.uk", + "ns-1331.awsdns-38.org", + "ns-463.awsdns-57.com", + ] + primary_name_server = "ns-864.awsdns-44.net" + private_zone = false + resource_record_set_count = 3 + tags = {} + zone_id = "Z04162952OP7P14Z97UWY" +} +``` +so we need 4 records + +1. NS api -> "ns-864.awsdns-44.net" +2. NS api -> "ns-1595.awsdns-07.co.uk" +3. NS api -> "ns-1331.awsdns-38.org" +4. 
NS api -> "ns-463.awsdns-57.com" + +see youtube or +https://youtu.be/3BI6_gq-lSU +https://dev.to/diegop0s/managing-your-godaddy-domain-with-route53-5f2p + +# tofu apply + +`tofu apply` diff --git a/accounts/swarms/main.tf b/accounts/swarms/main.tf new file mode 100644 index 0000000..2468b5b --- /dev/null +++ b/accounts/swarms/main.tf @@ -0,0 +1,49 @@ +locals { + #ami_name = "ubuntu/images/hvm-ssd-gp3/ubuntu-noble-24.04-amd64-server-*" + ami_name = "ubuntu-minimal/images/hvm-ssd-gp3/ubuntu-noble-24.04-amd64-minimal-*" + dns = "api.swarms.ai" + account = "916723593639" + region = "us-east-2" +} + +provider aws { + region = "us-east-2" + profile = "swarms" +} +output dns { + value = local.dns +} + +output profile { + value = "swarms" +} + +output account { + value = "916723593639" +} + +output region { + value = "us-east-2" +} + +#SLOW + data "aws_ami" "ami" { + most_recent = true + name_regex = "^${local.ami_name}" + } + +module "swarms_api" { + source = "../../environments/swarms-aws-agent-api/dev/us-east-1" + domain = local.dns + ami_id = data.aws_ami.ami.id + #"ami-0ad5d6c7069ce56ac" + #ami_id = "ami-0ad5d6c7069ce56ac" + + name = "swarms" + tags = {project="swarms"} + +} + +output api { + value = module.swarms_api +} diff --git a/accounts/swarms/main.txt b/accounts/swarms/main.txt new file mode 100644 index 0000000..65f2ed4 --- /dev/null +++ b/accounts/swarms/main.txt @@ -0,0 +1,1769 @@ +mdupont@mdupont-G470:~/2024/12/swarms-terraform/accounts/swarms$ tofu apply +module.swarms_api.module.vpc.data.aws_availability_zones.available: Reading... +module.swarms_api.module.alb.module.alb.data.aws_partition.current: Reading... +module.swarms_api.module.asg_dynamic["t3.medium"].module.autoscaling.data.aws_partition.current: Reading... +module.swarms_api.module.kp.aws_key_pair.mdupont-deployer: Refreshing state... [id=mdupont-deployer-key] +module.swarms_api.data.aws_ami.ami: Reading... 
+module.swarms_api.module.alb.module.acm.aws_route53_record.validation[0]: Refreshing state... [id=Z05433953QQ7Q4A095TXN__2d35674ee842f81e6c1d4b1ff81bf202.api.swarms.ai._CNAME] +module.swarms_api.module.alb.aws_route53_zone.primary: Refreshing state... [id=Z05433953QQ7Q4A095TXN] +module.swarms_api.module.vpc.module.vpc.aws_vpc.this[0]: Refreshing state... [id=vpc-0474ec5cd8a0dd94b] +module.swarms_api.module.alb.module.alb.data.aws_partition.current: Read complete after 0s [id=aws] +module.swarms_api.module.asg_dynamic["t3.medium"].module.autoscaling.data.aws_partition.current: Read complete after 0s [id=aws] +module.swarms_api.module.alb.module.acm.aws_acm_certificate.this[0]: Refreshing state... [id=arn:aws:acm:us-east-2:767503528736:certificate/e4c7475f-9b0a-471f-b02c-134759b27668] +module.swarms_api.module.alb.data.aws_availability_zones.available: Reading... +module.swarms_api.module.asg_dynamic["t3.medium"].aws_iam_role.ssm: Refreshing state... [id=ssm-swarms-size-t3.medium] +module.swarms_api.module.asg_dynamic["t3.medium"].module.autoscaling.data.aws_iam_policy_document.assume_role_policy[0]: Reading... +module.swarms_api.module.asg_dynamic["t3.medium"].module.autoscaling.data.aws_iam_policy_document.assume_role_policy[0]: Read complete after 0s [id=1256122602] +module.swarms_api.module.asg_dynamic["t3.medium"].module.autoscaling.aws_iam_role.this[0]: Refreshing state... [id=ssm-swarms-size-t3.medium-20241216190828467900000002] +module.swarms_api.module.alb.data.aws_availability_zones.available: Read complete after 0s [id=us-east-2] +module.swarms_api.module.vpc.data.aws_availability_zones.available: Read complete after 0s [id=us-east-2] +module.swarms_api.module.asg_dynamic["t3.medium"].aws_iam_instance_profile.ssm: Refreshing state... [id=ssm-swarms-size-t3.medium] +module.swarms_api.module.asg_dynamic["t3.medium"].module.autoscaling.aws_iam_instance_profile.this[0]: Refreshing state... 
[id=ssm-swarms-size-t3.medium-20241216190828732900000006] +module.swarms_api.module.asg_dynamic["t3.medium"].module.autoscaling.aws_iam_role_policy_attachment.this["AmazonSSMManagedInstanceCore"]: Refreshing state... [id=ssm-swarms-size-t3.medium-20241216190828467900000002-20241216190828805000000007] +module.swarms_api.module.vpc.module.vpc.aws_default_route_table.default[0]: Refreshing state... [id=rtb-0834ea0a1cb0b3e20] +module.swarms_api.module.vpc.module.vpc.aws_default_security_group.this[0]: Refreshing state... [id=sg-09e45cb7d4cd6e40f] +module.swarms_api.module.vpc.module.vpc.aws_internet_gateway.this[0]: Refreshing state... [id=igw-094056be43e3ff84b] +module.swarms_api.module.vpc.module.vpc.aws_default_network_acl.this[0]: Refreshing state... [id=acl-0757b0776826bfbf8] +module.swarms_api.module.vpc.module.vpc.aws_route_table.public[0]: Refreshing state... [id=rtb-01ab4dcce11853b1d] +module.swarms_api.module.vpc.module.vpc.aws_subnet.public[1]: Refreshing state... [id=subnet-0bc4a9913a3ce2359] +module.swarms_api.module.vpc.module.vpc.aws_subnet.public[2]: Refreshing state... [id=subnet-02678c830b74a5b5d] +module.swarms_api.module.vpc.module.vpc.aws_subnet.public[0]: Refreshing state... [id=subnet-0c0af7a5f74ac54cc] +module.swarms_api.module.vpc.module.vpc.aws_route_table.private[0]: Refreshing state... [id=rtb-078545b8b8efb0295] +module.swarms_api.module.vpc.module.vpc.aws_route_table.private[2]: Refreshing state... [id=rtb-062a89232fc0355b2] +module.swarms_api.module.vpc.module.vpc.aws_route_table.private[1]: Refreshing state... [id=rtb-0513c8c9e488570c3] +module.swarms_api.module.vpc.module.vpc.aws_subnet.private[0]: Refreshing state... [id=subnet-03f08e37b2201be9c] +module.swarms_api.module.vpc.module.vpc.aws_subnet.private[1]: Refreshing state... [id=subnet-0e6c2fef4377b5590] +module.swarms_api.module.vpc.module.vpc.aws_subnet.private[2]: Refreshing state... 
[id=subnet-0bbfe21680e03572b] +module.swarms_api.module.vpc.module.vpc.aws_route.public_internet_gateway[0]: Refreshing state... [id=r-rtb-01ab4dcce11853b1d1080289494] +module.swarms_api.module.alb.aws_lb_target_group.this: Refreshing state... [id=arn:aws:elasticloadbalancing:us-east-2:767503528736:targetgroup/swarms2024121619084133320000000c/001ec925380d4bcd] +module.swarms_api.module.vpc.module.vpc.aws_route_table_association.public[2]: Refreshing state... [id=rtbassoc-037ae6ac8e6a6692b] +module.swarms_api.module.vpc.module.vpc.aws_route_table_association.public[0]: Refreshing state... [id=rtbassoc-07c9642c84441a2c1] +module.swarms_api.module.vpc.module.vpc.aws_route_table_association.public[1]: Refreshing state... [id=rtbassoc-08546098c2d85fd6f] +module.swarms_api.module.vpc.module.vpc.aws_route_table_association.private[0]: Refreshing state... [id=rtbassoc-02e71f6004f433e3e] +module.swarms_api.module.vpc.module.vpc.aws_route_table_association.private[2]: Refreshing state... [id=rtbassoc-082b7f543579d5f38] +module.swarms_api.module.vpc.module.vpc.aws_route_table_association.private[1]: Refreshing state... [id=rtbassoc-05cfcd90f6d653fe1] +module.swarms_api.data.aws_ami.ami: Still reading... [10s elapsed] +module.swarms_api.data.aws_ami.ami: Read complete after 13s [id=ami-0ad5d6c7069ce56ac] + +OpenTofu used the selected providers to generate the following execution plan. 
Resource actions are indicated with +the following symbols: + + create + - destroy + +OpenTofu will perform the following actions: + + # module.swarms_api.module.lt_dynamic["t3.medium"].aws_launch_template.ec2_launch_template will be created + + resource "aws_launch_template" "ec2_launch_template" { + + arn = (known after apply) + + default_version = (known after apply) + + id = (known after apply) + + image_id = + + instance_type = "t3.medium" + + key_name = "mdupont-deployer-key" + + latest_version = (known after apply) + + name = (known after apply) + + name_prefix = "swarms-size-t3.medium-launch-template-" + + tags = { + + "instance_type" = "t3.medium" + + "name" = "swarms-size-t3.medium" + + "project" = "swarms" + } + + tags_all = { + + "instance_type" = "t3.medium" + + "name" = "swarms-size-t3.medium" + + "project" = "swarms" + } + + user_data = "IyEvYmluL2Jhc2gKZXhwb3J0IEhPTUU9L3Jvb3QKYXB0IHVwZGF0ZQphcHQtZ2V0IGluc3RhbGwgLXkgZWMyLWluc3RhbmNlLWNvbm5lY3QgZ2l0IHZpcnR1YWxlbnYKc25hcCBpbnN0YWxsIGFtYXpvbi1zc20tYWdlbnQgLS1jbGFzc2ljIHx8IGVjaG8gb29wczEKc25hcCBzdGFydCBhbWF6b24tc3NtLWFnZW50IHx8IGVjaG8gb29wczIKYXB0LWdldCBpbnN0YWxsIC15IC0tbm8taW5zdGFsbC1yZWNvbW1lbmRzIGNhLWNlcnRpZmljYXRlcz0yMDIzMDMxMSBjdXJsPTcuODguMS0xMCtkZWIxMnU3IHwgIGVjaG8gb29wcwpjdXJsIC1PICJodHRwczovL3MzLmFtYXpvbmF3cy5jb20vYW1hem9uY2xvdWR3YXRjaC1hZ2VudC91YnVudHUvJChkcGtnIC0tcHJpbnQtYXJjaGl0ZWN0dXJlKS9sYXRlc3QvYW1hem9uLWNsb3Vkd2F0Y2gtYWdlbnQuZGViIgpkcGtnIC1pIC1FIGFtYXpvbi1jbG91ZHdhdGNoLWFnZW50LmRlYgogCmlmIFsgISAtZCAiL29wdC9zd2FybXMvIiBdOyB0aGVuCiAgZ2l0IGNsb25lIGh0dHBzOi8vZ2l0aHViLmNvbS9qbWlrZWR1cG9udDIvc3dhcm1zICIvb3B0L3N3YXJtcy8iCmZpCmNkICIvb3B0L3N3YXJtcy8iIHx8IGV4aXQgMQpleHBvcnQgQlJBTkNIPWZlYXR1cmUvZWMyCmdpdCBzdGFzaApnaXQgY2hlY2tvdXQgLS1mb3JjZSAkQlJBTkNICmJhc2ggLXggL29wdC9zd2FybXMvYXBpL2luc3RhbGwuc2gK" + + + block_device_mappings { + + device_name = "/dev/sda1" + + + ebs { + + encrypted = "true" + + iops = (known after apply) + + throughput = (known after apply) + + volume_size = 30 + + volume_type = 
"gp3" + } + } + + + iam_instance_profile { + + name = "swarms-20241213150629570500000003" + } + + + network_interfaces { + + associate_public_ip_address = "true" + + delete_on_termination = "true" + + security_groups = (known after apply) + } + } + + # module.swarms_api.module.alb.module.acm.aws_acm_certificate.this[0] will be destroyed + # (because aws_acm_certificate.this is not in configuration) + - resource "aws_acm_certificate" "this" { + - arn = "arn:aws:acm:us-east-2:767503528736:certificate/e4c7475f-9b0a-471f-b02c-134759b27668" -> null + - domain_name = "api.swarms.ai" -> null + - domain_validation_options = [ + - { + - domain_name = "*.api.swarms.ai" + - resource_record_name = "_2d35674ee842f81e6c1d4b1ff81bf202.api.swarms.ai." + - resource_record_type = "CNAME" + - resource_record_value = "_14001873a994d4ba61251844ed588611.zfyfvmchrl.acm-validations.aws." + }, + - { + - domain_name = "api.swarms.ai" + - resource_record_name = "_2d35674ee842f81e6c1d4b1ff81bf202.api.swarms.ai." + - resource_record_type = "CNAME" + - resource_record_value = "_14001873a994d4ba61251844ed588611.zfyfvmchrl.acm-validations.aws." 
+ }, + ] -> null + - id = "arn:aws:acm:us-east-2:767503528736:certificate/e4c7475f-9b0a-471f-b02c-134759b27668" -> null + - key_algorithm = "RSA_2048" -> null + - pending_renewal = false -> null + - renewal_eligibility = "INELIGIBLE" -> null + - renewal_summary = [] -> null + - status = "PENDING_VALIDATION" -> null + - subject_alternative_names = [ + - "*.api.swarms.ai", + - "api.swarms.ai", + ] -> null + - tags = {} -> null + - tags_all = {} -> null + - type = "AMAZON_ISSUED" -> null + - validation_emails = [] -> null + - validation_method = "DNS" -> null + + - options { + - certificate_transparency_logging_preference = "ENABLED" -> null + } + } + + # module.swarms_api.module.alb.module.acm.aws_route53_record.validation[0] will be destroyed + # (because aws_route53_record.validation is not in configuration) + - resource "aws_route53_record" "validation" { + - allow_overwrite = true -> null + - fqdn = "_2d35674ee842f81e6c1d4b1ff81bf202.api.swarms.ai" -> null + - id = "Z05433953QQ7Q4A095TXN__2d35674ee842f81e6c1d4b1ff81bf202.api.swarms.ai._CNAME" -> null + - multivalue_answer_routing_policy = false -> null + - name = "_2d35674ee842f81e6c1d4b1ff81bf202.api.swarms.ai" -> null + - records = [ + - "_14001873a994d4ba61251844ed588611.zfyfvmchrl.acm-validations.aws.", + ] -> null + - ttl = 60 -> null + - type = "CNAME" -> null + - zone_id = "Z05433953QQ7Q4A095TXN" -> null + } + + # module.swarms_api.module.alb.module.alb.aws_lb.this[0] will be created + + resource "aws_lb" "this" { + + arn = (known after apply) + + arn_suffix = (known after apply) + + client_keep_alive = 7200 + + desync_mitigation_mode = "defensive" + + dns_name = (known after apply) + + drop_invalid_header_fields = true + + enable_deletion_protection = false + + enable_http2 = true + + enable_tls_version_and_cipher_suite_headers = false + + enable_waf_fail_open = false + + enable_xff_client_port = false + + enforce_security_group_inbound_rules_on_private_link_traffic = (known after apply) + + id = (known 
after apply) + + idle_timeout = 60 + + internal = (known after apply) + + ip_address_type = (known after apply) + + load_balancer_type = "application" + + name = "swarms-api" + + name_prefix = (known after apply) + + preserve_host_header = false + + security_groups = (known after apply) + + subnets = [ + + "1", + ] + + tags = { + + "Example" = "ex-swarms" + + "Name" = "ex-swarms" + + "Repository" = "https://github.com/terraform-aws-modules/terraform-aws-alb" + + "terraform-aws-modules" = "alb" + } + + tags_all = { + + "Example" = "ex-swarms" + + "Name" = "ex-swarms" + + "Repository" = "https://github.com/terraform-aws-modules/terraform-aws-alb" + + "terraform-aws-modules" = "alb" + } + + vpc_id = (known after apply) + + xff_header_processing_mode = "append" + + zone_id = (known after apply) + + + timeouts {} + } + + # module.swarms_api.module.asg_dynamic["t3.medium"].module.autoscaling.aws_autoscaling_group.this[0] will be created + + resource "aws_autoscaling_group" "this" { + + arn = (known after apply) + + availability_zones = (known after apply) + + default_cooldown = (known after apply) + + desired_capacity = 1 + + force_delete = false + + force_delete_warm_pool = false + + health_check_grace_period = 300 + + health_check_type = "EC2" + + id = (known after apply) + + ignore_failed_scaling_activities = false + + load_balancers = (known after apply) + + max_size = 5 + + metrics_granularity = "1Minute" + + min_size = 1 + + name = (known after apply) + + name_prefix = "swarms-size-t3.medium-" + + predicted_capacity = (known after apply) + + protect_from_scale_in = false + + service_linked_role_arn = (known after apply) + + target_group_arns = (known after apply) + + termination_policies = [] + + vpc_zone_identifier = [ + + "subnet-057c90cfe7b2e5646", + ] + + wait_for_capacity_timeout = "10m" + + warm_pool_size = (known after apply) + + + launch_template { + + id = (known after apply) + + name = (known after apply) + + version = "$Latest" + } + + + tag { + + key = 
"Name" + + propagate_at_launch = true + + value = "swarms-size-t3.medium" + } + + + timeouts {} + } + + # module.swarms_api.module.asg_dynamic["t3.medium"].module.autoscaling.aws_autoscaling_traffic_source_attachment.this["ex-alb"] will be created + + resource "aws_autoscaling_traffic_source_attachment" "this" { + + autoscaling_group_name = (known after apply) + + id = (known after apply) + + + traffic_source { + + identifier = "arn:aws:elasticloadbalancing:us-east-2:767503528736:targetgroup/swarms2024121619084133320000000c/001ec925380d4bcd" + + type = "elbv2" + } + } + + # module.swarms_api.module.security.module.asg_sg.aws_security_group.this_name_prefix[0] will be created + + resource "aws_security_group" "this_name_prefix" { + + arn = (known after apply) + + description = "external group" + + egress = (known after apply) + + id = (known after apply) + + ingress = (known after apply) + + name = (known after apply) + + name_prefix = "swarms-external-" + + owner_id = (known after apply) + + revoke_rules_on_delete = false + + tags = { + + "Name" = "swarms-external" + + "project" = "swarms" + } + + tags_all = { + + "Name" = "swarms-external" + + "project" = "swarms" + } + + vpc_id = "vpc-04f28c9347af48b55" + + + timeouts { + + create = "10m" + + delete = "15m" + } + } + + # module.swarms_api.module.security.module.asg_sg.aws_security_group_rule.egress_rules[0] will be created + + resource "aws_security_group_rule" "egress_rules" { + + cidr_blocks = [ + + "0.0.0.0/0", + ] + + description = "All protocols" + + from_port = -1 + + id = (known after apply) + + ipv6_cidr_blocks = [ + + "::/0", + ] + + prefix_list_ids = [] + + protocol = "-1" + + security_group_id = (known after apply) + + security_group_rule_id = (known after apply) + + self = false + + source_security_group_id = (known after apply) + + to_port = -1 + + type = "egress" + } + + # module.swarms_api.module.security.module.asg_sg.aws_security_group_rule.ingress_rules[0] will be created + + resource 
"aws_security_group_rule" "ingress_rules" { + + cidr_blocks = [ + + "0.0.0.0/0", + ] + + description = "HTTPS" + + from_port = 443 + + id = (known after apply) + + ipv6_cidr_blocks = [] + + prefix_list_ids = [] + + protocol = "tcp" + + security_group_id = (known after apply) + + security_group_rule_id = (known after apply) + + self = false + + source_security_group_id = (known after apply) + + to_port = 443 + + type = "ingress" + } + + # module.swarms_api.module.security.module.asg_sg.aws_security_group_rule.ingress_rules[1] will be created + + resource "aws_security_group_rule" "ingress_rules" { + + cidr_blocks = [ + + "0.0.0.0/0", + ] + + description = "HTTP" + + from_port = 80 + + id = (known after apply) + + ipv6_cidr_blocks = [] + + prefix_list_ids = [] + + protocol = "tcp" + + security_group_id = (known after apply) + + security_group_rule_id = (known after apply) + + self = false + + source_security_group_id = (known after apply) + + to_port = 80 + + type = "ingress" + } + + # module.swarms_api.module.security.module.asg_sg_internal.aws_security_group.this_name_prefix[0] will be created + + resource "aws_security_group" "this_name_prefix" { + + arn = (known after apply) + + description = "An internal security group" + + egress = (known after apply) + + id = (known after apply) + + ingress = (known after apply) + + name = (known after apply) + + name_prefix = "swarms-internal-" + + owner_id = (known after apply) + + revoke_rules_on_delete = false + + tags = { + + "Name" = "swarms-internal" + + "project" = "swarms" + } + + tags_all = { + + "Name" = "swarms-internal" + + "project" = "swarms" + } + + vpc_id = "vpc-04f28c9347af48b55" + + + timeouts { + + create = "10m" + + delete = "15m" + } + } + + # module.swarms_api.module.security.module.asg_sg_internal.aws_security_group_rule.egress_rules[0] will be created + + resource "aws_security_group_rule" "egress_rules" { + + cidr_blocks = [ + + "0.0.0.0/0", + ] + + description = "All protocols" + + from_port = -1 + + 
id = (known after apply) + + ipv6_cidr_blocks = [ + + "::/0", + ] + + prefix_list_ids = [] + + protocol = "-1" + + security_group_id = (known after apply) + + security_group_rule_id = (known after apply) + + self = false + + source_security_group_id = (known after apply) + + to_port = -1 + + type = "egress" + } + + # module.swarms_api.module.security.module.asg_sg_internal.aws_security_group_rule.ingress_with_source_security_group_id[0] will be created + + resource "aws_security_group_rule" "ingress_with_source_security_group_id" { + + description = "Ingress Rule" + + from_port = 80 + + id = (known after apply) + + prefix_list_ids = [] + + protocol = "tcp" + + security_group_id = (known after apply) + + security_group_rule_id = (known after apply) + + self = false + + source_security_group_id = (known after apply) + + to_port = 80 + + type = "ingress" + } + + + + +mdupont@mdupont-G470:~/2024/12/swarms-terraform/accounts/swarms$ tofu apply -auto-approve +module.swarms_api.module.alb.module.route53.data.aws_route53_zone.primary: Reading... +module.swarms_api.module.alb.module.alb.data.aws_partition.current: Reading... +module.swarms_api.module.vpc.data.aws_availability_zones.available: Reading... +module.swarms_api.module.alb.data.aws_availability_zones.available: Reading... +module.swarms_api.module.kp.aws_key_pair.mdupont-deployer: Refreshing state... [id=mdupont-deployer-key] +module.swarms_api.module.vpc.module.vpc.aws_vpc.this[0]: Refreshing state... 
[id=vpc-0b4cedd083227068d] +module.swarms_api.module.alb.module.alb.data.aws_partition.current: Read complete after 0s [id=aws] +module.swarms_api.module.alb.data.aws_availability_zones.available: Read complete after 0s [id=us-east-2] +module.swarms_api.module.vpc.data.aws_availability_zones.available: Read complete after 0s [id=us-east-2] +module.swarms_api.module.alb.module.route53.data.aws_route53_zone.primary: Read complete after 0s [id=Z04162952OP7P14Z97UWY] +module.swarms_api.module.vpc.module.vpc.aws_default_route_table.default[0]: Refreshing state... [id=rtb-053dc8c3d37f2f58f] +module.swarms_api.module.vpc.module.vpc.aws_default_security_group.this[0]: Refreshing state... [id=sg-0a42c3ef338285431] +module.swarms_api.module.vpc.module.vpc.aws_default_network_acl.this[0]: Refreshing state... [id=acl-07d9084294e0492bd] +module.swarms_api.module.vpc.module.vpc.aws_route_table.private[1]: Refreshing state... [id=rtb-0bbc3caa1cdc40cd0] +module.swarms_api.module.vpc.module.vpc.aws_route_table.private[0]: Refreshing state... [id=rtb-033eec802f743baca] +module.swarms_api.module.vpc.module.vpc.aws_route_table.private[2]: Refreshing state... [id=rtb-081f2bb265d2703ee] +module.swarms_api.module.vpc.module.vpc.aws_subnet.public[2]: Refreshing state... [id=subnet-027f4abd09a6543f0] +module.swarms_api.module.vpc.module.vpc.aws_route_table.public[0]: Refreshing state... [id=rtb-0445a2ea1c510657b] +module.swarms_api.module.vpc.module.vpc.aws_subnet.public[0]: Refreshing state... [id=subnet-04b3bdd4b0dc877f0] +module.swarms_api.module.vpc.module.vpc.aws_subnet.public[1]: Refreshing state... [id=subnet-051fecdee9028af79] +module.swarms_api.module.vpc.module.vpc.aws_subnet.private[1]: Refreshing state... [id=subnet-0de17633cfb166d29] +module.swarms_api.module.vpc.module.vpc.aws_subnet.private[0]: Refreshing state... [id=subnet-0ea44af2f97e12b1a] +module.swarms_api.module.vpc.module.vpc.aws_subnet.private[2]: Refreshing state... 
[id=subnet-0e926a2b5ae3f5acc] +module.swarms_api.module.vpc.module.vpc.aws_internet_gateway.this[0]: Refreshing state... [id=igw-022da11de86f41ae0] +module.swarms_api.module.security.module.asg_sg_internal.aws_security_group.this_name_prefix[0]: Refreshing state... [id=sg-0516329c97694b300] +module.swarms_api.module.security.module.asg_sg.aws_security_group.this_name_prefix[0]: Refreshing state... [id=sg-09e0227357b33ab1e] +module.swarms_api.module.alb.module.tg.aws_lb_target_group.this: Refreshing state... [id=arn:aws:elasticloadbalancing:us-east-2:916723593639:targetgroup/swarms2024121620041957850000000a/e915743119e96771] +module.swarms_api.module.vpc.module.vpc.aws_route_table_association.public[1]: Refreshing state... [id=rtbassoc-000d8799150d904e6] +module.swarms_api.module.vpc.module.vpc.aws_route_table_association.public[2]: Refreshing state... [id=rtbassoc-0b047c79fe554852d] +module.swarms_api.module.vpc.module.vpc.aws_route_table_association.public[0]: Refreshing state... [id=rtbassoc-033c4fa608f79a594] +module.swarms_api.module.vpc.module.vpc.aws_route.public_internet_gateway[0]: Refreshing state... [id=r-rtb-0445a2ea1c510657b1080289494] +module.swarms_api.module.vpc.module.vpc.aws_route_table_association.private[0]: Refreshing state... [id=rtbassoc-06f87a2b8106090d9] +module.swarms_api.module.vpc.module.vpc.aws_route_table_association.private[2]: Refreshing state... [id=rtbassoc-0657562cf0177710a] +module.swarms_api.module.vpc.module.vpc.aws_route_table_association.private[1]: Refreshing state... [id=rtbassoc-085a04c2858a3164e] +module.swarms_api.module.security.module.asg_sg_internal.aws_security_group_rule.egress_rules[0]: Refreshing state... [id=sgrule-1938904650] +module.swarms_api.module.security.module.asg_sg.aws_security_group_rule.ingress_rules[0]: Refreshing state... [id=sgrule-2188377888] +module.swarms_api.module.security.module.asg_sg.aws_security_group_rule.ingress_rules[1]: Refreshing state... 
[id=sgrule-1557830365] +module.swarms_api.module.security.module.asg_sg.aws_security_group_rule.egress_rules[0]: Refreshing state... [id=sgrule-2310235202] +module.swarms_api.module.security.module.asg_sg_internal.aws_security_group_rule.ingress_with_source_security_group_id[0]: Refreshing state... [id=sgrule-513653582] +module.swarms_api.module.lt_dynamic["t3.medium"].aws_launch_template.ec2_launch_template: Refreshing state... [id=lt-0ef5358bfbb143f2d] + +OpenTofu used the selected providers to generate the following execution plan. Resource actions are indicated with +the following symbols: + + create + +OpenTofu will perform the following actions: + + # module.swarms_api.module.alb.module.alb.aws_lb.this[0] will be created + + resource "aws_lb" "this" { + + arn = (known after apply) + + arn_suffix = (known after apply) + + client_keep_alive = 7200 + + desync_mitigation_mode = "defensive" + + dns_name = (known after apply) + + drop_invalid_header_fields = true + + enable_deletion_protection = false + + enable_http2 = true + + enable_tls_version_and_cipher_suite_headers = false + + enable_waf_fail_open = false + + enable_xff_client_port = false + + enforce_security_group_inbound_rules_on_private_link_traffic = (known after apply) + + id = (known after apply) + + idle_timeout = 60 + + internal = (known after apply) + + ip_address_type = (known after apply) + + load_balancer_type = "application" + + name = "swarms-api" + + name_prefix = (known after apply) + + preserve_host_header = false + + security_groups = [ + + "sg-09e0227357b33ab1e", + ] + + subnets = [ + + "1", + ] + + tags = { + + "Example" = "ex-swarms" + + "Name" = "ex-swarms" + + "Repository" = "https://github.com/terraform-aws-modules/terraform-aws-alb" + + "terraform-aws-modules" = "alb" + } + + tags_all = { + + "Example" = "ex-swarms" + + "Name" = "ex-swarms" + + "Repository" = "https://github.com/terraform-aws-modules/terraform-aws-alb" + + "terraform-aws-modules" = "alb" + } + + vpc_id = (known 
after apply) + + xff_header_processing_mode = "append" + + zone_id = (known after apply) + + + timeouts {} + } + + # module.swarms_api.module.alb.module.route53.aws_route53_record.api-cname will be created + + resource "aws_route53_record" "api-cname" { + + allow_overwrite = (known after apply) + + fqdn = (known after apply) + + id = (known after apply) + + name = "api.swarms.ai" + + records = (known after apply) + + ttl = 5 + + type = "CNAME" + + zone_id = "Z04162952OP7P14Z97UWY" + } + +Plan: 2 to add, 0 to change, 0 to destroy. +module.swarms_api.module.alb.module.alb.aws_lb.this[0]: Creating... +╷ +│ Error: creating ELBv2 application Load Balancer (swarms-api): operation error Elastic Load Balancing v2: CreateLoadBalancer, https response error StatusCode: 400, RequestID: 33d4b4ca-900e-4148-989a-932978bd7bfc, InvalidSubnet: The subnet ID '1' is not valid +│ +│ with module.swarms_api.module.alb.module.alb.aws_lb.this[0], +│ on .terraform/modules/swarms_api.alb.alb/main.tf line 12, in resource "aws_lb" "this": +│ 12: resource "aws_lb" "this" { +│ +╵ +mdupont@mdupont-G470:~/2024/12/swarms-terraform/accounts/swarms$ tofu apply -auto-approve +╷ +│ Error: Reference to undeclared module +│ +│ on ../../environments/swarms-aws-agent-api/dev/us-east-1/main.tf line 107, in output "alb": +│ 107: value = module.alb +│ +│ No module call named "alb" is declared in module.swarms_api. +╵ +mdupont@mdupont-G470:~/2024/12/swarms-terraform/accounts/swarms$ tofu apply -auto-approve +╷ +│ Error: Reference to undeclared module +│ +│ on ../../environments/swarms-aws-agent-api/dev/us-east-1/main.tf line 107, in output "alb": +│ 107: value = module.alb +│ +│ No module call named "alb" is declared in module.swarms_api. +╵ +mdupont@mdupont-G470:~/2024/12/swarms-terraform/accounts/swarms$ +mdupont@mdupont-G470:~/2024/12/swarms-terraform/accounts/swarms$ tofu apply -auto-approve +module.swarms_api.module.vpc.data.aws_availability_zones.available: Reading... 
+module.swarms_api.module.kp.aws_key_pair.mdupont-deployer: Refreshing state... [id=mdupont-deployer-key] +module.swarms_api.module.vpc.module.vpc.aws_vpc.this[0]: Refreshing state... [id=vpc-0b4cedd083227068d] +module.swarms_api.module.alb.module.tg.aws_lb_target_group.this: Refreshing state... [id=arn:aws:elasticloadbalancing:us-east-2:916723593639:targetgroup/swarms2024121620041957850000000a/e915743119e96771] +module.swarms_api.module.vpc.data.aws_availability_zones.available: Read complete after 0s [id=us-east-2] +module.swarms_api.module.vpc.module.vpc.aws_default_security_group.this[0]: Refreshing state... [id=sg-0a42c3ef338285431] +module.swarms_api.module.vpc.module.vpc.aws_default_route_table.default[0]: Refreshing state... [id=rtb-053dc8c3d37f2f58f] +module.swarms_api.module.vpc.module.vpc.aws_route_table.public[0]: Refreshing state... [id=rtb-0445a2ea1c510657b] +module.swarms_api.module.vpc.module.vpc.aws_default_network_acl.this[0]: Refreshing state... [id=acl-07d9084294e0492bd] +module.swarms_api.module.vpc.module.vpc.aws_subnet.public[1]: Refreshing state... [id=subnet-051fecdee9028af79] +module.swarms_api.module.vpc.module.vpc.aws_subnet.public[2]: Refreshing state... [id=subnet-027f4abd09a6543f0] +module.swarms_api.module.vpc.module.vpc.aws_subnet.public[0]: Refreshing state... [id=subnet-04b3bdd4b0dc877f0] +module.swarms_api.module.vpc.module.vpc.aws_route_table.private[0]: Refreshing state... [id=rtb-033eec802f743baca] +module.swarms_api.module.vpc.module.vpc.aws_route_table.private[2]: Refreshing state... [id=rtb-081f2bb265d2703ee] +module.swarms_api.module.vpc.module.vpc.aws_route_table.private[1]: Refreshing state... [id=rtb-0bbc3caa1cdc40cd0] +module.swarms_api.module.vpc.module.vpc.aws_internet_gateway.this[0]: Refreshing state... [id=igw-022da11de86f41ae0] +module.swarms_api.module.vpc.module.vpc.aws_subnet.private[0]: Refreshing state... 
[id=subnet-0ea44af2f97e12b1a] +module.swarms_api.module.vpc.module.vpc.aws_subnet.private[1]: Refreshing state... [id=subnet-0de17633cfb166d29] +module.swarms_api.module.vpc.module.vpc.aws_subnet.private[2]: Refreshing state... [id=subnet-0e926a2b5ae3f5acc] +module.swarms_api.module.security.module.asg_sg_internal.aws_security_group.this_name_prefix[0]: Refreshing state... [id=sg-0516329c97694b300] +module.swarms_api.module.security.module.asg_sg.aws_security_group.this_name_prefix[0]: Refreshing state... [id=sg-09e0227357b33ab1e] +module.swarms_api.module.vpc.module.vpc.aws_route.public_internet_gateway[0]: Refreshing state... [id=r-rtb-0445a2ea1c510657b1080289494] +module.swarms_api.module.vpc.module.vpc.aws_route_table_association.public[2]: Refreshing state... [id=rtbassoc-0b047c79fe554852d] +module.swarms_api.module.vpc.module.vpc.aws_route_table_association.public[1]: Refreshing state... [id=rtbassoc-000d8799150d904e6] +module.swarms_api.module.vpc.module.vpc.aws_route_table_association.public[0]: Refreshing state... [id=rtbassoc-033c4fa608f79a594] +module.swarms_api.module.vpc.module.vpc.aws_route_table_association.private[2]: Refreshing state... [id=rtbassoc-0657562cf0177710a] +module.swarms_api.module.vpc.module.vpc.aws_route_table_association.private[0]: Refreshing state... [id=rtbassoc-06f87a2b8106090d9] +module.swarms_api.module.vpc.module.vpc.aws_route_table_association.private[1]: Refreshing state... [id=rtbassoc-085a04c2858a3164e] +module.swarms_api.module.security.module.asg_sg_internal.aws_security_group_rule.egress_rules[0]: Refreshing state... [id=sgrule-1938904650] +module.swarms_api.module.security.module.asg_sg.aws_security_group_rule.ingress_rules[1]: Refreshing state... [id=sgrule-1557830365] +module.swarms_api.module.security.module.asg_sg.aws_security_group_rule.ingress_rules[0]: Refreshing state... [id=sgrule-2188377888] +module.swarms_api.module.security.module.asg_sg.aws_security_group_rule.egress_rules[0]: Refreshing state... 
[id=sgrule-2310235202] +module.swarms_api.module.security.module.asg_sg_internal.aws_security_group_rule.ingress_with_source_security_group_id[0]: Refreshing state... [id=sgrule-513653582] +module.swarms_api.module.lt_dynamic["t3.medium"].aws_launch_template.ec2_launch_template: Refreshing state... [id=lt-0ef5358bfbb143f2d] + +OpenTofu used the selected providers to generate the following execution plan. Resource actions are indicated with +the following symbols: + - destroy + +OpenTofu will perform the following actions: + + # module.swarms_api.module.alb.module.tg.aws_lb_target_group.this will be destroyed + # (because aws_lb_target_group.this is not in configuration) + - resource "aws_lb_target_group" "this" { + - arn = "arn:aws:elasticloadbalancing:us-east-2:916723593639:targetgroup/swarms2024121620041957850000000a/e915743119e96771" -> null + - arn_suffix = "targetgroup/swarms2024121620041957850000000a/e915743119e96771" -> null + - deregistration_delay = "10" -> null + - id = "arn:aws:elasticloadbalancing:us-east-2:916723593639:targetgroup/swarms2024121620041957850000000a/e915743119e96771" -> null + - ip_address_type = "ipv4" -> null + - lambda_multi_value_headers_enabled = false -> null + - load_balancer_arns = [] -> null + - load_balancing_algorithm_type = "round_robin" -> null + - load_balancing_anomaly_mitigation = "off" -> null + - load_balancing_cross_zone_enabled = "use_load_balancer_configuration" -> null + - name = "swarms2024121620041957850000000a" -> null + - name_prefix = "swarms" -> null + - port = 80 -> null + - protocol = "HTTP" -> null + - protocol_version = "HTTP1" -> null + - proxy_protocol_v2 = false -> null + - slow_start = 0 -> null + - tags = {} -> null + - tags_all = {} -> null + - target_type = "instance" -> null + - vpc_id = "vpc-0b4cedd083227068d" -> null + + - health_check { + - enabled = true -> null + - healthy_threshold = 10 -> null + - interval = 130 -> null + - matcher = "200" -> null + - path = "/v1/docs" -> null + - port = 
"traffic-port" -> null + - protocol = "HTTP" -> null + - timeout = 120 -> null + - unhealthy_threshold = 10 -> null + } + + - stickiness { + - cookie_duration = 86400 -> null + - enabled = false -> null + - type = "lb_cookie" -> null + } + + - target_failover {} + + - target_group_health { + - dns_failover { + - minimum_healthy_targets_count = "1" -> null + - minimum_healthy_targets_percentage = "off" -> null + } + - unhealthy_state_routing { + - minimum_healthy_targets_count = 1 -> null + - minimum_healthy_targets_percentage = "off" -> null + } + } + + - target_health_state {} + } + +Plan: 0 to add, 0 to change, 1 to destroy. +module.swarms_api.module.alb.module.tg.aws_lb_target_group.this: Destroying... [id=arn:aws:elasticloadbalancing:us-east-2:916723593639:targetgroup/swarms2024121620041957850000000a/e915743119e96771] +module.swarms_api.module.alb.module.tg.aws_lb_target_group.this: Destruction complete after 1s + +Apply complete! Resources: 0 added, 0 changed, 1 destroyed. + +Outputs: + +account = "916723593639" +dns = "api.swarms.ai" +profile = "swarms" +region = "us-east-2" +mdupont@mdupont-G470:~/2024/12/swarms-terraform/accounts/swarms$ tofu output +account = "916723593639" +dns = "api.swarms.ai" +profile = "swarms" +region = "us-east-2" +mdupont@mdupont-G470:~/2024/12/swarms-terraform/accounts/swarms$ tofu output +account = "916723593639" +dns = "api.swarms.ai" +profile = "swarms" +region = "us-east-2" +mdupont@mdupont-G470:~/2024/12/swarms-terraform/accounts/swarms$ tofu apply +module.swarms_api.module.vpc.data.aws_availability_zones.available: Reading... +module.swarms_api.module.kp.aws_key_pair.mdupont-deployer: Refreshing state... [id=mdupont-deployer-key] +module.swarms_api.module.vpc.module.vpc.aws_vpc.this[0]: Refreshing state... 
[id=vpc-0b4cedd083227068d] +module.swarms_api.module.vpc.data.aws_availability_zones.available: Read complete after 0s [id=us-east-2] +module.swarms_api.module.vpc.module.vpc.aws_default_route_table.default[0]: Refreshing state... [id=rtb-053dc8c3d37f2f58f] +module.swarms_api.module.vpc.module.vpc.aws_default_security_group.this[0]: Refreshing state... [id=sg-0a42c3ef338285431] +module.swarms_api.module.vpc.module.vpc.aws_route_table.public[0]: Refreshing state... [id=rtb-0445a2ea1c510657b] +module.swarms_api.module.vpc.module.vpc.aws_default_network_acl.this[0]: Refreshing state... [id=acl-07d9084294e0492bd] +module.swarms_api.module.vpc.module.vpc.aws_subnet.private[2]: Refreshing state... [id=subnet-0e926a2b5ae3f5acc] +module.swarms_api.module.vpc.module.vpc.aws_subnet.public[2]: Refreshing state... [id=subnet-027f4abd09a6543f0] +module.swarms_api.module.vpc.module.vpc.aws_subnet.private[0]: Refreshing state... [id=subnet-0ea44af2f97e12b1a] +module.swarms_api.module.vpc.module.vpc.aws_subnet.public[0]: Refreshing state... [id=subnet-04b3bdd4b0dc877f0] +module.swarms_api.module.vpc.module.vpc.aws_subnet.public[1]: Refreshing state... [id=subnet-051fecdee9028af79] +module.swarms_api.module.vpc.module.vpc.aws_subnet.private[1]: Refreshing state... [id=subnet-0de17633cfb166d29] +module.swarms_api.module.vpc.module.vpc.aws_internet_gateway.this[0]: Refreshing state... [id=igw-022da11de86f41ae0] +module.swarms_api.module.vpc.module.vpc.aws_route_table.private[0]: Refreshing state... [id=rtb-033eec802f743baca] +module.swarms_api.module.vpc.module.vpc.aws_route_table.private[1]: Refreshing state... [id=rtb-0bbc3caa1cdc40cd0] +module.swarms_api.module.vpc.module.vpc.aws_route_table.private[2]: Refreshing state... [id=rtb-081f2bb265d2703ee] +module.swarms_api.module.security.module.asg_sg_internal.aws_security_group.this_name_prefix[0]: Refreshing state... 
[id=sg-0516329c97694b300] +module.swarms_api.module.security.module.asg_sg.aws_security_group.this_name_prefix[0]: Refreshing state... [id=sg-09e0227357b33ab1e] +module.swarms_api.module.vpc.module.vpc.aws_route_table_association.public[1]: Refreshing state... [id=rtbassoc-000d8799150d904e6] +module.swarms_api.module.vpc.module.vpc.aws_route_table_association.public[2]: Refreshing state... [id=rtbassoc-0b047c79fe554852d] +module.swarms_api.module.vpc.module.vpc.aws_route_table_association.public[0]: Refreshing state... [id=rtbassoc-033c4fa608f79a594] +module.swarms_api.module.vpc.module.vpc.aws_route.public_internet_gateway[0]: Refreshing state... [id=r-rtb-0445a2ea1c510657b1080289494] +module.swarms_api.module.vpc.module.vpc.aws_route_table_association.private[0]: Refreshing state... [id=rtbassoc-06f87a2b8106090d9] +module.swarms_api.module.vpc.module.vpc.aws_route_table_association.private[2]: Refreshing state... [id=rtbassoc-0657562cf0177710a] +module.swarms_api.module.vpc.module.vpc.aws_route_table_association.private[1]: Refreshing state... [id=rtbassoc-085a04c2858a3164e] +module.swarms_api.module.security.module.asg_sg_internal.aws_security_group_rule.egress_rules[0]: Refreshing state... [id=sgrule-1938904650] +module.swarms_api.module.lt_dynamic["t3.medium"].aws_launch_template.ec2_launch_template: Refreshing state... [id=lt-0ef5358bfbb143f2d] +module.swarms_api.module.security.module.asg_sg.aws_security_group_rule.ingress_rules[0]: Refreshing state... [id=sgrule-2188377888] +module.swarms_api.module.security.module.asg_sg.aws_security_group_rule.ingress_rules[1]: Refreshing state... [id=sgrule-1557830365] +module.swarms_api.module.security.module.asg_sg.aws_security_group_rule.egress_rules[0]: Refreshing state... [id=sgrule-2310235202] +module.swarms_api.module.security.module.asg_sg_internal.aws_security_group_rule.ingress_with_source_security_group_id[0]: Refreshing state... 
[id=sgrule-513653582] + +Changes to Outputs: + + api = { + + security_group_id = "sg-09e0227357b33ab1e" + } + +You can apply this plan to save these new output values to the OpenTofu state, without changing any real +infrastructure. + +Do you want to perform these actions? + OpenTofu will perform the actions described above. + Only 'yes' will be accepted to approve. + + Enter a value: yes + + +Apply complete! Resources: 0 added, 0 changed, 0 destroyed. + +Outputs: + +account = "916723593639" +api = { + "security_group_id" = "sg-09e0227357b33ab1e" +} +dns = "api.swarms.ai" +profile = "swarms" +region = "us-east-2" +mdupont@mdupont-G470:~/2024/12/swarms-terraform/accounts/swarms$ tofu apply +module.swarms_api.module.vpc.data.aws_availability_zones.available: Reading... +module.swarms_api.module.kp.aws_key_pair.mdupont-deployer: Refreshing state... [id=mdupont-deployer-key] +module.swarms_api.module.vpc.module.vpc.aws_vpc.this[0]: Refreshing state... [id=vpc-0b4cedd083227068d] +module.swarms_api.module.vpc.data.aws_availability_zones.available: Read complete after 0s [id=us-east-2] +module.swarms_api.module.vpc.module.vpc.aws_default_route_table.default[0]: Refreshing state... [id=rtb-053dc8c3d37f2f58f] +module.swarms_api.module.vpc.module.vpc.aws_default_network_acl.this[0]: Refreshing state... [id=acl-07d9084294e0492bd] +module.swarms_api.module.vpc.module.vpc.aws_default_security_group.this[0]: Refreshing state... [id=sg-0a42c3ef338285431] +module.swarms_api.module.vpc.module.vpc.aws_subnet.public[2]: Refreshing state... [id=subnet-027f4abd09a6543f0] +module.swarms_api.module.vpc.module.vpc.aws_subnet.public[0]: Refreshing state... [id=subnet-04b3bdd4b0dc877f0] +module.swarms_api.module.vpc.module.vpc.aws_internet_gateway.this[0]: Refreshing state... [id=igw-022da11de86f41ae0] +module.swarms_api.module.vpc.module.vpc.aws_subnet.public[1]: Refreshing state... 
[id=subnet-051fecdee9028af79] +module.swarms_api.module.vpc.module.vpc.aws_route_table.public[0]: Refreshing state... [id=rtb-0445a2ea1c510657b] +module.swarms_api.module.vpc.module.vpc.aws_route_table.private[1]: Refreshing state... [id=rtb-0bbc3caa1cdc40cd0] +module.swarms_api.module.vpc.module.vpc.aws_route_table.private[0]: Refreshing state... [id=rtb-033eec802f743baca] +module.swarms_api.module.vpc.module.vpc.aws_route_table.private[2]: Refreshing state... [id=rtb-081f2bb265d2703ee] +module.swarms_api.module.vpc.module.vpc.aws_subnet.private[0]: Refreshing state... [id=subnet-0ea44af2f97e12b1a] +module.swarms_api.module.vpc.module.vpc.aws_subnet.private[2]: Refreshing state... [id=subnet-0e926a2b5ae3f5acc] +module.swarms_api.module.vpc.module.vpc.aws_subnet.private[1]: Refreshing state... [id=subnet-0de17633cfb166d29] +module.swarms_api.module.security.module.asg_sg.aws_security_group.this_name_prefix[0]: Refreshing state... [id=sg-09e0227357b33ab1e] +module.swarms_api.module.security.module.asg_sg_internal.aws_security_group.this_name_prefix[0]: Refreshing state... [id=sg-0516329c97694b300] +module.swarms_api.module.vpc.module.vpc.aws_route.public_internet_gateway[0]: Refreshing state... [id=r-rtb-0445a2ea1c510657b1080289494] +module.swarms_api.module.vpc.module.vpc.aws_route_table_association.public[0]: Refreshing state... [id=rtbassoc-033c4fa608f79a594] +module.swarms_api.module.vpc.module.vpc.aws_route_table_association.public[2]: Refreshing state... [id=rtbassoc-0b047c79fe554852d] +module.swarms_api.module.vpc.module.vpc.aws_route_table_association.public[1]: Refreshing state... [id=rtbassoc-000d8799150d904e6] +module.swarms_api.module.vpc.module.vpc.aws_route_table_association.private[1]: Refreshing state... [id=rtbassoc-085a04c2858a3164e] +module.swarms_api.module.vpc.module.vpc.aws_route_table_association.private[0]: Refreshing state... 
[id=rtbassoc-06f87a2b8106090d9] +module.swarms_api.module.vpc.module.vpc.aws_route_table_association.private[2]: Refreshing state... [id=rtbassoc-0657562cf0177710a] +module.swarms_api.module.security.module.asg_sg_internal.aws_security_group_rule.egress_rules[0]: Refreshing state... [id=sgrule-1938904650] +module.swarms_api.module.security.module.asg_sg.aws_security_group_rule.ingress_rules[1]: Refreshing state... [id=sgrule-1557830365] +module.swarms_api.module.security.module.asg_sg.aws_security_group_rule.ingress_rules[0]: Refreshing state... [id=sgrule-2188377888] +module.swarms_api.module.security.module.asg_sg.aws_security_group_rule.egress_rules[0]: Refreshing state... [id=sgrule-2310235202] +module.swarms_api.module.security.module.asg_sg_internal.aws_security_group_rule.ingress_with_source_security_group_id[0]: Refreshing state... [id=sgrule-513653582] +module.swarms_api.module.lt_dynamic["t3.medium"].aws_launch_template.ec2_launch_template: Refreshing state... [id=lt-0ef5358bfbb143f2d] + +No changes. Your infrastructure matches the configuration. + +OpenTofu has compared your real infrastructure against your configuration and found no differences, so no changes are +needed. + +Apply complete! Resources: 0 added, 0 changed, 0 destroyed. + +Outputs: + +account = "916723593639" +api = { + "security_group_id" = "sg-09e0227357b33ab1e" +} +dns = "api.swarms.ai" +profile = "swarms" +region = "us-east-2" +mdupont@mdupont-G470:~/2024/12/swarms-terraform/accounts/swarms$ tofu apply +module.swarms_api.module.vpc.data.aws_availability_zones.available: Reading... +module.swarms_api.module.kp.aws_key_pair.mdupont-deployer: Refreshing state... [id=mdupont-deployer-key] +module.swarms_api.module.vpc.module.vpc.aws_vpc.this[0]: Refreshing state... 
[id=vpc-0b4cedd083227068d] +module.swarms_api.module.vpc.data.aws_availability_zones.available: Read complete after 0s [id=us-east-2] +module.swarms_api.module.vpc.module.vpc.aws_default_route_table.default[0]: Refreshing state... [id=rtb-053dc8c3d37f2f58f] +module.swarms_api.module.vpc.module.vpc.aws_default_security_group.this[0]: Refreshing state... [id=sg-0a42c3ef338285431] +module.swarms_api.module.vpc.module.vpc.aws_default_network_acl.this[0]: Refreshing state... [id=acl-07d9084294e0492bd] +module.swarms_api.module.vpc.module.vpc.aws_subnet.private[2]: Refreshing state... [id=subnet-0e926a2b5ae3f5acc] +module.swarms_api.module.vpc.module.vpc.aws_subnet.private[1]: Refreshing state... [id=subnet-0de17633cfb166d29] +module.swarms_api.module.vpc.module.vpc.aws_subnet.private[0]: Refreshing state... [id=subnet-0ea44af2f97e12b1a] +module.swarms_api.module.vpc.module.vpc.aws_subnet.public[0]: Refreshing state... [id=subnet-04b3bdd4b0dc877f0] +module.swarms_api.module.vpc.module.vpc.aws_subnet.public[1]: Refreshing state... [id=subnet-051fecdee9028af79] +module.swarms_api.module.vpc.module.vpc.aws_subnet.public[2]: Refreshing state... [id=subnet-027f4abd09a6543f0] +module.swarms_api.module.vpc.module.vpc.aws_route_table.private[2]: Refreshing state... [id=rtb-081f2bb265d2703ee] +module.swarms_api.module.vpc.module.vpc.aws_route_table.private[0]: Refreshing state... [id=rtb-033eec802f743baca] +module.swarms_api.module.vpc.module.vpc.aws_route_table.private[1]: Refreshing state... [id=rtb-0bbc3caa1cdc40cd0] +module.swarms_api.module.vpc.module.vpc.aws_internet_gateway.this[0]: Refreshing state... [id=igw-022da11de86f41ae0] +module.swarms_api.module.vpc.module.vpc.aws_route_table.public[0]: Refreshing state... [id=rtb-0445a2ea1c510657b] +module.swarms_api.module.security.module.asg_sg_internal.aws_security_group.this_name_prefix[0]: Refreshing state... 
[id=sg-0516329c97694b300] +module.swarms_api.module.security.module.asg_sg.aws_security_group.this_name_prefix[0]: Refreshing state... [id=sg-09e0227357b33ab1e] +module.swarms_api.module.vpc.module.vpc.aws_route_table_association.private[0]: Refreshing state... [id=rtbassoc-06f87a2b8106090d9] +module.swarms_api.module.vpc.module.vpc.aws_route_table_association.private[2]: Refreshing state... [id=rtbassoc-0657562cf0177710a] +module.swarms_api.module.vpc.module.vpc.aws_route_table_association.private[1]: Refreshing state... [id=rtbassoc-085a04c2858a3164e] +module.swarms_api.module.vpc.module.vpc.aws_route.public_internet_gateway[0]: Refreshing state... [id=r-rtb-0445a2ea1c510657b1080289494] +module.swarms_api.module.vpc.module.vpc.aws_route_table_association.public[0]: Refreshing state... [id=rtbassoc-033c4fa608f79a594] +module.swarms_api.module.vpc.module.vpc.aws_route_table_association.public[1]: Refreshing state... [id=rtbassoc-000d8799150d904e6] +module.swarms_api.module.vpc.module.vpc.aws_route_table_association.public[2]: Refreshing state... [id=rtbassoc-0b047c79fe554852d] +module.swarms_api.module.security.module.asg_sg_internal.aws_security_group_rule.egress_rules[0]: Refreshing state... [id=sgrule-1938904650] +module.swarms_api.module.security.module.asg_sg.aws_security_group_rule.egress_rules[0]: Refreshing state... [id=sgrule-2310235202] +module.swarms_api.module.security.module.asg_sg_internal.aws_security_group_rule.ingress_with_source_security_group_id[0]: Refreshing state... [id=sgrule-513653582] +module.swarms_api.module.security.module.asg_sg.aws_security_group_rule.ingress_rules[0]: Refreshing state... [id=sgrule-2188377888] +module.swarms_api.module.lt_dynamic["t3.medium"].aws_launch_template.ec2_launch_template: Refreshing state... [id=lt-0ef5358bfbb143f2d] +module.swarms_api.module.security.module.asg_sg.aws_security_group_rule.ingress_rules[1]: Refreshing state... 
[id=sgrule-1557830365] + +Changes to Outputs: + ~ api = { + + vpc = { + + cgw_arns = [] + + cgw_ids = [] + + default_network_acl_id = "acl-07d9084294e0492bd" + + default_route_table_id = "rtb-053dc8c3d37f2f58f" + + default_security_group_id = "sg-0a42c3ef338285431" + + default_vpc_arn = null + + default_vpc_cidr_block = null + + default_vpc_default_network_acl_id = null + + default_vpc_default_route_table_id = null + + default_vpc_default_security_group_id = null + + default_vpc_enable_dns_hostnames = null + + default_vpc_enable_dns_support = null + + default_vpc_id = null + + default_vpc_instance_tenancy = null + + default_vpc_main_route_table_id = null + + dhcp_options_id = null + + ec2_public_subnet_id_1 = 1 + + ec2_public_subnet_id_2 = 1 + + egress_only_internet_gateway_id = null + + igw_arn = "arn:aws:ec2:us-east-2:916723593639:internet-gateway/igw-022da11de86f41ae0" + + igw_id = "igw-022da11de86f41ae0" + + nat_ids = [] + + nat_public_ips = [] + + natgw_ids = [] + + private_ipv6_egress_route_ids = [] + + private_nat_gateway_route_ids = [] + + private_network_acl_arn = null + + private_network_acl_id = null + + private_route_table_association_ids = [ + + "rtbassoc-06f87a2b8106090d9", + + "rtbassoc-085a04c2858a3164e", + + "rtbassoc-0657562cf0177710a", + ] + + private_route_table_ids = [ + + "rtb-033eec802f743baca", + + "rtb-0bbc3caa1cdc40cd0", + + "rtb-081f2bb265d2703ee", + ] + + private_subnet_arns = [ + + "arn:aws:ec2:us-east-2:916723593639:subnet/subnet-0ea44af2f97e12b1a", + + "arn:aws:ec2:us-east-2:916723593639:subnet/subnet-0de17633cfb166d29", + + "arn:aws:ec2:us-east-2:916723593639:subnet/subnet-0e926a2b5ae3f5acc", + ] + + private_subnets = [ + + "subnet-0ea44af2f97e12b1a", + + "subnet-0de17633cfb166d29", + + "subnet-0e926a2b5ae3f5acc", + ] + + private_subnets_cidr_blocks = [ + + "10.0.0.0/24", + + "10.0.1.0/24", + + "10.0.2.0/24", + ] + + private_subnets_ipv6_cidr_blocks = [] + + public_internet_gateway_ipv6_route_id = null + + 
public_internet_gateway_route_id = "r-rtb-0445a2ea1c510657b1080289494" + + public_network_acl_arn = null + + public_network_acl_id = null + + public_route_table_association_ids = [ + + "rtbassoc-033c4fa608f79a594", + + "rtbassoc-000d8799150d904e6", + + "rtbassoc-0b047c79fe554852d", + ] + + public_route_table_ids = [ + + "rtb-0445a2ea1c510657b", + ] + + public_subnet_arns = [ + + "arn:aws:ec2:us-east-2:916723593639:subnet/subnet-04b3bdd4b0dc877f0", + + "arn:aws:ec2:us-east-2:916723593639:subnet/subnet-051fecdee9028af79", + + "arn:aws:ec2:us-east-2:916723593639:subnet/subnet-027f4abd09a6543f0", + ] + + public_subnets = [ + + "subnet-04b3bdd4b0dc877f0", + + "subnet-051fecdee9028af79", + + "subnet-027f4abd09a6543f0", + ] + + public_subnets_cidr_blocks = [ + + "10.0.4.0/24", + + "10.0.5.0/24", + + "10.0.6.0/24", + ] + + public_subnets_ipv6_cidr_blocks = [] + + this_customer_gateway = {} + + vgw_arn = null + + vgw_id = null + + vpc = { + + azs = [ + + "us-east-2a", + + "us-east-2b", + + "us-east-2c", + ] + + cgw_arns = [] + + cgw_ids = [] + + database_internet_gateway_route_id = null + + database_ipv6_egress_route_id = null + + database_nat_gateway_route_ids = [] + + database_network_acl_arn = null + + database_network_acl_id = null + + database_route_table_association_ids = [] + + database_route_table_ids = [ + + "rtb-033eec802f743baca", + + "rtb-0bbc3caa1cdc40cd0", + + "rtb-081f2bb265d2703ee", + ] + + database_subnet_arns = [] + + database_subnet_group = null + + database_subnet_group_name = null + + database_subnet_objects = [] + + database_subnets = [] + + database_subnets_cidr_blocks = [] + + database_subnets_ipv6_cidr_blocks = [] + + default_network_acl_id = "acl-07d9084294e0492bd" + + default_route_table_id = "rtb-053dc8c3d37f2f58f" + + default_security_group_id = "sg-0a42c3ef338285431" + + default_vpc_arn = null + + default_vpc_cidr_block = null + + default_vpc_default_network_acl_id = null + + default_vpc_default_route_table_id = null + + 
default_vpc_default_security_group_id = null + + default_vpc_enable_dns_hostnames = null + + default_vpc_enable_dns_support = null + + default_vpc_id = null + + default_vpc_instance_tenancy = null + + default_vpc_main_route_table_id = null + + dhcp_options_id = null + + egress_only_internet_gateway_id = null + + elasticache_network_acl_arn = null + + elasticache_network_acl_id = null + + elasticache_route_table_association_ids = [] + + elasticache_route_table_ids = [ + + "rtb-033eec802f743baca", + + "rtb-0bbc3caa1cdc40cd0", + + "rtb-081f2bb265d2703ee", + ] + + elasticache_subnet_arns = [] + + elasticache_subnet_group = null + + elasticache_subnet_group_name = null + + elasticache_subnet_objects = [] + + elasticache_subnets = [] + + elasticache_subnets_cidr_blocks = [] + + elasticache_subnets_ipv6_cidr_blocks = [] + + igw_arn = "arn:aws:ec2:us-east-2:916723593639:internet-gateway/igw-022da11de86f41ae0" + + igw_id = "igw-022da11de86f41ae0" + + intra_network_acl_arn = null + + intra_network_acl_id = null + + intra_route_table_association_ids = [] + + intra_route_table_ids = [] + + intra_subnet_arns = [] + + intra_subnet_objects = [] + + intra_subnets = [] + + intra_subnets_cidr_blocks = [] + + intra_subnets_ipv6_cidr_blocks = [] + + name = "swarms" + + nat_ids = [] + + nat_public_ips = [] + + natgw_ids = [] + + natgw_interface_ids = [] + + outpost_network_acl_arn = null + + outpost_network_acl_id = null + + outpost_subnet_arns = [] + + outpost_subnet_objects = [] + + outpost_subnets = [] + + outpost_subnets_cidr_blocks = [] + + outpost_subnets_ipv6_cidr_blocks = [] + + private_ipv6_egress_route_ids = [] + + private_nat_gateway_route_ids = [] + + private_network_acl_arn = null + + private_network_acl_id = null + + private_route_table_association_ids = [ + + "rtbassoc-06f87a2b8106090d9", + + "rtbassoc-085a04c2858a3164e", + + "rtbassoc-0657562cf0177710a", + ] + + private_route_table_ids = [ + + "rtb-033eec802f743baca", + + "rtb-0bbc3caa1cdc40cd0", + + 
"rtb-081f2bb265d2703ee", + ] + + private_subnet_arns = [ + + "arn:aws:ec2:us-east-2:916723593639:subnet/subnet-0ea44af2f97e12b1a", + + "arn:aws:ec2:us-east-2:916723593639:subnet/subnet-0de17633cfb166d29", + + "arn:aws:ec2:us-east-2:916723593639:subnet/subnet-0e926a2b5ae3f5acc", + ] + + private_subnet_objects = [ + + { + + arn = "arn:aws:ec2:us-east-2:916723593639:subnet/subnet-0ea44af2f97e12b1a" + + assign_ipv6_address_on_creation = false + + availability_zone = "us-east-2a" + + availability_zone_id = "use2-az1" + + cidr_block = "10.0.0.0/24" + + customer_owned_ipv4_pool = "" + + enable_dns64 = false + + enable_lni_at_device_index = 0 + + enable_resource_name_dns_a_record_on_launch = false + + enable_resource_name_dns_aaaa_record_on_launch = false + + id = "subnet-0ea44af2f97e12b1a" + + ipv6_cidr_block = "" + + ipv6_cidr_block_association_id = "" + + ipv6_native = false + + map_customer_owned_ip_on_launch = false + + map_public_ip_on_launch = false + + outpost_arn = "" + + owner_id = "916723593639" + + private_dns_hostname_type_on_launch = "ip-name" + + tags = { + + Name = "Private Subnet One" + + project = "swarms" + } + + tags_all = { + + Name = "Private Subnet One" + + project = "swarms" + } + + timeouts = null + + vpc_id = "vpc-0b4cedd083227068d" + }, + + { + + arn = "arn:aws:ec2:us-east-2:916723593639:subnet/subnet-0de17633cfb166d29" + + assign_ipv6_address_on_creation = false + + availability_zone = "us-east-2b" + + availability_zone_id = "use2-az2" + + cidr_block = "10.0.1.0/24" + + customer_owned_ipv4_pool = "" + + enable_dns64 = false + + enable_lni_at_device_index = 0 + + enable_resource_name_dns_a_record_on_launch = false + + enable_resource_name_dns_aaaa_record_on_launch = false + + id = "subnet-0de17633cfb166d29" + + ipv6_cidr_block = "" + + ipv6_cidr_block_association_id = "" + + ipv6_native = false + + map_customer_owned_ip_on_launch = false + + map_public_ip_on_launch = false + + outpost_arn = "" + + owner_id = "916723593639" + + 
private_dns_hostname_type_on_launch = "ip-name" + + tags = { + + Name = "Private Subnet Two" + + project = "swarms" + } + + tags_all = { + + Name = "Private Subnet Two" + + project = "swarms" + } + + timeouts = null + + vpc_id = "vpc-0b4cedd083227068d" + }, + + { + + arn = "arn:aws:ec2:us-east-2:916723593639:subnet/subnet-0e926a2b5ae3f5acc" + + assign_ipv6_address_on_creation = false + + availability_zone = "us-east-2c" + + availability_zone_id = "use2-az3" + + cidr_block = "10.0.2.0/24" + + customer_owned_ipv4_pool = "" + + enable_dns64 = false + + enable_lni_at_device_index = 0 + + enable_resource_name_dns_a_record_on_launch = false + + enable_resource_name_dns_aaaa_record_on_launch = false + + id = "subnet-0e926a2b5ae3f5acc" + + ipv6_cidr_block = "" + + ipv6_cidr_block_association_id = "" + + ipv6_native = false + + map_customer_owned_ip_on_launch = false + + map_public_ip_on_launch = false + + outpost_arn = "" + + owner_id = "916723593639" + + private_dns_hostname_type_on_launch = "ip-name" + + tags = { + + Name = "swarms-private-us-east-2c" + + project = "swarms" + } + + tags_all = { + + Name = "swarms-private-us-east-2c" + + project = "swarms" + } + + timeouts = null + + vpc_id = "vpc-0b4cedd083227068d" + }, + ] + + private_subnets = [ + + "subnet-0ea44af2f97e12b1a", + + "subnet-0de17633cfb166d29", + + "subnet-0e926a2b5ae3f5acc", + ] + + private_subnets_cidr_blocks = [ + + "10.0.0.0/24", + + "10.0.1.0/24", + + "10.0.2.0/24", + ] + + private_subnets_ipv6_cidr_blocks = [] + + public_internet_gateway_ipv6_route_id = null + + public_internet_gateway_route_id = "r-rtb-0445a2ea1c510657b1080289494" + + public_network_acl_arn = null + + public_network_acl_id = null + + public_route_table_association_ids = [ + + "rtbassoc-033c4fa608f79a594", + + "rtbassoc-000d8799150d904e6", + + "rtbassoc-0b047c79fe554852d", + ] + + public_route_table_ids = [ + + "rtb-0445a2ea1c510657b", + ] + + public_subnet_arns = [ + + 
"arn:aws:ec2:us-east-2:916723593639:subnet/subnet-04b3bdd4b0dc877f0", + + "arn:aws:ec2:us-east-2:916723593639:subnet/subnet-051fecdee9028af79", + + "arn:aws:ec2:us-east-2:916723593639:subnet/subnet-027f4abd09a6543f0", + ] + + public_subnet_objects = [ + + { + + arn = "arn:aws:ec2:us-east-2:916723593639:subnet/subnet-04b3bdd4b0dc877f0" + + assign_ipv6_address_on_creation = false + + availability_zone = "us-east-2a" + + availability_zone_id = "use2-az1" + + cidr_block = "10.0.4.0/24" + + customer_owned_ipv4_pool = "" + + enable_dns64 = false + + enable_lni_at_device_index = 0 + + enable_resource_name_dns_a_record_on_launch = false + + enable_resource_name_dns_aaaa_record_on_launch = false + + id = "subnet-04b3bdd4b0dc877f0" + + ipv6_cidr_block = "" + + ipv6_cidr_block_association_id = "" + + ipv6_native = false + + map_customer_owned_ip_on_launch = false + + map_public_ip_on_launch = false + + outpost_arn = "" + + owner_id = "916723593639" + + private_dns_hostname_type_on_launch = "ip-name" + + tags = { + + Name = "swarms-public-us-east-2a" + + project = "swarms" + } + + tags_all = { + + Name = "swarms-public-us-east-2a" + + project = "swarms" + } + + timeouts = null + + vpc_id = "vpc-0b4cedd083227068d" + }, + + { + + arn = "arn:aws:ec2:us-east-2:916723593639:subnet/subnet-051fecdee9028af79" + + assign_ipv6_address_on_creation = false + + availability_zone = "us-east-2b" + + availability_zone_id = "use2-az2" + + cidr_block = "10.0.5.0/24" + + customer_owned_ipv4_pool = "" + + enable_dns64 = false + + enable_lni_at_device_index = 0 + + enable_resource_name_dns_a_record_on_launch = false + + enable_resource_name_dns_aaaa_record_on_launch = false + + id = "subnet-051fecdee9028af79" + + ipv6_cidr_block = "" + + ipv6_cidr_block_association_id = "" + + ipv6_native = false + + map_customer_owned_ip_on_launch = false + + map_public_ip_on_launch = false + + outpost_arn = "" + + owner_id = "916723593639" + + private_dns_hostname_type_on_launch = "ip-name" + + tags = { + + Name 
= "swarms-public-us-east-2b" + + project = "swarms" + } + + tags_all = { + + Name = "swarms-public-us-east-2b" + + project = "swarms" + } + + timeouts = null + + vpc_id = "vpc-0b4cedd083227068d" + }, + + { + + arn = "arn:aws:ec2:us-east-2:916723593639:subnet/subnet-027f4abd09a6543f0" + + assign_ipv6_address_on_creation = false + + availability_zone = "us-east-2c" + + availability_zone_id = "use2-az3" + + cidr_block = "10.0.6.0/24" + + customer_owned_ipv4_pool = "" + + enable_dns64 = false + + enable_lni_at_device_index = 0 + + enable_resource_name_dns_a_record_on_launch = false + + enable_resource_name_dns_aaaa_record_on_launch = false + + id = "subnet-027f4abd09a6543f0" + + ipv6_cidr_block = "" + + ipv6_cidr_block_association_id = "" + + ipv6_native = false + + map_customer_owned_ip_on_launch = false + + map_public_ip_on_launch = false + + outpost_arn = "" + + owner_id = "916723593639" + + private_dns_hostname_type_on_launch = "ip-name" + + tags = { + + Name = "swarms-public-us-east-2c" + + project = "swarms" + } + + tags_all = { + + Name = "swarms-public-us-east-2c" + + project = "swarms" + } + + timeouts = null + + vpc_id = "vpc-0b4cedd083227068d" + }, + ] + + public_subnets = [ + + "subnet-04b3bdd4b0dc877f0", + + "subnet-051fecdee9028af79", + + "subnet-027f4abd09a6543f0", + ] + + public_subnets_cidr_blocks = [ + + "10.0.4.0/24", + + "10.0.5.0/24", + + "10.0.6.0/24", + ] + + public_subnets_ipv6_cidr_blocks = [] + + redshift_network_acl_arn = null + + redshift_network_acl_id = null + + redshift_public_route_table_association_ids = [] + + redshift_route_table_association_ids = [] + + redshift_route_table_ids = [ + + "rtb-033eec802f743baca", + + "rtb-0bbc3caa1cdc40cd0", + + "rtb-081f2bb265d2703ee", + ] + + redshift_subnet_arns = [] + + redshift_subnet_group = null + + redshift_subnet_objects = [] + + redshift_subnets = [] + + redshift_subnets_cidr_blocks = [] + + redshift_subnets_ipv6_cidr_blocks = [] + + this_customer_gateway = {} + + vgw_arn = null + + vgw_id = 
null + + vpc_arn = "arn:aws:ec2:us-east-2:916723593639:vpc/vpc-0b4cedd083227068d" + + vpc_cidr_block = "10.0.0.0/16" + + vpc_enable_dns_hostnames = true + + vpc_enable_dns_support = true + + vpc_flow_log_cloudwatch_iam_role_arn = "" + + vpc_flow_log_deliver_cross_account_role = null + + vpc_flow_log_destination_arn = "" + + vpc_flow_log_destination_type = "cloud-watch-logs" + + vpc_flow_log_id = null + + vpc_id = "vpc-0b4cedd083227068d" + + vpc_instance_tenancy = "default" + + vpc_ipv6_association_id = "" + + vpc_ipv6_cidr_block = "" + + vpc_main_route_table_id = "rtb-053dc8c3d37f2f58f" + + vpc_owner_id = "916723593639" + + vpc_secondary_cidr_blocks = [] + } + + vpc_arn = "arn:aws:ec2:us-east-2:916723593639:vpc/vpc-0b4cedd083227068d" + + vpc_cidr_block = "10.0.0.0/16" + + vpc_enable_dns_hostnames = true + + vpc_enable_dns_support = true + + vpc_flow_log_cloudwatch_iam_role_arn = "" + + vpc_flow_log_destination_arn = "" + + vpc_flow_log_destination_type = "cloud-watch-logs" + + vpc_flow_log_id = null + + vpc_id = "vpc-0b4cedd083227068d" + + vpc_instance_tenancy = "default" + + vpc_ipv6_association_id = "" + + vpc_ipv6_cidr_block = "" + + vpc_main_route_table_id = "rtb-053dc8c3d37f2f58f" + + vpc_owner_id = "916723593639" + + vpc_secondary_cidr_blocks = [] + } + # (1 unchanged attribute hidden) + } + +You can apply this plan to save these new output values to the OpenTofu state, without changing any real +infrastructure. + +Do you want to perform these actions? + OpenTofu will perform the actions described above. + Only 'yes' will be accepted to approve. + + Enter a value: yes + + +Apply complete! Resources: 0 added, 0 changed, 0 destroyed. 
+ +Outputs: + +account = "916723593639" +api = { + "security_group_id" = "sg-09e0227357b33ab1e" + "vpc" = { + "cgw_arns" = [] + "cgw_ids" = [] + "default_network_acl_id" = "acl-07d9084294e0492bd" + "default_route_table_id" = "rtb-053dc8c3d37f2f58f" + "default_security_group_id" = "sg-0a42c3ef338285431" + "default_vpc_arn" = null + "default_vpc_cidr_block" = null + "default_vpc_default_network_acl_id" = null + "default_vpc_default_route_table_id" = null + "default_vpc_default_security_group_id" = null + "default_vpc_enable_dns_hostnames" = null + "default_vpc_enable_dns_support" = null + "default_vpc_id" = null + "default_vpc_instance_tenancy" = null + "default_vpc_main_route_table_id" = null + "dhcp_options_id" = null + "ec2_public_subnet_id_1" = 1 + "ec2_public_subnet_id_2" = 1 + "egress_only_internet_gateway_id" = null + "igw_arn" = "arn:aws:ec2:us-east-2:916723593639:internet-gateway/igw-022da11de86f41ae0" + "igw_id" = "igw-022da11de86f41ae0" + "nat_ids" = [] + "nat_public_ips" = tolist([]) + "natgw_ids" = [] + "private_ipv6_egress_route_ids" = [] + "private_nat_gateway_route_ids" = [] + "private_network_acl_arn" = null + "private_network_acl_id" = null + "private_route_table_association_ids" = [ + "rtbassoc-06f87a2b8106090d9", + "rtbassoc-085a04c2858a3164e", + "rtbassoc-0657562cf0177710a", + ] + "private_route_table_ids" = [ + "rtb-033eec802f743baca", + "rtb-0bbc3caa1cdc40cd0", + "rtb-081f2bb265d2703ee", + ] + "private_subnet_arns" = [ + "arn:aws:ec2:us-east-2:916723593639:subnet/subnet-0ea44af2f97e12b1a", + "arn:aws:ec2:us-east-2:916723593639:subnet/subnet-0de17633cfb166d29", + "arn:aws:ec2:us-east-2:916723593639:subnet/subnet-0e926a2b5ae3f5acc", + ] + "private_subnets" = [ + "subnet-0ea44af2f97e12b1a", + "subnet-0de17633cfb166d29", + "subnet-0e926a2b5ae3f5acc", + ] + "private_subnets_cidr_blocks" = tolist([ + "10.0.0.0/24", + "10.0.1.0/24", + "10.0.2.0/24", + ]) + "private_subnets_ipv6_cidr_blocks" = tolist([]) + "public_internet_gateway_ipv6_route_id" = null 
+ "public_internet_gateway_route_id" = "r-rtb-0445a2ea1c510657b1080289494" + "public_network_acl_arn" = null + "public_network_acl_id" = null + "public_route_table_association_ids" = [ + "rtbassoc-033c4fa608f79a594", + "rtbassoc-000d8799150d904e6", + "rtbassoc-0b047c79fe554852d", + ] + "public_route_table_ids" = [ + "rtb-0445a2ea1c510657b", + ] + "public_subnet_arns" = [ + "arn:aws:ec2:us-east-2:916723593639:subnet/subnet-04b3bdd4b0dc877f0", + "arn:aws:ec2:us-east-2:916723593639:subnet/subnet-051fecdee9028af79", + "arn:aws:ec2:us-east-2:916723593639:subnet/subnet-027f4abd09a6543f0", + ] + "public_subnets" = [ + "subnet-04b3bdd4b0dc877f0", + "subnet-051fecdee9028af79", + "subnet-027f4abd09a6543f0", + ] + "public_subnets_cidr_blocks" = tolist([ + "10.0.4.0/24", + "10.0.5.0/24", + "10.0.6.0/24", + ]) + "public_subnets_ipv6_cidr_blocks" = tolist([]) + "this_customer_gateway" = {} + "vgw_arn" = null + "vgw_id" = null + "vpc" = { + "azs" = tolist([ + "us-east-2a", + "us-east-2b", + "us-east-2c", + ]) + "cgw_arns" = [] + "cgw_ids" = [] + "database_internet_gateway_route_id" = null + "database_ipv6_egress_route_id" = null + "database_nat_gateway_route_ids" = [] + "database_network_acl_arn" = null + "database_network_acl_id" = null + "database_route_table_association_ids" = [] + "database_route_table_ids" = tolist([ + "rtb-033eec802f743baca", + "rtb-0bbc3caa1cdc40cd0", + "rtb-081f2bb265d2703ee", + ]) + "database_subnet_arns" = [] + "database_subnet_group" = null + "database_subnet_group_name" = null + "database_subnet_objects" = [] + "database_subnets" = [] + "database_subnets_cidr_blocks" = tolist([]) + "database_subnets_ipv6_cidr_blocks" = tolist([]) + "default_network_acl_id" = "acl-07d9084294e0492bd" + "default_route_table_id" = "rtb-053dc8c3d37f2f58f" + "default_security_group_id" = "sg-0a42c3ef338285431" + "default_vpc_arn" = null + "default_vpc_cidr_block" = null + "default_vpc_default_network_acl_id" = null + "default_vpc_default_route_table_id" = null + 
"default_vpc_default_security_group_id" = null + "default_vpc_enable_dns_hostnames" = null + "default_vpc_enable_dns_support" = null + "default_vpc_id" = null + "default_vpc_instance_tenancy" = null + "default_vpc_main_route_table_id" = null + "dhcp_options_id" = null + "egress_only_internet_gateway_id" = null + "elasticache_network_acl_arn" = null + "elasticache_network_acl_id" = null + "elasticache_route_table_association_ids" = [] + "elasticache_route_table_ids" = [ + "rtb-033eec802f743baca", + "rtb-0bbc3caa1cdc40cd0", + "rtb-081f2bb265d2703ee", + ] + "elasticache_subnet_arns" = [] + "elasticache_subnet_group" = null + "elasticache_subnet_group_name" = null + "elasticache_subnet_objects" = [] + "elasticache_subnets" = [] + "elasticache_subnets_cidr_blocks" = tolist([]) + "elasticache_subnets_ipv6_cidr_blocks" = tolist([]) + "igw_arn" = "arn:aws:ec2:us-east-2:916723593639:internet-gateway/igw-022da11de86f41ae0" + "igw_id" = "igw-022da11de86f41ae0" + "intra_network_acl_arn" = null + "intra_network_acl_id" = null + "intra_route_table_association_ids" = [] + "intra_route_table_ids" = [] + "intra_subnet_arns" = [] + "intra_subnet_objects" = [] + "intra_subnets" = [] + "intra_subnets_cidr_blocks" = tolist([]) + "intra_subnets_ipv6_cidr_blocks" = tolist([]) + "name" = "swarms" + "nat_ids" = [] + "nat_public_ips" = tolist([]) + "natgw_ids" = [] + "natgw_interface_ids" = [] + "outpost_network_acl_arn" = null + "outpost_network_acl_id" = null + "outpost_subnet_arns" = [] + "outpost_subnet_objects" = [] + "outpost_subnets" = [] + "outpost_subnets_cidr_blocks" = tolist([]) + "outpost_subnets_ipv6_cidr_blocks" = tolist([]) + "private_ipv6_egress_route_ids" = [] + "private_nat_gateway_route_ids" = [] + "private_network_acl_arn" = null + "private_network_acl_id" = null + "private_route_table_association_ids" = [ + "rtbassoc-06f87a2b8106090d9", + "rtbassoc-085a04c2858a3164e", + "rtbassoc-0657562cf0177710a", + ] + "private_route_table_ids" = [ + "rtb-033eec802f743baca", + 
"rtb-0bbc3caa1cdc40cd0", + "rtb-081f2bb265d2703ee", + ] + "private_subnet_arns" = [ + "arn:aws:ec2:us-east-2:916723593639:subnet/subnet-0ea44af2f97e12b1a", + "arn:aws:ec2:us-east-2:916723593639:subnet/subnet-0de17633cfb166d29", + "arn:aws:ec2:us-east-2:916723593639:subnet/subnet-0e926a2b5ae3f5acc", + ] + "private_subnet_objects" = [ + { + "arn" = "arn:aws:ec2:us-east-2:916723593639:subnet/subnet-0ea44af2f97e12b1a" + "assign_ipv6_address_on_creation" = false + "availability_zone" = "us-east-2a" + "availability_zone_id" = "use2-az1" + "cidr_block" = "10.0.0.0/24" + "customer_owned_ipv4_pool" = "" + "enable_dns64" = false + "enable_lni_at_device_index" = 0 + "enable_resource_name_dns_a_record_on_launch" = false + "enable_resource_name_dns_aaaa_record_on_launch" = false + "id" = "subnet-0ea44af2f97e12b1a" + "ipv6_cidr_block" = "" + "ipv6_cidr_block_association_id" = "" + "ipv6_native" = false + "map_customer_owned_ip_on_launch" = false + "map_public_ip_on_launch" = false + "outpost_arn" = "" + "owner_id" = "916723593639" + "private_dns_hostname_type_on_launch" = "ip-name" + "tags" = tomap({ + "Name" = "Private Subnet One" + "project" = "swarms" + }) + "tags_all" = tomap({ + "Name" = "Private Subnet One" + "project" = "swarms" + }) + "timeouts" = null /* object */ + "vpc_id" = "vpc-0b4cedd083227068d" + }, + { + "arn" = "arn:aws:ec2:us-east-2:916723593639:subnet/subnet-0de17633cfb166d29" + "assign_ipv6_address_on_creation" = false + "availability_zone" = "us-east-2b" + "availability_zone_id" = "use2-az2" + "cidr_block" = "10.0.1.0/24" + "customer_owned_ipv4_pool" = "" + "enable_dns64" = false + "enable_lni_at_device_index" = 0 + "enable_resource_name_dns_a_record_on_launch" = false + "enable_resource_name_dns_aaaa_record_on_launch" = false + "id" = "subnet-0de17633cfb166d29" + "ipv6_cidr_block" = "" + "ipv6_cidr_block_association_id" = "" + "ipv6_native" = false + "map_customer_owned_ip_on_launch" = false + "map_public_ip_on_launch" = false + "outpost_arn" = "" + 
"owner_id" = "916723593639" + "private_dns_hostname_type_on_launch" = "ip-name" + "tags" = tomap({ + "Name" = "Private Subnet Two" + "project" = "swarms" + }) + "tags_all" = tomap({ + "Name" = "Private Subnet Two" + "project" = "swarms" + }) + "timeouts" = null /* object */ + "vpc_id" = "vpc-0b4cedd083227068d" + }, + { + "arn" = "arn:aws:ec2:us-east-2:916723593639:subnet/subnet-0e926a2b5ae3f5acc" + "assign_ipv6_address_on_creation" = false + "availability_zone" = "us-east-2c" + "availability_zone_id" = "use2-az3" + "cidr_block" = "10.0.2.0/24" + "customer_owned_ipv4_pool" = "" + "enable_dns64" = false + "enable_lni_at_device_index" = 0 + "enable_resource_name_dns_a_record_on_launch" = false + "enable_resource_name_dns_aaaa_record_on_launch" = false + "id" = "subnet-0e926a2b5ae3f5acc" + "ipv6_cidr_block" = "" + "ipv6_cidr_block_association_id" = "" + "ipv6_native" = false + "map_customer_owned_ip_on_launch" = false + "map_public_ip_on_launch" = false + "outpost_arn" = "" + "owner_id" = "916723593639" + "private_dns_hostname_type_on_launch" = "ip-name" + "tags" = tomap({ + "Name" = "swarms-private-us-east-2c" + "project" = "swarms" + }) + "tags_all" = tomap({ + "Name" = "swarms-private-us-east-2c" + "project" = "swarms" + }) + "timeouts" = null /* object */ + "vpc_id" = "vpc-0b4cedd083227068d" + }, + ] + "private_subnets" = [ + "subnet-0ea44af2f97e12b1a", + "subnet-0de17633cfb166d29", + "subnet-0e926a2b5ae3f5acc", + ] + "private_subnets_cidr_blocks" = tolist([ + "10.0.0.0/24", + "10.0.1.0/24", + "10.0.2.0/24", + ]) + "private_subnets_ipv6_cidr_blocks" = tolist([]) + "public_internet_gateway_ipv6_route_id" = null + "public_internet_gateway_route_id" = "r-rtb-0445a2ea1c510657b1080289494" + "public_network_acl_arn" = null + "public_network_acl_id" = null + "public_route_table_association_ids" = [ + "rtbassoc-033c4fa608f79a594", + "rtbassoc-000d8799150d904e6", + "rtbassoc-0b047c79fe554852d", + ] + "public_route_table_ids" = [ + "rtb-0445a2ea1c510657b", + ] + 
"public_subnet_arns" = [ + "arn:aws:ec2:us-east-2:916723593639:subnet/subnet-04b3bdd4b0dc877f0", + "arn:aws:ec2:us-east-2:916723593639:subnet/subnet-051fecdee9028af79", + "arn:aws:ec2:us-east-2:916723593639:subnet/subnet-027f4abd09a6543f0", + ] + "public_subnet_objects" = [ + { + "arn" = "arn:aws:ec2:us-east-2:916723593639:subnet/subnet-04b3bdd4b0dc877f0" + "assign_ipv6_address_on_creation" = false + "availability_zone" = "us-east-2a" + "availability_zone_id" = "use2-az1" + "cidr_block" = "10.0.4.0/24" + "customer_owned_ipv4_pool" = "" + "enable_dns64" = false + "enable_lni_at_device_index" = 0 + "enable_resource_name_dns_a_record_on_launch" = false + "enable_resource_name_dns_aaaa_record_on_launch" = false + "id" = "subnet-04b3bdd4b0dc877f0" + "ipv6_cidr_block" = "" + "ipv6_cidr_block_association_id" = "" + "ipv6_native" = false + "map_customer_owned_ip_on_launch" = false + "map_public_ip_on_launch" = false + "outpost_arn" = "" + "owner_id" = "916723593639" + "private_dns_hostname_type_on_launch" = "ip-name" + "tags" = tomap({ + "Name" = "swarms-public-us-east-2a" + "project" = "swarms" + }) + "tags_all" = tomap({ + "Name" = "swarms-public-us-east-2a" + "project" = "swarms" + }) + "timeouts" = null /* object */ + "vpc_id" = "vpc-0b4cedd083227068d" + }, + { + "arn" = "arn:aws:ec2:us-east-2:916723593639:subnet/subnet-051fecdee9028af79" + "assign_ipv6_address_on_creation" = false + "availability_zone" = "us-east-2b" + "availability_zone_id" = "use2-az2" + "cidr_block" = "10.0.5.0/24" + "customer_owned_ipv4_pool" = "" + "enable_dns64" = false + "enable_lni_at_device_index" = 0 + "enable_resource_name_dns_a_record_on_launch" = false + "enable_resource_name_dns_aaaa_record_on_launch" = false + "id" = "subnet-051fecdee9028af79" + "ipv6_cidr_block" = "" + "ipv6_cidr_block_association_id" = "" + "ipv6_native" = false + "map_customer_owned_ip_on_launch" = false + "map_public_ip_on_launch" = false + "outpost_arn" = "" + "owner_id" = "916723593639" + 
"private_dns_hostname_type_on_launch" = "ip-name" + "tags" = tomap({ + "Name" = "swarms-public-us-east-2b" + "project" = "swarms" + }) + "tags_all" = tomap({ + "Name" = "swarms-public-us-east-2b" + "project" = "swarms" + }) + "timeouts" = null /* object */ + "vpc_id" = "vpc-0b4cedd083227068d" + }, + { + "arn" = "arn:aws:ec2:us-east-2:916723593639:subnet/subnet-027f4abd09a6543f0" + "assign_ipv6_address_on_creation" = false + "availability_zone" = "us-east-2c" + "availability_zone_id" = "use2-az3" + "cidr_block" = "10.0.6.0/24" + "customer_owned_ipv4_pool" = "" + "enable_dns64" = false + "enable_lni_at_device_index" = 0 + "enable_resource_name_dns_a_record_on_launch" = false + "enable_resource_name_dns_aaaa_record_on_launch" = false + "id" = "subnet-027f4abd09a6543f0" + "ipv6_cidr_block" = "" + "ipv6_cidr_block_association_id" = "" + "ipv6_native" = false + "map_customer_owned_ip_on_launch" = false + "map_public_ip_on_launch" = false + "outpost_arn" = "" + "owner_id" = "916723593639" + "private_dns_hostname_type_on_launch" = "ip-name" + "tags" = tomap({ + "Name" = "swarms-public-us-east-2c" + "project" = "swarms" + }) + "tags_all" = tomap({ + "Name" = "swarms-public-us-east-2c" + "project" = "swarms" + }) + "timeouts" = null /* object */ + "vpc_id" = "vpc-0b4cedd083227068d" + }, + ] + "public_subnets" = [ + "subnet-04b3bdd4b0dc877f0", + "subnet-051fecdee9028af79", + "subnet-027f4abd09a6543f0", + ] + "public_subnets_cidr_blocks" = tolist([ + "10.0.4.0/24", + "10.0.5.0/24", + "10.0.6.0/24", + ]) + "public_subnets_ipv6_cidr_blocks" = tolist([]) + "redshift_network_acl_arn" = null + "redshift_network_acl_id" = null + "redshift_public_route_table_association_ids" = [] + "redshift_route_table_association_ids" = [] + "redshift_route_table_ids" = tolist([ + "rtb-033eec802f743baca", + "rtb-0bbc3caa1cdc40cd0", + "rtb-081f2bb265d2703ee", + ]) + "redshift_subnet_arns" = [] + "redshift_subnet_group" = null + "redshift_subnet_objects" = [] + "redshift_subnets" = [] + 
"redshift_subnets_cidr_blocks" = tolist([]) + "redshift_subnets_ipv6_cidr_blocks" = tolist([]) + "this_customer_gateway" = {} + "vgw_arn" = null + "vgw_id" = null + "vpc_arn" = "arn:aws:ec2:us-east-2:916723593639:vpc/vpc-0b4cedd083227068d" + "vpc_cidr_block" = "10.0.0.0/16" + "vpc_enable_dns_hostnames" = true + "vpc_enable_dns_support" = true + "vpc_flow_log_cloudwatch_iam_role_arn" = "" + "vpc_flow_log_deliver_cross_account_role" = null + "vpc_flow_log_destination_arn" = "" + "vpc_flow_log_destination_type" = "cloud-watch-logs" + "vpc_flow_log_id" = null + "vpc_id" = "vpc-0b4cedd083227068d" + "vpc_instance_tenancy" = "default" + "vpc_ipv6_association_id" = "" + "vpc_ipv6_cidr_block" = "" + "vpc_main_route_table_id" = "rtb-053dc8c3d37f2f58f" + "vpc_owner_id" = "916723593639" + "vpc_secondary_cidr_blocks" = tolist([]) + } + "vpc_arn" = "arn:aws:ec2:us-east-2:916723593639:vpc/vpc-0b4cedd083227068d" + "vpc_cidr_block" = "10.0.0.0/16" + "vpc_enable_dns_hostnames" = true + "vpc_enable_dns_support" = true + "vpc_flow_log_cloudwatch_iam_role_arn" = "" + "vpc_flow_log_destination_arn" = "" + "vpc_flow_log_destination_type" = "cloud-watch-logs" + "vpc_flow_log_id" = null + "vpc_id" = "vpc-0b4cedd083227068d" + "vpc_instance_tenancy" = "default" + "vpc_ipv6_association_id" = "" + "vpc_ipv6_cidr_block" = "" + "vpc_main_route_table_id" = "rtb-053dc8c3d37f2f58f" + "vpc_owner_id" = "916723593639" + "vpc_secondary_cidr_blocks" = tolist([]) + } +} +dns = "api.swarms.ai" +profile = "swarms" +region = "us-east-2" +mdupont@mdupont-G470:~/2024/12/swarms-terraform/accounts/swarms$ diff --git a/environments/swarms-aws-agent-api/dev/us-east-1/.terraform.lock.hcl b/environments/swarms-aws-agent-api/dev/us-east-1/.terraform.lock.hcl new file mode 100644 index 0000000..20cd0a1 --- /dev/null +++ b/environments/swarms-aws-agent-api/dev/us-east-1/.terraform.lock.hcl @@ -0,0 +1,42 @@ +# This file is maintained automatically by "tofu init". +# Manual edits may be lost in future updates. 
+ +provider "registry.opentofu.org/hashicorp/aws" { + version = "5.81.0" + constraints = ">= 5.55.0" + hashes = [ + "h1:ird967uf44WBZ0u9rMlOdO4nCKJZMlUknLcD3lz4dWs=", + "zh:0d0c7cc1e16b16cd00ab36de35038162e1871b51a902e9016d08c55c9fc4de35", + "zh:0e4b8c6e46999015292ab4fb9a430ab00107a276f25243552cde66db53c58661", + "zh:30041314cdd4e877d75ee8c9d36aecfca094e276f7a3d8150f929cf5169b2fa5", + "zh:5ebd248ce3e5a7ef9cc2f41499668f182146325e10ea305c70469122f6161a13", + "zh:888a69d371373b418549e03f5922becb2b8074cb463552ecfa65f30455708db0", + "zh:8a21bb7fe60383ff5ca9db8630a287e41fd520d2514c08874a16dc74500fadd7", + "zh:9c4663dcbfe58544642d70ebfdc6c5fa91592ff04164f77c655e32e6024483e2", + "zh:b322873f1209443a8407d5f782d7d917de6a1391239dbd0e7f809ce6507bed76", + "zh:b7c9d5ca14b818b5932ac58a490646a425ebc41b33d149090aa5f48d1ca35c99", + "zh:e76cd202b03749f3082b0cbe849fd2e731cf3f9a6aa994d2d629602c3aede36c", + ] +} + +provider "registry.opentofu.org/hashicorp/cloudflare" { + version = "4.48.0" + hashes = [ + "h1:ePGvSurmlqOCkD761vkhRmz7bsK36/EnIvx2Xy8TdXo=", + "zh:04c0a49c2b23140b2f21cfd0d52f9798d70d3bdae3831613e156aabe519bbc6c", + "zh:185f21b4834ba63e8df1f84aa34639d8a7e126429a4007bb5f9ad82f2602a997", + "zh:234724f52cb4c0c3f7313d3b2697caef26d921d134f26ae14801e7afac522f7b", + "zh:38a56fcd1b3e40706af995611c977816543b53f1e55fe2720944aae2b6828fcb", + "zh:419938f5430fc78eff933470aefbf94a460a478f867cf7761a3dea177b4eb153", + "zh:4b46d92bfde1deab7de7ba1a6bbf4ba7c711e4fd925341ddf09d4cc28dae03d8", + "zh:537acd4a31c752f1bae305ba7190f60b71ad1a459f22d464f3f914336c9e919f", + "zh:5ff36b005aad07697dd0b30d4f0c35dbcdc30dc52b41722552060792fa87ce04", + "zh:635c5ee419daea098060f794d9d7d999275301181e49562c4e4c08f043076937", + "zh:859277c330d61f91abe9e799389467ca11b77131bf34bedbef52f8da68b2bb49", + "zh:890df766e9b839623b1f0437355032a3c006226a6c200cd911e15ee1a9014e9f", + "zh:927dfdb8d9aef37ead03fceaa29e87ba076a3dd24e19b6cefdbb0efe9987ff8c", + "zh:bbf2226f07f6b1e721877328e69ded4b64f9c196634d2e2429e3cfabbe41e532", 
+ "zh:daeed873d6f38604232b46ee4a5830c85d195b967f8dbcafe2fcffa98daf9c5f", + "zh:f8f2fc4646c1ba44085612fa7f4dbb7cbcead43b4e661f2b98ddfb4f68afc758", + ] +} diff --git a/environments/swarms-aws-agent-api/dev/us-east-1/Makefile b/environments/swarms-aws-agent-api/dev/us-east-1/Makefile new file mode 100644 index 0000000..6a6bebf --- /dev/null +++ b/environments/swarms-aws-agent-api/dev/us-east-1/Makefile @@ -0,0 +1,4 @@ +doit : + tofu init + tofu plan + tofu apply -auto-approve diff --git a/environments/swarms-aws-agent-api/dev/us-east-1/Readme.md b/environments/swarms-aws-agent-api/dev/us-east-1/Readme.md index 4f57e60..144ec90 100644 --- a/environments/swarms-aws-agent-api/dev/us-east-1/Readme.md +++ b/environments/swarms-aws-agent-api/dev/us-east-1/Readme.md @@ -1,3 +1,10 @@ +# Done + 1. create vpc +2. create iam roles and users +3. create asg and alb and route53 and acm + +# Todo + +1. create cognito pool 2. create ami -4. create iam roles and users diff --git a/environments/swarms-aws-agent-api/dev/us-east-1/alb/main.tf b/environments/swarms-aws-agent-api/dev/us-east-1/alb/main.tf new file mode 100644 index 0000000..e69de29 diff --git a/environments/swarms-aws-agent-api/dev/us-east-1/asg/main.tf b/environments/swarms-aws-agent-api/dev/us-east-1/asg/main.tf new file mode 100644 index 0000000..e69de29 diff --git a/environments/swarms-aws-agent-api/dev/us-east-1/cloudflare.tf b/environments/swarms-aws-agent-api/dev/us-east-1/cloudflare.tf new file mode 100644 index 0000000..e066b4a --- /dev/null +++ b/environments/swarms-aws-agent-api/dev/us-east-1/cloudflare.tf @@ -0,0 +1,4 @@ +#provider "cloudflare" { +# api_token = var.cloudflare_api_token +#} +#export CLOUDFLARE_API_TOKEN=`cat ~/.cloudflare` diff --git a/environments/swarms-aws-agent-api/dev/us-east-1/components/Readme.md b/environments/swarms-aws-agent-api/dev/us-east-1/components/Readme.md index 3f6653d..2cb2a7a 100644 --- a/environments/swarms-aws-agent-api/dev/us-east-1/components/Readme.md +++ 
b/environments/swarms-aws-agent-api/dev/us-east-1/components/Readme.md @@ -1,12 +1,28 @@ # Plan ## Phase 1 + +0. create standard vpc with a private network to host ec2, +this uses a natgw that costs money https://aws.amazon.com/vpc/pricing/ +so we will put the intances in public for now and use security groups to limit access. + 1. create minimal ec2 instance in machine_image + +terraform for ubuntu python uvicorn fastapi nginx systemd server with custom git modules + 2. create minimal ec2 ami from instance in machine_image 3. create autoscaling_group of size 1 for image -4. create application_load_balancer -5. create dns_entry -6. create cognito_user_pool for login +4. create application load balancer +5. create dns_entry + + +# todo + +1. alb sticky sessions :send users back to server via sticky sessions or some id. +2. create cognito user pool for login 7. create work_queue -8. create lambda_workers on queue +8. create lambda_workers on queue 9. create resource_launchers to create new resources. - +10. packer ami for ubuntu python uvicorn fastapi nginx systemd server with custom git modules +11. setup cicd +12. create jobt to generate diagrams and documentation from site +13. 
use swarms agent to improve code diff --git a/environments/swarms-aws-agent-api/dev/us-east-1/components/application_load_balancer/Readme.md b/environments/swarms-aws-agent-api/dev/us-east-1/components/application_load_balancer/Readme.md index 5a93134..0ca19bd 100644 --- a/environments/swarms-aws-agent-api/dev/us-east-1/components/application_load_balancer/Readme.md +++ b/environments/swarms-aws-agent-api/dev/us-east-1/components/application_load_balancer/Readme.md @@ -1 +1,3 @@ application_load_balancer + +https://github.com/terraform-aws-modules/terraform-aws-alb.git diff --git a/environments/swarms-aws-agent-api/dev/us-east-1/components/application_load_balancer/cloudflare.tf b/environments/swarms-aws-agent-api/dev/us-east-1/components/application_load_balancer/cloudflare.tf new file mode 100644 index 0000000..be2ab3b --- /dev/null +++ b/environments/swarms-aws-agent-api/dev/us-east-1/components/application_load_balancer/cloudflare.tf @@ -0,0 +1,18 @@ +#variable "dns_name" {} +#variable "account_id" {} +# data "cloudflare_zone" "zone" { +# count =0 +# name = var.dns_name +# account_id = var.account_id +# } + +# resource "cloudflare_record" "aws-ns-record" { +# count = 0 +# # count = "${length(aws_route53_zone.primary.name_servers)}" +# #domain = "${var.domain_name}" +# name = var.domain_name +# # zone_id = data.cloudflare_zone[0].zone.id +# content = "${element(aws_route53_zone.primary.name_servers, count.index)}" +# type = "NS" +# priority = 1 +# } diff --git a/environments/swarms-aws-agent-api/dev/us-east-1/components/application_load_balancer/https/main.tf b/environments/swarms-aws-agent-api/dev/us-east-1/components/application_load_balancer/https/main.tf new file mode 100644 index 0000000..1d97095 --- /dev/null +++ b/environments/swarms-aws-agent-api/dev/us-east-1/components/application_load_balancer/https/main.tf @@ -0,0 +1,36 @@ +variable alb_arn{} +variable domain_name{} +variable zone_id{} +variable aws_lb_target_group_arn{} + +module "acm" { +# count = 
0 + source = "terraform-aws-modules/acm/aws" + version = "~> 4.0" + domain_name = var.domain_name + zone_id = var.zone_id + subject_alternative_names = [ + "*.${var.domain_name}" + ] +} + +## now we just lift the listener code +resource "aws_lb_listener" "this" { + port = 443 + protocol = "HTTPS" + ssl_policy = "ELBSecurityPolicy-TLS13-1-2-Res-2021-06" + certificate_arn = module.acm.acm_certificate_arn + load_balancer_arn = var.alb_arn + #additional_certificate_arns = [module.wildcard_cert.acm_certificate_arn] + # #forward = { + # #target_group_key = "ex-swarms-instance" + # target_group_arn = "ex-swarms-instance" + # #target_group = [] + + default_action { + target_group_arn =var.aws_lb_target_group_arn + #module.alb.target_groups["ex-lambda-with-trigger"].arn + #length(try(default_action.value.target_groups, [])) > 0 ? null : try(default_action.value.arn, aws_lb_target_group.this[default_action.value.target_group_key].arn, null) + type = "forward" + } +} diff --git a/environments/swarms-aws-agent-api/dev/us-east-1/components/application_load_balancer/main.tf b/environments/swarms-aws-agent-api/dev/us-east-1/components/application_load_balancer/main.tf new file mode 100644 index 0000000..5762415 --- /dev/null +++ b/environments/swarms-aws-agent-api/dev/us-east-1/components/application_load_balancer/main.tf @@ -0,0 +1,68 @@ +variable vpc_id {} +variable security_group_id {} # = local.name +variable name {} # = local.name +variable domain_name {} +variable public_subnets {} #= module.vpc.public_subnets + +data "aws_availability_zones" "available" {} + +locals { + name = "ex-${basename(path.cwd)}" + + vpc_cidr = "10.0.0.0/16" + azs = slice(data.aws_availability_zones.available.names, 0, 3) + + tags = { + Name = local.name + Example = local.name + Repository = "https://github.com/terraform-aws-modules/terraform-aws-alb" + } +} + +module "alb" { + source = "terraform-aws-modules/alb/aws" + version = "9.12.0" + name = "${var.name}-api" # local.name + vpc_id = var.vpc_id 
# module.vpc.vpc_id + subnets = var.public_subnets # module.vpc.public_subnets + enable_deletion_protection = false + create_security_group=false + security_groups = [var.security_group_id] + client_keep_alive = 7200 + tags = local.tags +} + + + + + +output dns { + value = module.alb.dns_name +} + +module "route53" { + source = "./route53/" + alb_dns_name = module.alb.dns_name + alb_dns_zone = module.alb.zone_id + domain_name = var.domain_name +} + +module "tg" { + source = "./target_group/" + vpc_id = var.vpc_id # module.vpc.vpc_id +} + +module "https" { + source = "./https/" + # vpc_id = var.vpc_id # module.vpc.vpc_id + zone_id = module.route53.primary_zone_id + domain_name = var.domain_name + alb_arn = module.alb.arn + aws_lb_target_group_arn = module.tg.alb_target_group_arn + #aws_lb_target_group.this.arn +} + + +output alb_target_group_arn { + value = module.tg.alb_target_group_arn +} diff --git a/environments/swarms-aws-agent-api/dev/us-east-1/components/application_load_balancer/notes.prg b/environments/swarms-aws-agent-api/dev/us-east-1/components/application_load_balancer/notes.prg new file mode 100644 index 0000000..df1c054 --- /dev/null +++ b/environments/swarms-aws-agent-api/dev/us-east-1/components/application_load_balancer/notes.prg @@ -0,0 +1,479 @@ +# access_logs = { +# bucket = module.log_bucket.s3_bucket_id +# prefix = "access-logs" +# } + +# connection_logs = { +# bucket = module.log_bucket.s3_bucket_id +# enabled = true +# prefix = "connection-logs" +# } + + # ex-http-https-redirect = { + # port = 80 + # protocol = "HTTP" + # redirect = { + # port = "443" + # protocol = "HTTPS" + # status_code = "HTTP_301" + # } + # } + # rules = { + # ex-fixed-response = { + # priority = 3 + # actions = [{ + # type = "fixed-response" + # content_type = "text/plain" + # status_code = 200 + # message_body = "This is a fixed response" + # }] + # conditions = [{ + # http_header = { + # http_header_name = "x-Gimme-Fixed-Response" + # values = ["yes", "please", 
"right now"] + # } + # }] + # } + # ex-weighted-forward = { + # priority = 4 + # actions = [{ + # type = "weighted-forward" + # target_groups = [ + # { + # target_group_key = "ex-lambda-with-trigger" + # weight = 2 + # }, + # { + # target_group_key = "ex-instance" + # weight = 1 + # } + # ] + # stickiness = { + # enabled = true + # duration = 3600 + # } + # }] + # conditions = [{ + # query_string = { + # key = "weighted" + # value = "true" + # } + # }] + # } + # ex-redirect = { + # priority = 5000 + # actions = [{ + # type = "redirect" + # status_code = "HTTP_302" + # host = "www.youtube.com" + # path = "/watch" + # query = "v=dQw4w9WgXcQ" + # protocol = "HTTPS" + # }] + # conditions = [{ + # query_string = [{ + # key = "video" + # value = "random" + # }, + # { + # key = "image" + # value = "next" + # }] + # }] + # } + # } + # } + # ex-http-weighted-target = { + # port = 81 + # protocol = "HTTP" + # weighted_forward = { + # target_groups = [ + # { + # target_group_key = "ex-lambda-with-trigger" + # weight = 60 + # }, + # { + # target_group_key = "ex-instance" + # weight = 40 + # } + # ] + # } + # } + # ex-fixed-response = { + # port = 82 + # protocol = "HTTP" + # fixed_response = { + # content_type = "text/plain" + # message_body = "Fixed message" + # status_code = "200" + # } + # } + # rules = { + # ex-cognito = { + # actions = [ + # { + # type = "authenticate-cognito" + # on_unauthenticated_request = "authenticate" + # session_cookie_name = "session-${local.name}" + # session_timeout = 3600 + # user_pool_arn = aws_cognito_user_pool.this.arn + # user_pool_client_id = aws_cognito_user_pool_client.this.id + # user_pool_domain = aws_cognito_user_pool_domain.this.domain + # }, + # { + # type = "forward" + # target_group_key = "ex-instance" + # } + # ] + # conditions = [{ + # path_pattern = { + # values = ["/some/auth/required/route"] + # } + # }] + # } + # ex-fixed-response = { + # priority = 3 + # actions = [{ + # type = "fixed-response" + # content_type = 
"text/plain" + # status_code = 200 + # message_body = "This is a fixed response" + # }] + # conditions = [{ + # http_header = { + # http_header_name = "x-Gimme-Fixed-Response" + # values = ["yes", "please", "right now"] + # } + # }] + # } + # ex-weighted-forward = { + # priority = 4 + # actions = [{ + # type = "weighted-forward" + # target_groups = [ + # { + # target_group_key = "ex-instance" + # weight = 2 + # }, + # { + # target_group_key = "ex-lambda-with-trigger" + # weight = 1 + # } + # ] + # stickiness = { + # enabled = true + # duration = 3600 + # } + # }] + # conditions = [{ + # query_string = { + # key = "weighted" + # value = "true" + # }, + # path_pattern = { + # values = ["/some/path"] + # } + # }] + # } + # ex-redirect = { + # priority = 5000 + # actions = [{ + # type = "redirect" + # status_code = "HTTP_302" + # host = "www.youtube.com" + # path = "/watch" + # query = "v=dQw4w9WgXcQ" + # protocol = "HTTPS" + # }] + # conditions = [{ + # query_string = { + # key = "video" + # value = "random" + # } + # }] + # } + # } + # ex-cognito = { + # port = 444 + # protocol = "HTTPS" + # certificate_arn = module.acm.acm_certificate_arn + # authenticate_cognito = { + # authentication_request_extra_params = { + # display = "page" + # prompt = "login" + # } + # on_unauthenticated_request = "authenticate" + # session_cookie_name = "session-${local.name}" + # session_timeout = 3600 + # user_pool_arn = aws_cognito_user_pool.this.arn + # user_pool_client_id = aws_cognito_user_pool_client.this.id + # user_pool_domain = aws_cognito_user_pool_domain.this.domain + # } + # forward = { + # target_group_key = "ex-instance" + # } + # rules = { + # ex-oidc = { + # priority = 2 + # actions = [ + # { + # type = "authenticate-oidc" + # authentication_request_extra_params = { + # display = "page" + # prompt = "login" + # } + # authorization_endpoint = "https://${var.domain_name}/auth" + # client_id = "client_id" + # client_secret = "client_secret" + # issuer = 
"https://${var.domain_name}" + # token_endpoint = "https://${var.domain_name}/token" + # user_info_endpoint = "https://${var.domain_name}/user_info" + # }, + # { + # type = "forward" + # target_group_key = "ex-lambda-with-trigger" + # } + # ] + # conditions = [{ + # host_header = { + # values = ["foobar.com"] + # } + # }] + # } + # } + # } + # ex-oidc = { + # port = 445 + # protocol = "HTTPS" + # certificate_arn = module.acm.acm_certificate_arn + # action_type = "authenticate-oidc" + # authenticate_oidc = { + # authentication_request_extra_params = { + # display = "page" + # prompt = "login" + # } + # authorization_endpoint = "https://${var.domain_name}/auth" + # client_id = "client_id" + # client_secret = "client_secret" + # issuer = "https://${var.domain_name}" + # token_endpoint = "https://${var.domain_name}/token" + # user_info_endpoint = "https://${var.domain_name}/user_info" + # } + # forward = { + # target_group_key = "ex-instance" + # } + # } + # } + # target_group_health = { + # dns_failover = { + # minimum_healthy_targets_count = 2 + # } + # unhealthy_state_routing = { + # minimum_healthy_targets_percentage = 50 + # } + # } + # health_check = { + # enabled = true + # interval = 30 + # path = "/healthz" + # port = "traffic-port" + # healthy_threshold = 3 + # unhealthy_threshold = 3 + # timeout = 6 + # protocol = "HTTP" + # matcher = "200-399" + # } + # ex-lambda-with-trigger = { + # name_prefix = "l1-" + # target_type = "lambda" + # lambda_multi_value_headers_enabled = true + # target_id = module.lambda_with_allowed_triggers.lambda_function_arn + # } + # ex-lambda-without-trigger = { + # name_prefix = "l2-" + # target_type = "lambda" + # target_id = module.lambda_without_allowed_triggers.lambda_function_arn + # attach_lambda_permission = true + # } + # } + # additional_target_group_attachments = { + # ex-instance-other = { + # target_group_key = "ex-instance" + # target_type = "instance" + # target_id = aws_instance.other.id + # port = "80" + # } + # } + # 
# Route53 Record(s) + # route53_records = { + # A = { + # name = local.name + # type = "A" + # zone_id = data.aws_route53_zone.this.id + # } + # AAAA = { + # name = local.name + # type = "AAAA" + # zone_id = data.aws_route53_zone.this.id + # } + # } + + +#module "alb_disabled" { +# source = "../../"# +# +# create = false +#} + +# ################################################################################ +# # Using packaged function from Lambda module +# ################################################################################ + +# locals { +# package_url = "https://raw.githubusercontent.com/terraform-aws-modules/terraform-aws-lambda/master/examples/fixtures/python3.8-zip/existing_package.zip" +# downloaded = "downloaded_package_${md5(local.package_url)}.zip" +# } + +# resource "null_resource" "download_package" { +# triggers = { +# downloaded = local.downloaded +# } + +# provisioner "local-exec" { +# command = "curl -L -o ${local.downloaded} ${local.package_url}" +# } +# } + +# module "lambda_with_allowed_triggers" { +# source = "terraform-aws-modules/lambda/aws" +# version = "~> 6.0" + +# function_name = "${local.name}-with-allowed-triggers" +# description = "My awesome lambda function (with allowed triggers)" +# handler = "index.lambda_handler" +# runtime = "python3.8" + +# publish = true +# create_package = false +# local_existing_package = local.downloaded + +# allowed_triggers = { +# AllowExecutionFromELB = { +# service = "elasticloadbalancing" +# source_arn = module.alb.target_groups["ex-lambda-with-trigger"].arn +# } +# } + +# depends_on = [null_resource.download_package] +# } + +# module "lambda_without_allowed_triggers" { +# source = "terraform-aws-modules/lambda/aws" +# version = "~> 6.0" + +# function_name = "${local.name}-without-allowed-triggers" +# description = "My awesome lambda function (without allowed triggers)" +# handler = "index.lambda_handler" +# runtime = "python3.8" + +# publish = true +# create_package = false +# 
local_existing_package = local.downloaded + +# # Allowed triggers will be managed by ALB module +# allowed_triggers = {} + +# depends_on = [null_resource.download_package] +# } + +# ################################################################################ +# # Supporting resources +# ################################################################################ + +# module "vpc" { +# source = "terraform-aws-modules/vpc/aws" +# version = "~> 5.0" + +# name = local.name +# cidr = local.vpc_cidr + +# azs = local.azs +# private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 4, k)] +# public_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 48)] + +# tags = local.tags +# } + +# module "wildcard_cert" { +# source = "terraform-aws-modules/acm/aws" +# version = "~> 4.0" + +# domain_name = "*.${var.domain_name}" +# zone_id = data.aws_route53_zone.this.id +# } + +# data "aws_ssm_parameter" "al2" { +# name = "/aws/service/ami-amazon-linux-latest/amzn2-ami-hvm-x86_64-gp2" +# } + +# resource "aws_instance" "this" { +# ami = data.aws_ssm_parameter.al2.value +# instance_type = "t3.nano" +# subnet_id = element(module.vpc.private_subnets, 0) +# } + +# resource "aws_instance" "other" { +# ami = data.aws_ssm_parameter.al2.value +# instance_type = "t3.nano" +# subnet_id = element(module.vpc.private_subnets, 0) +# } + +# ################################################################## +# # AWS Cognito User Pool +# ################################################################## + +# resource "aws_cognito_user_pool" "this" { +# name = "user-pool-${local.name}" +# } + +# resource "aws_cognito_user_pool_client" "this" { +# name = "user-pool-client-${local.name}" +# user_pool_id = aws_cognito_user_pool.this.id +# generate_secret = true +# allowed_oauth_flows = ["code", "implicit"] +# callback_urls = ["https://${var.domain_name}/callback"] +# allowed_oauth_scopes = ["email", "openid"] +# allowed_oauth_flows_user_pool_client = true +# } + +# 
resource "random_string" "this" { +# length = 5 +# upper = false +# special = false +# } + +# resource "aws_cognito_user_pool_domain" "this" { +# domain = "${local.name}-${random_string.this.result}" +# user_pool_id = aws_cognito_user_pool.this.id +# } + +#module# "log_bucket" { +# source = "terraform-aws-modules/s3-bucket/aws" +# version = "~> 3.0" + +# bucket_prefix = "${local.name}-logs-" +# acl = "log-delivery-write" + +# # For example only +# force_destroy = true + +# control_object_ownership = true +# object_ownership = "ObjectWriter" + +# attach_elb_log_delivery_policy = true # Required for ALB logs +# attach_lb_log_delivery_policy = true # Required for ALB/NLB logs + +# attach_deny_insecure_transport_policy = true +# attach_require_latest_tls_policy = true +# tags = local.tags +#} diff --git a/environments/swarms-aws-agent-api/dev/us-east-1/components/application_load_balancer/route53/main.tf b/environments/swarms-aws-agent-api/dev/us-east-1/components/application_load_balancer/route53/main.tf new file mode 100644 index 0000000..697bffa --- /dev/null +++ b/environments/swarms-aws-agent-api/dev/us-east-1/components/application_load_balancer/route53/main.tf @@ -0,0 +1,36 @@ +variable domain_name {} # = local.name +variable alb_dns_name {} +variable alb_dns_zone {} +data "aws_route53_zone" "primary" { + name = var.domain_name +} + +resource "aws_route53_record" "api-cname" { + zone_id = data.aws_route53_zone.primary.zone_id + name = var.domain_name + # type = "CNAME" + type = "A" +# ttl = 5 + +# weighted_routing_policy { +# weight = 10 +# } + #set_identifier = "dev" + alias { + name = var.alb_dns_name + zone_id = var.alb_dns_zone + evaluate_target_health = true + + # + } +} + +output cname { + value = aws_route53_record.api-cname.fqdn +} +output zone { + value = data.aws_route53_zone.primary +} +output primary_zone_id { + value = data.aws_route53_zone.primary.zone_id +} diff --git 
a/environments/swarms-aws-agent-api/dev/us-east-1/components/application_load_balancer/target_group/main.tf b/environments/swarms-aws-agent-api/dev/us-east-1/components/application_load_balancer/target_group/main.tf new file mode 100644 index 0000000..017efed --- /dev/null +++ b/environments/swarms-aws-agent-api/dev/us-east-1/components/application_load_balancer/target_group/main.tf @@ -0,0 +1,34 @@ +variable vpc_id {} +resource "aws_lb_target_group" "this" { + name_prefix = "swarms" + protocol = "HTTP" + port = 80 + target_type = "instance" + vpc_id = var.vpc_id + deregistration_delay = 10 + #load_balancing_algorithm_type = "weighted_random" + #load_balancing_anomaly_mitigation = "on" + #load_balancing_cross_zone_enabled = false + protocol_version = "HTTP1" + # + health_check { + path = "/v1/docs" # the docs api + enabled = true + healthy_threshold = 10 + interval = 130 + port = "traffic-port" + protocol = "HTTP" + timeout = 120 + unhealthy_threshold = 10 + } + +# stickiness { +# cookie_duration = 86400 +# enabled = true +# type = "lb_cookie" +# } + +} +output alb_target_group_arn { + value = aws_lb_target_group.this.arn +} diff --git a/environments/swarms-aws-agent-api/dev/us-east-1/components/autoscaling_group/Makefile b/environments/swarms-aws-agent-api/dev/us-east-1/components/autoscaling_group/Makefile new file mode 100644 index 0000000..6a6bebf --- /dev/null +++ b/environments/swarms-aws-agent-api/dev/us-east-1/components/autoscaling_group/Makefile @@ -0,0 +1,4 @@ +doit : + tofu init + tofu plan + tofu apply -auto-approve diff --git a/environments/swarms-aws-agent-api/dev/us-east-1/components/autoscaling_group/main.tf b/environments/swarms-aws-agent-api/dev/us-east-1/components/autoscaling_group/main.tf new file mode 100644 index 0000000..e5e31ef --- /dev/null +++ b/environments/swarms-aws-agent-api/dev/us-east-1/components/autoscaling_group/main.tf @@ -0,0 +1,68 @@ +variable aws_iam_instance_profile_ssm_arn {} +variable target_group_arn{} +variable name {} 
+variable instance_type {} +variable launch_template_id {} +variable image_id {} +variable vpc_id {} +variable tags {} +variable ec2_subnet_id {} + +locals { + iam_instance_profile_arn = var.aws_iam_instance_profile_ssm_arn + instance_type = var.instance_type +} + +module "autoscaling" { + source = "terraform-aws-modules/autoscaling/aws" + version = "8.0.0" + name = var.name + + health_check_type = "EC2" + desired_capacity = 1 + max_size = 5 + min_size = 1 + + create_launch_template = false + update_default_version = true + + launch_template_id = var.launch_template_id + launch_template_version = "$Latest" + + vpc_zone_identifier = [var.ec2_subnet_id] + + instance_market_options = { + market_type = "spot" + } + network_interfaces = [{ + associate_public_ip_address=true + device_index = 0 + delete_on_termination = true + description = "interface1" +# security_groups = [var.security_group_id] + } + ] + instance_type = var.instance_type + image_id = var.image_id + + create_iam_instance_profile = true + iam_role_name = "ssm-${var.name}" + iam_role_path = "/ec2/" + iam_role_description = "SSM IAM role for swarms" + iam_role_tags = { + CustomIamRole = "Yes" + } + + iam_role_policies = { + AmazonSSMManagedInstanceCore = "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore" + } + + # target_group_arn = + traffic_source_attachments = { + ex-alb = { + traffic_source_identifier = var.target_group_arn + traffic_source_type = "elbv2" # default + } + } + +} diff --git a/environments/swarms-aws-agent-api/dev/us-east-1/components/cognito_user_pool/README.md b/environments/swarms-aws-agent-api/dev/us-east-1/components/cognito_user_pool/README.md new file mode 100644 index 0000000..505f95a --- /dev/null +++ b/environments/swarms-aws-agent-api/dev/us-east-1/components/cognito_user_pool/README.md @@ -0,0 +1,275 @@ +# manual setup + +1. Created auth client here +https://console.cloud.google.com/auth/clients/ + +2. 
stored password in +``` +export TF_VAR_google_oauth_client_id=DDDDD +export TF_VAR_google_oauth_client_secret=XXXX +``` + +# This is a complete example + +``` +module "aws_cognito_user_pool_complete_example" { + + source = "lgallard/cognito-user-pool/aws" + + user_pool_name = "mypool_complete" + alias_attributes = ["email", "phone_number"] + auto_verified_attributes = ["email"] + sms_authentication_message = "Your username is {username} and temporary password is {####}." + sms_verification_message = "This is the verification message {####}." + + deletion_protection = "ACTIVE" + + mfa_configuration = "OPTIONAL" + software_token_mfa_configuration = { + enabled = true + } + + admin_create_user_config = { + email_message = "Dear {username}, your verification code is {####}." + email_subject = "Here, your verification code baby" + sms_message = "Your username is {username} and temporary password is {####}." + } + + device_configuration = { + challenge_required_on_new_device = true + device_only_remembered_on_user_prompt = true + } + + email_configuration = { + email_sending_account = "DEVELOPER" + reply_to_email_address = "email@mydomain.com" + source_arn = "arn:aws:ses:us-east-1:123456789012:identity/myemail@mydomain.com" + } + + lambda_config = { + create_auth_challenge = "arn:aws:lambda:us-east-1:123456789012:function:create_auth_challenge" + custom_message = "arn:aws:lambda:us-east-1:123456789012:function:custom_message" + define_auth_challenge = "arn:aws:lambda:us-east-1:123456789012:function:define_auth_challenge" + post_authentication = "arn:aws:lambda:us-east-1:123456789012:function:post_authentication" + post_confirmation = "arn:aws:lambda:us-east-1:123456789012:function:post_confirmation" + pre_authentication = "arn:aws:lambda:us-east-1:123456789012:function:pre_authentication" + pre_sign_up = "arn:aws:lambda:us-east-1:123456789012:function:pre_sign_up" + pre_token_generation = "arn:aws:lambda:us-east-1:123456789012:function:pre_token_generation" + 
user_migration = "arn:aws:lambda:us-east-1:123456789012:function:user_migration" + verify_auth_challenge_response = "arn:aws:lambda:us-east-1:123456789012:function:verify_auth_challenge_response" + } + + password_policy = { + minimum_length = 10 + require_lowercase = false + require_numbers = true + require_symbols = true + require_uppercase = true + temporary_password_validity_days = 120 + + } + + user_pool_add_ons = { + advanced_security_mode = "ENFORCED" + } + + verification_message_template = { + default_email_option = "CONFIRM_WITH_CODE" + } + + schemas = [ + { + attribute_data_type = "Boolean" + developer_only_attribute = false + mutable = true + name = "available" + required = false + }, + { + attribute_data_type = "Boolean" + developer_only_attribute = true + mutable = true + name = "registered" + required = false + } + ] + + string_schemas = [ + { + attribute_data_type = "String" + developer_only_attribute = false + mutable = false + name = "email" + required = true + + string_attribute_constraints = { + min_length = 7 + max_length = 15 + } + }, + { + attribute_data_type = "String" + developer_only_attribute = false + mutable = false + name = "gender" + required = true + + string_attribute_constraints = { + min_length = 7 + max_length = 15 + } + }, + ] + + number_schemas = [ + { + attribute_data_type = "Number" + developer_only_attribute = true + mutable = true + name = "mynumber1" + required = false + + number_attribute_constraints = { + min_value = 2 + max_value = 6 + } + }, + { + attribute_data_type = "Number" + developer_only_attribute = true + mutable = true + name = "mynumber2" + required = false + + number_attribute_constraints = { + min_value = 2 + max_value = 6 + } + }, + ] + + # user_pool_domain + domain = "mydomain-com" + + # clients + clients = [ + { + allowed_oauth_flows = [] + allowed_oauth_flows_user_pool_client = false + allowed_oauth_scopes = [] + callback_urls = ["https://mydomain.com/callback"] + default_redirect_uri = 
"https://mydomain.com/callback" + explicit_auth_flows = [] + generate_secret = true + logout_urls = [] + name = "test1" + read_attributes = ["email"] + supported_identity_providers = [] + write_attributes = [] + access_token_validity = 1 + id_token_validity = 1 + refresh_token_validity = 60 + token_validity_units = { + access_token = "hours" + id_token = "hours" + refresh_token = "days" + } + }, + { + allowed_oauth_flows = [] + allowed_oauth_flows_user_pool_client = false + allowed_oauth_scopes = [] + callback_urls = ["https://mydomain.com/callback"] + default_redirect_uri = "https://mydomain.com/callback" + explicit_auth_flows = [] + generate_secret = false + logout_urls = [] + name = "test2" + read_attributes = [] + supported_identity_providers = [] + write_attributes = [] + refresh_token_validity = 30 + }, + { + allowed_oauth_flows = ["code", "implicit"] + allowed_oauth_flows_user_pool_client = true + allowed_oauth_scopes = ["email", "openid"] + callback_urls = ["https://mydomain.com/callback"] + default_redirect_uri = "https://mydomain.com/callback" + explicit_auth_flows = ["CUSTOM_AUTH_FLOW_ONLY", "ADMIN_NO_SRP_AUTH"] + generate_secret = false + logout_urls = ["https://mydomain.com/logout"] + name = "test3" + read_attributes = ["email", "phone_number"] + supported_identity_providers = [] + write_attributes = ["email", "gender", "locale", ] + refresh_token_validity = 30 + } + ] + + # user_group + user_groups = [ + { name = "mygroup1" + description = "My group 1" + }, + { name = "mygroup2" + description = "My group 2" + }, + ] + + # resource_servers + resource_servers = [ + { + identifier = "https://mydomain.com" + name = "mydomain" + scope = [ + { + scope_name = "sample-scope-1" + scope_description = "A sample Scope Description for mydomain.com" + }, + { + scope_name = "sample-scope-2" + scope_description = "Another sample Scope Description for mydomain.com" + }, + ] + }, + { + identifier = "https://weather-read-app.com" + name = "weather-read" + scope = [ + { 
+ scope_name = "weather.read" + scope_description = "Read weather forecasts" + } + ] + } + ] + + # identity_providers + identity_providers = [ + { + provider_name = "Google" + provider_type = "Google" + + provider_details = { + authorize_scopes = "email" + client_id = "your client_id" + client_secret = "your client_secret" + } + + attribute_mapping = { + email = "email" + username = "sub" + gender = "gender" + } + } + ] + + # tags + tags = { + Owner = "infra" + Environment = "production" + Terraform = true + } +} +``` diff --git a/environments/swarms-aws-agent-api/dev/us-east-1/components/cognito_user_pool/main.tf b/environments/swarms-aws-agent-api/dev/us-east-1/components/cognito_user_pool/main.tf new file mode 100644 index 0000000..d4de4ba --- /dev/null +++ b/environments/swarms-aws-agent-api/dev/us-east-1/components/cognito_user_pool/main.tf @@ -0,0 +1,345 @@ +variable "google_oauth_client_secret" {} +variable "google_oauth_client_id" {} +variable aws_region {} # us-east-1 +variable aws_account {} +variable myemail {} +variable mydomain {} +variable mydomain_suffix {} +locals { + mydomain=var.mydomain + mydomain_suffix = var.mydomain_suffix + mydomain_dot_com = "${local.mydomain}.${local.mydomain_suffix}" + myemail=var.myemail + myaccount=var.aws_account +} + +module "aws_cognito_user_pool_complete_example" { + + source = "lgallard/cognito-user-pool/aws" + + user_pool_name = "mypool_complete" + alias_attributes = ["email", "phone_number"] + auto_verified_attributes = ["email"] + sms_authentication_message = "Your username is {username} and temporary password is {####}." + sms_verification_message = "This is the verification message {####}." + + deletion_protection = "ACTIVE" + + mfa_configuration = "OPTIONAL" + software_token_mfa_configuration = { + enabled = true + } + + admin_create_user_config = { + email_message = "Dear {username}, your verification code is {####}." 
+ email_subject = "Here, your verification code baby" + sms_message = "Your username is {username} and temporary password is {####}." + } + + device_configuration = { + challenge_required_on_new_device = true + device_only_remembered_on_user_prompt = true + } + + email_configuration = { + email_sending_account = "DEVELOPER" + reply_to_email_address = "email@${local.mydomain_dot_com}" + source_arn = "arn:aws:ses:${var.aws_region}:${var.aws_account}:identity/${local.myemail}@${local.mydomain_dot_com}" + } + + lambda_config = { + create_auth_challenge = "arn:aws:lambda:${var.aws_region}:${var.aws_account}:function:create_auth_challenge" + custom_message = "arn:aws:lambda:${var.aws_region}:${var.aws_account}:function:custom_message" + define_auth_challenge = "arn:aws:lambda:${var.aws_region}:${var.aws_account}:function:define_auth_challenge" + post_authentication = "arn:aws:lambda:${var.aws_region}:${var.aws_account}:function:post_authentication" + post_confirmation = "arn:aws:lambda:${var.aws_region}:${var.aws_account}:function:post_confirmation" + pre_authentication = "arn:aws:lambda:${var.aws_region}:${var.aws_account}:function:pre_authentication" + pre_sign_up = "arn:aws:lambda:${var.aws_region}:${var.aws_account}:function:pre_sign_up" +# pre_token_generation = "arn:aws:lambda:${var.aws_region}:${var.aws_account}:function:pre_token_generation" + user_migration = "arn:aws:lambda:${var.aws_region}:${var.aws_account}:function:user_migration" + verify_auth_challenge_response = "arn:aws:lambda:${var.aws_region}:${var.aws_account}:function:verify_auth_challenge_response" + #kms_key_id = aws_kms_key.lambda-custom-sender.arn + pre_token_generation_config = { + lambda_arn = "arn:aws:lambda:${var.aws_region}:${var.aws_account}:function:pre_token_generation_config" + lambda_version = "V1_0" + } + #custom_email_sender = { + # lambda_arn = "arn:aws:lambda:${var.aws_region}:${var.aws_account}:function:custom_email_sender" + # lambda_version = "V1_0" + #} + #custom_sms_sender = { 
+ # lambda_arn = "arn:aws:lambda:${var.aws_region}:${var.aws_account}:function:custom_sms_sender" + # lambda_version = "V1_0" + #} + } + + password_policy = { + minimum_length = 10 + require_lowercase = false + require_numbers = true + require_symbols = true + require_uppercase = true + temporary_password_validity_days = 120 + + } + + user_pool_add_ons = { + advanced_security_mode = "ENFORCED" + } + + verification_message_template = { + default_email_option = "CONFIRM_WITH_CODE" + } + + schemas = [ + { + attribute_data_type = "Boolean" + developer_only_attribute = false + mutable = true + name = "available" + required = false + }, + { + attribute_data_type = "Boolean" + developer_only_attribute = true + mutable = true + name = "registered" + required = false + } + ] + + string_schemas = [ + { + attribute_data_type = "String" + developer_only_attribute = false + mutable = false + name = "email" + required = true + + string_attribute_constraints = { + min_length = 7 + max_length = 15 + } + }, + { + attribute_data_type = "String" + developer_only_attribute = false + mutable = false + name = "gender" + required = true + + string_attribute_constraints = { + min_length = 7 + max_length = 15 + } + }, + ] + + number_schemas = [ + { + attribute_data_type = "Number" + developer_only_attribute = true + mutable = true + name = "mynumber1" + required = false + + number_attribute_constraints = { + min_value = 2 + max_value = 6 + } + }, + { + attribute_data_type = "Number" + developer_only_attribute = true + mutable = true + name = "mynumber2" + required = false + + number_attribute_constraints = { + min_value = 2 + max_value = 6 + } + }, + ] + + # user_pool_domain + domain = "${local.mydomain}-com" + + # clients + clients = [ + { + allowed_oauth_flows_user_pool_client = false + allowed_oauth_scopes = [ + "aws.cognito.signin.user.admin", + "email", + "https://introspector.meme/sample-scope-1", + "https://introspector.meme/sample-scope-2", + "openid", + "phone", + "profile", + + ] 
+ callback_urls = ["https://${local.mydomain_dot_com}/callback"] + default_redirect_uri = "https://${local.mydomain_dot_com}/callback" + explicit_auth_flows = [] + generate_secret = true + logout_urls = [] + name = "test1" + read_attributes = ["email"] + supported_identity_providers = [ + "COGNITO", + "Google", + + ] + write_attributes = [] + access_token_validity = 1 + id_token_validity = 1 + refresh_token_validity = 60 + token_validity_units = { + access_token = "hours" + id_token = "hours" + refresh_token = "days" + } + ui_customization_css = file("./custom_style.css") + ui_customization_image_file = filebase64("logo.png") + }, + { + allowed_oauth_flows = [ + "code", + "implicit" + ] + allowed_oauth_flows_user_pool_client = false # FIXME why? + allowed_oauth_scopes = [ + "aws.cognito.signin.user.admin", + "email", + "https://introspector.meme/sample-scope-1", + "https://introspector.meme/sample-scope-2", + "openid", + "phone", + "profile", + + ] + callback_urls = ["https://${local.mydomain_dot_com}/callback"] + default_redirect_uri = "https://${local.mydomain_dot_com}/callback" + explicit_auth_flows = [] + generate_secret = false + logout_urls = [] + name = "test2" + read_attributes = [] + supported_identity_providers = [ + "COGNITO", + "Google", + + ] + write_attributes = [] + refresh_token_validity = 30 + }, + { + allowed_oauth_flows = ["code", "implicit"] + allowed_oauth_flows_user_pool_client = true + allowed_oauth_scopes = [ + "aws.cognito.signin.user.admin", + "email", + "https://introspector.meme/sample-scope-1", + "https://introspector.meme/sample-scope-2", + "openid", + "phone", + "profile", + + ] + callback_urls = ["https://${local.mydomain_dot_com}/callback"] + default_redirect_uri = "https://${local.mydomain_dot_com}/callback" + explicit_auth_flows = ["CUSTOM_AUTH_FLOW_ONLY", "ADMIN_NO_SRP_AUTH"] + generate_secret = false + logout_urls = ["https://${local.mydomain_dot_com}/logout"] + name = "test3" + read_attributes = ["email", "phone_number"] + 
supported_identity_providers = [] + write_attributes = ["email", "gender", "locale", ] + refresh_token_validity = 30 + } + ] + + # user_group + user_groups = [ + { name = "mygroup1" + description = "My group 1" + }, + { name = "mygroup2" + description = "My group 2" + }, + ] + + # resource_servers + resource_servers = [ + { + identifier = "https://${local.mydomain_dot_com}" + name = "${local.mydomain}" + scope = [ + { + scope_name = "sample-scope-1" + scope_description = "A sample Scope Description for ${local.mydomain_dot_com}" + }, + { + scope_name = "sample-scope-2" + scope_description = "Another sample Scope Description for ${local.mydomain_dot_com}" + }, + ] + }, + { + identifier = "https://weather-read-app.com" + name = "weather-read" + scope = [ + { + scope_name = "weather.read" + scope_description = "Read weather forecasts" + } + ] + } + ] + + # identity_providers + identity_providers = [ + { + provider_name = "Google" + provider_type = "Google" + + provider_details = { + authorize_scopes = "email" + #export TF_VAR_google_oauth_client_id=XXXX + client_id = var.google_oauth_client_id # This should be retrieved from AWS Secret Manager, otherwise Terraform will force an in-place replacement becuase is treated as a sensitive value + # export TF_VAR_google_oauth_client_secret=YYY + client_secret = var.google_oauth_client_secret #"your client_secret" # # This should be retrieved from AWS Secret Manager, otherwise Terraform will force an in-place replacement becuase is treated as a sensitive value + attributes_url_add_attributes = "true" + authorize_url = "https://accounts.google.com/o/oauth2/v2/auth" + oidc_issuer = "https://accounts.google.com" + token_request_method = "POST" + token_url = "https://www.googleapis.com/oauth2/v4/token" + } + + attribute_mapping = { + email = "email" + username = "sub" + gender = "gender" + } + } + ] + + # tags + tags = { + Owner = "infra" + Environment = "production" + Terraform = true + } +} + + + # KMS key for lambda custom 
sender config" + resource "aws_kms_key" "lambda-custom-sender" { + count = 0 + description = "KMS key for lambda custom sender config" + } + +output cognito{ + value = module.aws_cognito_user_pool_complete_example +} diff --git a/environments/swarms-aws-agent-api/dev/us-east-1/components/cognito_user_pool/provider.tf b/environments/swarms-aws-agent-api/dev/us-east-1/components/cognito_user_pool/provider.tf new file mode 100644 index 0000000..634c762 --- /dev/null +++ b/environments/swarms-aws-agent-api/dev/us-east-1/components/cognito_user_pool/provider.tf @@ -0,0 +1,4 @@ +provider "aws" { + region = var.env["region"] + profile = var.env["profile"] +} diff --git a/environments/swarms-aws-agent-api/dev/us-east-1/components/cognito_user_pool/variables.tf b/environments/swarms-aws-agent-api/dev/us-east-1/components/cognito_user_pool/variables.tf new file mode 100644 index 0000000..c7b7aed --- /dev/null +++ b/environments/swarms-aws-agent-api/dev/us-east-1/components/cognito_user_pool/variables.tf @@ -0,0 +1,4 @@ +variable "env" { + type = map(any) + default = {} +} diff --git a/environments/swarms-aws-agent-api/dev/us-east-1/components/keypairs/main.tf b/environments/swarms-aws-agent-api/dev/us-east-1/components/keypairs/main.tf new file mode 100644 index 0000000..8a8040b --- /dev/null +++ b/environments/swarms-aws-agent-api/dev/us-east-1/components/keypairs/main.tf @@ -0,0 +1,4 @@ +resource "aws_key_pair" "mdupont-deployer" { + key_name = "mdupont-deployer-key" + public_key = "ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABgQCYh8dzILXDkDpXDJ+wUiru8EPNPETWWHHFlnVI7Uc2Bb2f/yHJL9bV0QUQ+/hN2OGeN3r4z34vf444A7oMXT8T2bnMDzXqGfrqpQ6+Xs7Cu2F6wGik+KDBsu52vhaATVLOnDegbhpQW+IikSvxe4huKOvQpF+p7Ex80B+XpBPEV23DXapjZI+FIsbYoD4Mp5qY/PmXisNCByayhBG7WBhCEtHxkvpFntkz/9bwk2kC/z2W1SIHufN5TbrxKPKWY5iguW0Mn2e/rNvxnxFZaRx224rQnRFBMSq4Oi91MNdilwDHFzkv4oVBtpUmCet84np8+DxCfzphyIpo899dRV+/f7dwb6ZY3cvBkALcWahsscuE4ypbroXQ40UPAa3gW1PirTNdMEiX+Ie/IzEDWWCJKdDv4JaGtKAPORfC7bbXnBYn5RASglOjI24w974Llyj5TXXKexjxsjF3wlSS6pNHFlFJnQzVfemcY6AqSJ0Xr8dfFbxpSYH9OFkvBhzPaec= mdupont@mdupont-G470" +} diff --git a/environments/swarms-aws-agent-api/dev/us-east-1/components/launch_template/.terraform.lock.hcl b/environments/swarms-aws-agent-api/dev/us-east-1/components/launch_template/.terraform.lock.hcl new file mode 100644 index 0000000..a720cb2 --- /dev/null +++ b/environments/swarms-aws-agent-api/dev/us-east-1/components/launch_template/.terraform.lock.hcl @@ -0,0 +1,19 @@ +# This file is maintained automatically by "tofu init". +# Manual edits may be lost in future updates. 
+ +provider "registry.opentofu.org/hashicorp/aws" { + version = "5.81.0" + hashes = [ + "h1:ird967uf44WBZ0u9rMlOdO4nCKJZMlUknLcD3lz4dWs=", + "zh:0d0c7cc1e16b16cd00ab36de35038162e1871b51a902e9016d08c55c9fc4de35", + "zh:0e4b8c6e46999015292ab4fb9a430ab00107a276f25243552cde66db53c58661", + "zh:30041314cdd4e877d75ee8c9d36aecfca094e276f7a3d8150f929cf5169b2fa5", + "zh:5ebd248ce3e5a7ef9cc2f41499668f182146325e10ea305c70469122f6161a13", + "zh:888a69d371373b418549e03f5922becb2b8074cb463552ecfa65f30455708db0", + "zh:8a21bb7fe60383ff5ca9db8630a287e41fd520d2514c08874a16dc74500fadd7", + "zh:9c4663dcbfe58544642d70ebfdc6c5fa91592ff04164f77c655e32e6024483e2", + "zh:b322873f1209443a8407d5f782d7d917de6a1391239dbd0e7f809ce6507bed76", + "zh:b7c9d5ca14b818b5932ac58a490646a425ebc41b33d149090aa5f48d1ca35c99", + "zh:e76cd202b03749f3082b0cbe849fd2e731cf3f9a6aa994d2d629602c3aede36c", + ] +} diff --git a/environments/swarms-aws-agent-api/dev/us-east-1/components/launch_template/Makefile b/environments/swarms-aws-agent-api/dev/us-east-1/components/launch_template/Makefile new file mode 100644 index 0000000..6a6bebf --- /dev/null +++ b/environments/swarms-aws-agent-api/dev/us-east-1/components/launch_template/Makefile @@ -0,0 +1,4 @@ +doit : + tofu init + tofu plan + tofu apply -auto-approve diff --git a/environments/swarms-aws-agent-api/dev/us-east-1/components/launch_template/main.tf b/environments/swarms-aws-agent-api/dev/us-east-1/components/launch_template/main.tf new file mode 100644 index 0000000..09801f7 --- /dev/null +++ b/environments/swarms-aws-agent-api/dev/us-east-1/components/launch_template/main.tf @@ -0,0 +1,78 @@ +variable iam_instance_profile_name {} +variable security_group_id {} +variable name {} +variable vpc_id {} +variable ami_id {} +variable tags {} +variable key_name { + default = "mdupont-deployer-key" +} + +# dont use this +variable instance_type {} + +locals { + tags = { + project="swarms" + instance_type = var.instance_type + name = var.name + } +} +resource 
"aws_launch_template" "ec2_launch_template" { + name_prefix = "${var.name}-launch-template-" + image_id = var.ami_id + key_name = var.key_name + instance_type = var.instance_type + network_interfaces { + associate_public_ip_address = true + delete_on_termination = true + security_groups = [var.security_group_id] + } + + iam_instance_profile { + # iam_instance_profile_arn = aws_iam_instance_profile.ssm.arn + name = var.iam_instance_profile_name #aws_iam_instance_profile.ec2_instance_profile.name + } + lifecycle { + create_before_destroy = true + } + block_device_mappings { + device_name = "/dev/sda1" + ebs { + volume_size = 30 + volume_type = "gp3" + encrypted = true + } + } + + user_data = base64encode(<<-EOF + #!/bin/bash + export HOME=/root + apt update + apt-get install -y ec2-instance-connect git virtualenv + snap install amazon-ssm-agent --classic || echo oops1 + snap start amazon-ssm-agent || echo oops2 + apt-get install -y --no-install-recommends ca-certificates=20230311 curl=7.88.1-10+deb12u7 | echo oops + curl -O "https://s3.amazonaws.com/amazoncloudwatch-agent/ubuntu/$(dpkg --print-architecture)/latest/amazon-cloudwatch-agent.deb" + dpkg -i -E amazon-cloudwatch-agent.deb + + if [ ! 
-d "/opt/swarms/" ]; then + git clone https://github.com/jmikedupont2/swarms "/opt/swarms/" + fi + cd "/opt/swarms/" || exit 1 + export BRANCH=feature/ec2 + git stash + git checkout --force $BRANCH + bash -x /opt/swarms/api/install.sh + EOF + ) + tags = var.tags +} + + +output "lt" { + value = resource.aws_launch_template.ec2_launch_template +} +output "launch_template_id" { + value = resource.aws_launch_template.ec2_launch_template.id +} diff --git a/environments/swarms-aws-agent-api/dev/us-east-1/components/machine_image/Makefile b/environments/swarms-aws-agent-api/dev/us-east-1/components/machine_image/Makefile index 307e8a1..6a6bebf 100644 --- a/environments/swarms-aws-agent-api/dev/us-east-1/components/machine_image/Makefile +++ b/environments/swarms-aws-agent-api/dev/us-east-1/components/machine_image/Makefile @@ -1,4 +1,4 @@ doit : tofu init tofu plan - tofu apply + tofu apply -auto-approve diff --git a/environments/swarms-aws-agent-api/dev/us-east-1/components/machine_image/Readme.md b/environments/swarms-aws-agent-api/dev/us-east-1/components/machine_image/Readme.md index 2ee3737..69b85c3 100644 --- a/environments/swarms-aws-agent-api/dev/us-east-1/components/machine_image/Readme.md +++ b/environments/swarms-aws-agent-api/dev/us-east-1/components/machine_image/Readme.md @@ -1 +1,9 @@ machine_image + + +`tofu apply -destroy -target module.ec2.aws_spot_instance_request.this[0] -auto-approve` + +aws ec2 describe-images --owners 099720109477 > images.json +* + +tofu state rm "module.ec2.aws_spot_instance_request.this[0]" diff --git a/environments/swarms-aws-agent-api/dev/us-east-1/components/machine_image/main.tf b/environments/swarms-aws-agent-api/dev/us-east-1/components/machine_image/main.tf index a438646..2b7fbaa 100644 --- a/environments/swarms-aws-agent-api/dev/us-east-1/components/machine_image/main.tf +++ b/environments/swarms-aws-agent-api/dev/us-east-1/components/machine_image/main.tf @@ -1,29 +1,83 @@ -#TASK:write terraform packer for ubuntu python 
fastapi server with custom git modules -#To create a Terraform configuration using Packer for an Ubuntu-based FastAPI server with custom Git modules, you'll need to follow a few steps. Below is a concise example of how to structure your Packer template and Terraform files. - -provider "aws" { - region = "us-east-1" +locals { + name = "swarms" + tags = { + project="swarms" + } } -data "aws_ami" "ubuntu" { - most_recent = true +module "security_group_instance" { + source = "terraform-aws-modules/security-group/aws" + version = "~> 5.0" + name = "${local.name}-ec2" + description = "Security Group for EC2 Instance" + vpc_id = local.vpc_id + ingress_with_cidr_blocks = [ + { + from_port = 443 + to_port = 443 + protocol = "tcp" + cidr_blocks = "0.0.0.0/0" + }, + { + from_port = 80 + to_port = 80 + protocol = "tcp" + cidr_blocks = "0.0.0.0/0" + } + ] - owners = ["099720109477"] # Ubuntu's account ID - filter { - name = "name" - values = ["ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-*"] + egress_rules = ["all-all"] + tags = local.tags +} + +module "ec2" { + source = "terraform-aws-modules/ec2-instance/aws" + associate_public_ip_address = true # for now + name = local.name + ami = local.ami # data.aws_ami.ubuntu.id + instance_type = "t3.large" + create_iam_instance_profile = true + iam_role_description = "IAM role for EC2 instance" + iam_role_policies = { + AmazonSSMManagedInstanceCore = "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore" } + vpc_security_group_ids = [module.security_group_instance.security_group_id] + + root_block_device = [ + { + encrypted = true + volume_size = 30 + volume_type = "gp3" + } + ] + + user_data = <<-EOF +#!/bin/bash +export HOME=/root +apt update +apt-get install -y ec2-instance-connect git virtualenv + +if [ ! 
-d "/opt/swarms/" ]; + then + git clone https://github.com/jmikedupont2/swarms "/opt/swarms/" +fi +cd "/opt/swarms/" || exit 1 # "we need swarms" +export BRANCH=feature/ec2 +git checkout --force $BRANCH +bash -x /opt/swarms/api/install.sh + EOF + tags = local.tags + create_spot_instance = true + subnet_id = local.ec2_subnet_id } -resource "aws_instance" "fastapi_server_test_instance" { - count = var.test_server_count - ami = data.aws_ami.ubuntu.id - instance_type = "t2.micro" - # add in this user data - tags = { - Name = "FastAPI Server" - } +output "ec2_data" { + value = module.ec2 } +output "iam_instance_profile_name" { + value = module.ec2.iam_instance_profile_id + description = "IAM Instance Profile Name created for EC2 instance" +} diff --git a/environments/swarms-aws-agent-api/dev/us-east-1/components/machine_image/main.tf.asg b/environments/swarms-aws-agent-api/dev/us-east-1/components/machine_image/main.tf.asg new file mode 100644 index 0000000..b05f717 --- /dev/null +++ b/environments/swarms-aws-agent-api/dev/us-east-1/components/machine_image/main.tf.asg @@ -0,0 +1,253 @@ +provider "aws" { + region = "us-east-1" +} + +locals { + ami = "ami-0e2c8caa4b6378d8c" + name = "swarms" + region = "us-east-1" + ec2_subnet_id = "subnet-057c90cfe7b2e5646" + vpc_id = "vpc-04f28c9347af48b55" + tags = { + project="swarms" + } +} + +module "security_group_instance" { + source = "terraform-aws-modules/security-group/aws" + version = "~> 5.0" + name = "${local.name}-ec2" + description = "Security Group for EC2 Instance" + vpc_id = local.vpc_id + ingress_with_cidr_blocks = [ + { + from_port = 443 + to_port = 443 + protocol = "tcp" + cidr_blocks = "0.0.0.0/0" + }, + { + from_port = 80 + to_port = 80 + protocol = "tcp" + cidr_blocks = "0.0.0.0/0" + } + ] + + egress_rules = ["all-all"] + tags = local.tags +} + +module "ec2" { + source = "terraform-aws-modules/ec2-instance/aws" + associate_public_ip_address = true # for now + name = local.name + ami = local.ami # 
data.aws_ami.ubuntu.id + instance_type = "t3.large" + create_iam_instance_profile = true + iam_role_description = "IAM role for EC2 instance" + iam_role_policies = { + AmazonSSMManagedInstanceCore = "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore" + } + vpc_security_group_ids = [module.security_group_instance.security_group_id] + + root_block_device = [ + { + encrypted = true + volume_size = 30 + volume_type = "gp3" + } + ] + + user_data = <<-EOF +#!/bin/bash +export HOME=/root +apt update +apt-get install -y ec2-instance-connect git virtualenv + +if [ ! -d "/opt/swarms/" ]; + then + git clone https://github.com/jmikedupont2/swarms "/opt/swarms/" +fi +cd "/opt/swarms/" || exit 1 # "we need swarms" +export BRANCH=feature/ec2 +git checkout --force $BRANCH +bash -x /opt/swarms/api/install.sh + EOF + tags = local.tags + create_spot_instance = true + subnet_id = local.ec2_subnet_id +} + +TASK FOR GPT: +#1/ create an autoscaling group +#2/ create a launch template +#3/ create a lambda function to trigger the launching of new instances + + + +To create an Auto Scaling group, a launch template, and a Lambda function to trigger the launching of new instances, you can add the following configurations to your Terraform script: + +### 1. Create a Launch Template + +```hcl +resource "aws_launch_template" "ec2_launch_template" { + name_prefix = "${local.name}-launch-template-" + image_id = local.ami + instance_type = "t3.large" + associate_public_ip_address = true + + iam_instance_profile { + name = aws_iam_instance_profile.ec2_instance_profile.name + } + + key_name = "your-key-pair" # Replace with your key pair name + + lifecycle { + create_before_destroy = true + } + + block_device_mappings { + device_name = "/dev/xvda" + ebs { + volume_size = 30 + volume_type = "gp3" + encrypted = true + } + } + + user_data = <<-EOF + #!/bin/bash + export HOME=/root + apt update + apt-get install -y ec2-instance-connect git virtualenv + + if [ ! 
-d "/opt/swarms/" ]; then + git clone https://github.com/jmikedupont2/swarms "/opt/swarms/" + fi + cd "/opt/swarms/" || exit 1 + export BRANCH=feature/ec2 + git checkout --force $BRANCH + bash -x /opt/swarms/api/install.sh + EOF + + tags = local.tags +} +``` + +### 2. Create an Auto Scaling Group + +```hcl +resource "aws_autoscaling_group" "ec2_autoscaling_group" { + desired_capacity = 1 + max_size = 5 + min_size = 1 + + launch_template { + id = aws_launch_template.ec2_launch_template.id + version = "$Latest" + } + + vpc_zone_identifier = [local.ec2_subnet_id] + + tags = [ + { + key = "Name" + value = local.name + propagate_at_launch = true + }, + { + key = "Project" + value = local.tags.project + propagate_at_launch = true + } + ] +} +``` + +### 3. Create a Lambda Function to Trigger New Instances + +```hcl +resource "aws_iam_role" "lambda_role" { + name = "${local.name}-lambda-role" + + assume_role_policy = jsonencode({ + Version = "2012-10-17" + Statement = [{ + Action = "sts:AssumeRole" + Effect = "Allow" + Principal = { + Service = "lambda.amazonaws.com" + } + }] + }) +} + +resource "aws_iam_policy" "lambda_scaling_policy" { + name = "${local.name}-scaling-policy" + description = "Policy to allow Lambda to manage Auto Scaling" + + policy = jsonencode({ + Version = "2012-10-17" + Statement = [{ + Action = [ + "autoscaling:DescribeAutoScalingGroups", + "autoscaling:SetDesiredCapacity" + ] + Resource = "*" + Effect = "Allow" + }] + }) +} + +resource "aws_iam_role_policy_attachment" "lambda_policy_attach" { + role = aws_iam_role.lambda_role.name + policy_arn = aws_iam_policy.lambda_scaling_policy.arn +} + +resource "aws_lambda_function" "scale_lambda" { + function_name = "${local.name}-scale-lambda" + role = aws_iam_role.lambda_role.arn + + runtime = "python3.8" + handler = "scale_lambda.handler" + + source_code_hash = filebase64sha256("lambda/scale_lambda.zip") + + # Environment variables for the Lambda function + environment = { + AUTO_SCALING_GROUP_NAME = 
aws_autoscaling_group.ec2_autoscaling_group.name + } +} +``` + +### 4. Lambda Function Code (Python) + +You'll need to create a file named `scale_lambda.py` in the `lambda/` directory with content like the following: + +```python +import boto3 +import os + +def handler(event, context): + asg_name = os.environ['AUTO_SCALING_GROUP_NAME'] + client = boto3.client('autoscaling') + + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=[asg_name]) + desired_capacity = response['AutoScalingGroups'][0]['DesiredCapacity'] + + # Example scaling logic + client.set_desired_capacity( + AutoScalingGroupName=asg_name, + DesiredCapacity=desired_capacity + 1, # Increment desired instances + HonorCooldown=True + ) +``` + +### Note +- Replace `"your-key-pair"` in the launch template with your actual key pair name. +- Ensure you have the right IAM permissions for your Lambda function. +- You may need to handle packaging and deploying your Lambda function code (`scale_lambda.py`) as a zip file for Terraform. +- Adjust desired capacity and scaling logic as per your requirement. + +This configuration sets up an Auto Scaling group with a launch template and a Lambda function that can adjust the desired number of running instances based on certain conditions (you would need to implement those conditions). \ No newline at end of file diff --git a/environments/swarms-aws-agent-api/dev/us-east-1/components/machine_image/main.tf.consts.org b/environments/swarms-aws-agent-api/dev/us-east-1/components/machine_image/main.tf.consts.org new file mode 100644 index 0000000..192c054 --- /dev/null +++ b/environments/swarms-aws-agent-api/dev/us-east-1/components/machine_image/main.tf.consts.org @@ -0,0 +1,194 @@ +provider "aws" { + region = "us-east-1" +} + +#1. 
create an interface to import these constants from other modules in a clean way +locals { + ami = "ami-0e2c8caa4b6378d8c" + name = "swarms" + region = "us-east-1" + ec2_subnet_id = "subnet-057c90cfe7b2e5646" + vpc_id = "vpc-04f28c9347af48b55" + tags = { + project="swarms" + } +} + +module "security_group_instance" { + source = "terraform-aws-modules/security-group/aws" + version = "~> 5.0" + name = "${local.name}-ec2" + description = "Security Group for EC2 Instance" + vpc_id = local.vpc_id + ingress_with_cidr_blocks = [ + { + from_port = 443 + to_port = 443 + protocol = "tcp" + cidr_blocks = "0.0.0.0/0" + }, + { + from_port = 80 + to_port = 80 + protocol = "tcp" + cidr_blocks = "0.0.0.0/0" + } + ] + + egress_rules = ["all-all"] + tags = local.tags +} + +module "ec2" { + source = "terraform-aws-modules/ec2-instance/aws" + associate_public_ip_address = true # for now + name = local.name + ami = local.ami # data.aws_ami.ubuntu.id + instance_type = "t3.large" + create_iam_instance_profile = true + iam_role_description = "IAM role for EC2 instance" + iam_role_policies = { + AmazonSSMManagedInstanceCore = "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore" + } + vpc_security_group_ids = [module.security_group_instance.security_group_id] + + root_block_device = [ + { + encrypted = true + volume_size = 30 + volume_type = "gp3" + } + ] + + user_data = <<-EOF +#!/bin/bash +export HOME=/root +apt update +apt-get install -y ec2-instance-connect git virtualenv + +if [ ! 
-d "/opt/swarms/" ]; + then + git clone https://github.com/jmikedupont2/swarms "/opt/swarms/" +fi +cd "/opt/swarms/" || exit 1 # "we need swarms" +export BRANCH=feature/ec2 +git checkout --force $BRANCH +bash -x /opt/swarms/api/install.sh + EOF + tags = local.tags + create_spot_instance = true + subnet_id = local.ec2_subnet_id +} + +# create an autoscaling group +# create a launch template +# create a lambda function to trigger the launching of new instances + + +To create a clean interface for importing constants from other modules in Terraform, you can follow the structure outlined below. This involves the use of variables and outputs to encapsulate the constants while maintaining modularity. Here's a concise example: + +### 1. Define Variables in a `variables.tf` File + +```hcl +variable "ami" { + description = "AMI ID for the EC2 instance" + type = string +} + +variable "name" { + description = "Name for the resources" + type = string +} + +variable "region" { + description = "AWS region" + type = string +} + +variable "ec2_subnet_id" { + description = "Subnet ID for the EC2 instance" + type = string +} + +variable "vpc_id" { + description = "VPC ID where resources are deployed" + type = string +} + +variable "tags" { + description = "Tags to assign to resources" + type = map(string) +} +``` + +### 2. Create a `locals.tf` File to Define Constants + +```hcl +locals { + ami = "ami-0e2c8caa4b6378d8c" + name = "swarms" + region = "us-east-1" + ec2_subnet_id = "subnet-057c90cfe7b2e5646" + vpc_id = "vpc-04f28c9347af48b55" + tags = { + project = "swarms" + } +} +``` + +### 3. Outputs for Constants in an `outputs.tf` File + +```hcl +output "ami" { + value = local.ami +} + +output "name" { + value = local.name +} + +output "region" { + value = local.region +} + +output "ec2_subnet_id" { + value = local.ec2_subnet_id +} + +output "vpc_id" { + value = local.vpc_id +} + +output "tags" { + value = local.tags +} +``` + +### 4. 
Import Variables in Other Modules + +In your main module or other resource modules, import these variables as shown below: + +```hcl +module "constants" { + source = "./path-to-constants-module" # replace with the actual path +} + +# Use them as follows +module "security_group_instance" { + source = "terraform-aws-modules/security-group/aws" + name = "${module.constants.name}-ec2" + vpc_id = module.constants.vpc_id + # ... other configurations +} + +module "ec2" { + ami = module.constants.ami + name = module.constants.name + subnet_id = module.constants.ec2_subnet_id + # ... other configurations +} +``` + +### Summary + +This structure ensures that you can import and use constants across your Terraform module cleanly without hardcoding values in multiple places. Each module can reference these shared values as needed for configurations. diff --git a/environments/swarms-aws-agent-api/dev/us-east-1/components/machine_image/variables.tf b/environments/swarms-aws-agent-api/dev/us-east-1/components/machine_image/variables.tf index c36c95f..e9e30b2 100644 --- a/environments/swarms-aws-agent-api/dev/us-east-1/components/machine_image/variables.tf +++ b/environments/swarms-aws-agent-api/dev/us-east-1/components/machine_image/variables.tf @@ -1,3 +1,3 @@ variable test_server_count { - value = 1 # how many test servers to run + default = 1 # how many test servers to run } diff --git a/environments/swarms-aws-agent-api/dev/us-east-1/components/machine_image/versions.tf b/environments/swarms-aws-agent-api/dev/us-east-1/components/machine_image/versions.tf new file mode 100644 index 0000000..e69de29 diff --git a/environments/swarms-aws-agent-api/dev/us-east-1/components/resource_launchers/main.tf b/environments/swarms-aws-agent-api/dev/us-east-1/components/resource_launchers/main.tf new file mode 100644 index 0000000..70a86cd --- /dev/null +++ b/environments/swarms-aws-agent-api/dev/us-east-1/components/resource_launchers/main.tf @@ -0,0 +1,74 @@ +resource "aws_iam_role" 
"lambda_role" { + name = "${local.name}-lambda-role" + + assume_role_policy = jsonencode({ + Version = "2012-10-17" + Statement = [{ + Action = "sts:AssumeRole" + Effect = "Allow" + Principal = { + Service = "lambda.amazonaws.com" + } + }] + }) +} + +resource "aws_iam_policy" "lambda_scaling_policy" { + name = "${local.name}-scaling-policy" + description = "Policy to allow Lambda to manage Auto Scaling" + + policy = jsonencode({ + Version = "2012-10-17" + Statement = [{ + Action = [ + "autoscaling:DescribeAutoScalingGroups", + "autoscaling:SetDesiredCapacity" + ] + Resource = "*" + Effect = "Allow" + }] + }) +} + +resource "aws_iam_role_policy_attachment" "lambda_policy_attach" { + role = aws_iam_role.lambda_role.name + policy_arn = aws_iam_policy.lambda_scaling_policy.arn +} + +resource "aws_lambda_function" "scale_lambda" { + function_name = "${local.name}-scale-lambda" + role = aws_iam_role.lambda_role.arn + + runtime = "python3.8" + handler = "scale_lambda.handler" + + source_code_hash = filebase64sha256("lambda/scale_lambda.zip") + + # Environment variables for the Lambda function + environment = { + AUTO_SCALING_GROUP_NAME = aws_autoscaling_group.ec2_autoscaling_group.name + } +} +``` + +### 4. 
Lambda Function Code (Python) + +You'll need to create a file named `scale_lambda.py` in the `lambda/` directory with content like the following: + +```python +import boto3 +import os + +def handler(event, context): + asg_name = os.environ['AUTO_SCALING_GROUP_NAME'] + client = boto3.client('autoscaling') + + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=[asg_name]) + desired_capacity = response['AutoScalingGroups'][0]['DesiredCapacity'] + + # Example scaling logic + client.set_desired_capacity( + AutoScalingGroupName=asg_name, + DesiredCapacity=desired_capacity + 1, # Increment desired instances + HonorCooldown=True + ) diff --git a/environments/swarms-aws-agent-api/dev/us-east-1/components/security/main.tf b/environments/swarms-aws-agent-api/dev/us-east-1/components/security/main.tf new file mode 100644 index 0000000..5572aba --- /dev/null +++ b/environments/swarms-aws-agent-api/dev/us-east-1/components/security/main.tf @@ -0,0 +1,53 @@ +variable vpc_id { } +variable tags { } +variable name { } + +module "asg_sg" { + source = "terraform-aws-modules/security-group/aws" + version = "~> 5.0" + + name = "${var.name}-external" + description = "external group" + vpc_id = var.vpc_id + + ingress_cidr_blocks = ["0.0.0.0/0"] + ingress_rules = [ + "https-443-tcp", + "http-80-tcp", +# "ssh-tcp" dont need this now + ] + + egress_rules = ["all-all"] + + tags = var.tags +} + +module "asg_sg_internal" { + source = "terraform-aws-modules/security-group/aws" + version = "~> 5.0" + + name = "${var.name}-internal" + description = "An internal security group" + vpc_id = var.vpc_id + # see ~/2024/12/13/terraform-aws-security-group/examples/complete/main.tf + ingress_with_source_security_group_id = [ + { + rule = "http-80-tcp", + # only allow from load balancer for security + source_security_group_id = module.asg_sg.security_group_id + } + ] + egress_rules = ["all-all"] + + tags = var.tags +} + +output "security_group_id" { + value = 
module.asg_sg.security_group_id +} + +output "internal_security_group_id" { + value = module.asg_sg_internal.security_group_id +} + + diff --git a/environments/swarms-aws-agent-api/dev/us-east-1/components/vpc/.terraform.lock.hcl b/environments/swarms-aws-agent-api/dev/us-east-1/components/vpc/.terraform.lock.hcl new file mode 100644 index 0000000..a688307 --- /dev/null +++ b/environments/swarms-aws-agent-api/dev/us-east-1/components/vpc/.terraform.lock.hcl @@ -0,0 +1,20 @@ +# This file is maintained automatically by "tofu init". +# Manual edits may be lost in future updates. + +provider "registry.opentofu.org/hashicorp/aws" { + version = "5.81.0" + constraints = ">= 5.46.0" + hashes = [ + "h1:ird967uf44WBZ0u9rMlOdO4nCKJZMlUknLcD3lz4dWs=", + "zh:0d0c7cc1e16b16cd00ab36de35038162e1871b51a902e9016d08c55c9fc4de35", + "zh:0e4b8c6e46999015292ab4fb9a430ab00107a276f25243552cde66db53c58661", + "zh:30041314cdd4e877d75ee8c9d36aecfca094e276f7a3d8150f929cf5169b2fa5", + "zh:5ebd248ce3e5a7ef9cc2f41499668f182146325e10ea305c70469122f6161a13", + "zh:888a69d371373b418549e03f5922becb2b8074cb463552ecfa65f30455708db0", + "zh:8a21bb7fe60383ff5ca9db8630a287e41fd520d2514c08874a16dc74500fadd7", + "zh:9c4663dcbfe58544642d70ebfdc6c5fa91592ff04164f77c655e32e6024483e2", + "zh:b322873f1209443a8407d5f782d7d917de6a1391239dbd0e7f809ce6507bed76", + "zh:b7c9d5ca14b818b5932ac58a490646a425ebc41b33d149090aa5f48d1ca35c99", + "zh:e76cd202b03749f3082b0cbe849fd2e731cf3f9a6aa994d2d629602c3aede36c", + ] +} diff --git a/environments/swarms-aws-agent-api/dev/us-east-1/components/vpc/main.tf b/environments/swarms-aws-agent-api/dev/us-east-1/components/vpc/main.tf new file mode 100644 index 0000000..303c8fc --- /dev/null +++ b/environments/swarms-aws-agent-api/dev/us-east-1/components/vpc/main.tf @@ -0,0 +1,234 @@ +#from https://github.com/terraform-aws-modules/terraform-aws-vpc + +data "aws_availability_zones" "available" {} + +locals { + name = "swarms" + + vpc_cidr = "10.0.0.0/16" + azs = 
slice(data.aws_availability_zones.available.names, 0, 3) + + tags = { + project = local.name +# GithubRepo = "terraform-aws-vpc" +# GithubOrg = "terraform-aws-modules" + } +} + +# resource "vpc" "swarms" { +# source = "terraform-aws-modules/vpc/aws" +# #source = "https://github.com/terraform-aws-modules/terraform-aws-vpc.git" +# name = "swarms" +# cidr = "10.0.0.0/16" +# private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"] +# public_subnets = ["10.0.101.0/24", "10.0.102.0/24", "10.0.103.0/24"] + +# enable_nat_gateway = true +# enable_vpn_gateway = true + +# tags = { +# Terraform = "true" +# Environment = "dev" +# } +# } + +# provider_name = "aws" + + + +################################################################################ +# VPC Module +################################################################################ + +module "vpc" { + source = "terraform-aws-modules/vpc/aws" + name = local.name + cidr = local.vpc_cidr + azs = local.azs + private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k)] + public_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 4)] + # database_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 8)] + # elasticache_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 12)] + # redshift_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 16)] + # intra_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 20)] + private_subnet_names = ["Private Subnet One", "Private Subnet Two"] + # # public_subnet_names omitted to show default name generation for all three subnets + # database_subnet_names = ["DB Subnet One"] + # elasticache_subnet_names = ["Elasticache Subnet One", "Elasticache Subnet Two"] + # redshift_subnet_names = ["Redshift Subnet One", "Redshift Subnet Two", "Redshift Subnet Three"] + # intra_subnet_names = [] + # create_database_subnet_group = false + # manage_default_network_acl = false + # 
manage_default_route_table = false + # manage_default_security_group = false + # enable_dns_hostnames = true + # enable_dns_support = true + enable_nat_gateway = false + # single_nat_gateway = false + # customer_gateways = { + # IP1 = { + # bgp_asn = 65112 + # ip_address = "1.2.3.4" + # device_name = "some_name" + # }, + # IP2 = { + # bgp_asn = 65112 + # ip_address = "5.6.7.8" + # } + # } + # enable_vpn_gateway = true + # enable_dhcp_options = true + # dhcp_options_domain_name = "service.consul" + # dhcp_options_domain_name_servers = ["127.0.0.1", "10.10.0.2"] + + # # VPC Flow Logs (Cloudwatch log group and IAM role will be created) + # vpc_flow_log_iam_role_name = "vpc-complete-example-role" + # vpc_flow_log_iam_role_use_name_prefix = false + # enable_flow_log = true + # create_flow_log_cloudwatch_log_group = true + # create_flow_log_cloudwatch_iam_role = true + # flow_log_max_aggregation_interval = 60 + + tags = local.tags +} + +# ################################################################################ +# # VPC Endpoints Module +# ################################################################################ + +# module "vpc_endpoints" { +# source = "../../modules/vpc-endpoints" + +# vpc_id = module.vpc.vpc_id + +# create_security_group = true +# security_group_name_prefix = "${local.name}-vpc-endpoints-" +# security_group_description = "VPC endpoint security group" +# security_group_rules = { +# ingress_https = { +# description = "HTTPS from VPC" +# cidr_blocks = [module.vpc.vpc_cidr_block] +# } +# } + +# endpoints = { +# s3 = { +# service = "s3" +# private_dns_enabled = true +# dns_options = { +# private_dns_only_for_inbound_resolver_endpoint = false +# } +# tags = { Name = "s3-vpc-endpoint" } +# }, +# dynamodb = { +# service = "dynamodb" +# service_type = "Gateway" +# route_table_ids = flatten([module.vpc.intra_route_table_ids, module.vpc.private_route_table_ids, module.vpc.public_route_table_ids]) +# policy = 
data.aws_iam_policy_document.dynamodb_endpoint_policy.json +# tags = { Name = "dynamodb-vpc-endpoint" } +# }, +# ecs = { +# service = "ecs" +# private_dns_enabled = true +# subnet_ids = module.vpc.private_subnets +# }, +# ecs_telemetry = { +# create = false +# service = "ecs-telemetry" +# private_dns_enabled = true +# subnet_ids = module.vpc.private_subnets +# }, +# ecr_api = { +# service = "ecr.api" +# private_dns_enabled = true +# subnet_ids = module.vpc.private_subnets +# policy = data.aws_iam_policy_document.generic_endpoint_policy.json +# }, +# ecr_dkr = { +# service = "ecr.dkr" +# private_dns_enabled = true +# subnet_ids = module.vpc.private_subnets +# policy = data.aws_iam_policy_document.generic_endpoint_policy.json +# }, +# rds = { +# service = "rds" +# private_dns_enabled = true +# subnet_ids = module.vpc.private_subnets +# security_group_ids = [aws_security_group.rds.id] +# }, +# } + +# tags = merge(local.tags, { +# Project = "Secret" +# Endpoint = "true" +# }) +# } + +# module "vpc_endpoints_nocreate" { +# source = "../../modules/vpc-endpoints" + +# create = false +# } + +# ################################################################################ +# # Supporting Resources +# ################################################################################ + +# data "aws_iam_policy_document" "dynamodb_endpoint_policy" { +# statement { +# effect = "Deny" +# actions = ["dynamodb:*"] +# resources = ["*"] + +# principals { +# type = "*" +# identifiers = ["*"] +# } + +# condition { +# test = "StringNotEquals" +# variable = "aws:sourceVpc" + +# values = [module.vpc.vpc_id] +# } +# } +# } + +# data "aws_iam_policy_document" "generic_endpoint_policy" { +# statement { +# effect = "Deny" +# actions = ["*"] +# resources = ["*"] + +# principals { +# type = "*" +# identifiers = ["*"] +# } + +# condition { +# test = "StringNotEquals" +# variable = "aws:SourceVpc" + +# values = [module.vpc.vpc_id] +# } +# } +# } + +# resource "aws_security_group" "rds" { +# 
name_prefix = "${local.name}-rds" +# description = "Allow PostgreSQL inbound traffic" +# vpc_id = module.vpc.vpc_id + +# ingress { +# description = "TLS from VPC" +# from_port = 5432 +# to_port = 5432 +# protocol = "tcp" +# cidr_blocks = [module.vpc.vpc_cidr_block] +# } + +# tags = local.tags +# } +output "vpc" { +value = module.vpc +} diff --git a/environments/swarms-aws-agent-api/dev/us-east-1/components/vpc/outputs.tf b/environments/swarms-aws-agent-api/dev/us-east-1/components/vpc/outputs.tf new file mode 100644 index 0000000..32381e3 --- /dev/null +++ b/environments/swarms-aws-agent-api/dev/us-east-1/components/vpc/outputs.tf @@ -0,0 +1,558 @@ +output "vpc_id" { + description = "The ID of the VPC" + value = module.vpc.vpc_id +} + +output "vpc_arn" { + description = "The ARN of the VPC" + value = module.vpc.vpc_arn +} + +output "vpc_cidr_block" { + description = "The CIDR block of the VPC" + value = module.vpc.vpc_cidr_block +} + +output "default_security_group_id" { + description = "The ID of the security group created by default on VPC creation" + value = module.vpc.default_security_group_id +} + +output "default_network_acl_id" { + description = "The ID of the default network ACL" + value = module.vpc.default_network_acl_id +} + +output "default_route_table_id" { + description = "The ID of the default route table" + value = module.vpc.default_route_table_id +} + +output "vpc_instance_tenancy" { + description = "Tenancy of instances spin up within VPC" + value = module.vpc.vpc_instance_tenancy +} + +output "vpc_enable_dns_support" { + description = "Whether or not the VPC has DNS support" + value = module.vpc.vpc_enable_dns_support +} + +output "vpc_enable_dns_hostnames" { + description = "Whether or not the VPC has DNS hostname support" + value = module.vpc.vpc_enable_dns_hostnames +} + +output "vpc_main_route_table_id" { + description = "The ID of the main route table associated with this VPC" + value = module.vpc.vpc_main_route_table_id +} + +output 
"vpc_ipv6_association_id" { + description = "The association ID for the IPv6 CIDR block" + value = module.vpc.vpc_ipv6_association_id +} + +output "vpc_ipv6_cidr_block" { + description = "The IPv6 CIDR block" + value = module.vpc.vpc_ipv6_cidr_block +} + +output "vpc_secondary_cidr_blocks" { + description = "List of secondary CIDR blocks of the VPC" + value = module.vpc.vpc_secondary_cidr_blocks +} + +output "vpc_owner_id" { + description = "The ID of the AWS account that owns the VPC" + value = module.vpc.vpc_owner_id +} + +output "private_subnets" { + description = "List of IDs of private subnets" + value = module.vpc.private_subnets +} + +output "private_subnet_arns" { + description = "List of ARNs of private subnets" + value = module.vpc.private_subnet_arns +} + +output "private_subnets_cidr_blocks" { + description = "List of cidr_blocks of private subnets" + value = module.vpc.private_subnets_cidr_blocks +} + +output "private_subnets_ipv6_cidr_blocks" { + description = "List of IPv6 cidr_blocks of private subnets in an IPv6 enabled VPC" + value = module.vpc.private_subnets_ipv6_cidr_blocks +} + +output "public_subnets" { + description = "List of IDs of public subnets" + value = module.vpc.public_subnets +} + +output "public_subnet_arns" { + description = "List of ARNs of public subnets" + value = module.vpc.public_subnet_arns +} + +output "public_subnets_cidr_blocks" { + description = "List of cidr_blocks of public subnets" + value = module.vpc.public_subnets_cidr_blocks +} + +output "public_subnets_ipv6_cidr_blocks" { + description = "List of IPv6 cidr_blocks of public subnets in an IPv6 enabled VPC" + value = module.vpc.public_subnets_ipv6_cidr_blocks +} + +# output "outpost_subnets" { +# description = "List of IDs of outpost subnets" +# value = module.vpc.outpost_subnets +# } + +# output "outpost_subnet_arns" { +# description = "List of ARNs of outpost subnets" +# value = module.vpc.outpost_subnet_arns +# } + +# output "outpost_subnets_cidr_blocks" { +# 
description = "List of cidr_blocks of outpost subnets" +# value = module.vpc.outpost_subnets_cidr_blocks +# } + +# output "outpost_subnets_ipv6_cidr_blocks" { +# description = "List of IPv6 cidr_blocks of outpost subnets in an IPv6 enabled VPC" +# value = module.vpc.outpost_subnets_ipv6_cidr_blocks +# } + +# output "database_subnets" { +# description = "List of IDs of database subnets" +# value = module.vpc.database_subnets +# } + +# output "database_subnet_arns" { +# description = "List of ARNs of database subnets" +# value = module.vpc.database_subnet_arns +# } + +# output "database_subnets_cidr_blocks" { +# description = "List of cidr_blocks of database subnets" +# value = module.vpc.database_subnets_cidr_blocks +# } + +# output "database_subnets_ipv6_cidr_blocks" { +# description = "List of IPv6 cidr_blocks of database subnets in an IPv6 enabled VPC" +# value = module.vpc.database_subnets_ipv6_cidr_blocks +# } + +# output "database_subnet_group" { +# description = "ID of database subnet group" +# value = module.vpc.database_subnet_group +# } + +# output "database_subnet_group_name" { +# description = "Name of database subnet group" +# value = module.vpc.database_subnet_group_name +# } + +# output "redshift_subnets" { +# description = "List of IDs of redshift subnets" +# value = module.vpc.redshift_subnets +# } + +# output "redshift_subnet_arns" { +# description = "List of ARNs of redshift subnets" +# value = module.vpc.redshift_subnet_arns +# } + +# output "redshift_subnets_cidr_blocks" { +# description = "List of cidr_blocks of redshift subnets" +# value = module.vpc.redshift_subnets_cidr_blocks +# } + +# output "redshift_subnets_ipv6_cidr_blocks" { +# description = "List of IPv6 cidr_blocks of redshift subnets in an IPv6 enabled VPC" +# value = module.vpc.redshift_subnets_ipv6_cidr_blocks +# } + +# output "redshift_subnet_group" { +# description = "ID of redshift subnet group" +# value = module.vpc.redshift_subnet_group +# } + +# output "elasticache_subnets" 
{ +# description = "List of IDs of elasticache subnets" +# value = module.vpc.elasticache_subnets +# } + +# output "elasticache_subnet_arns" { +# description = "List of ARNs of elasticache subnets" +# value = module.vpc.elasticache_subnet_arns +# } + +# output "elasticache_subnets_cidr_blocks" { +# description = "List of cidr_blocks of elasticache subnets" +# value = module.vpc.elasticache_subnets_cidr_blocks +# } + +# output "elasticache_subnets_ipv6_cidr_blocks" { +# description = "List of IPv6 cidr_blocks of elasticache subnets in an IPv6 enabled VPC" +# value = module.vpc.elasticache_subnets_ipv6_cidr_blocks +# } + +# output "intra_subnets" { +# description = "List of IDs of intra subnets" +# value = module.vpc.intra_subnets +# } + +# output "intra_subnet_arns" { +# description = "List of ARNs of intra subnets" +# value = module.vpc.intra_subnet_arns +# } + +# output "intra_subnets_cidr_blocks" { +# description = "List of cidr_blocks of intra subnets" +# value = module.vpc.intra_subnets_cidr_blocks +# } + +# output "intra_subnets_ipv6_cidr_blocks" { +# description = "List of IPv6 cidr_blocks of intra subnets in an IPv6 enabled VPC" +# value = module.vpc.intra_subnets_ipv6_cidr_blocks +# } + +# output "elasticache_subnet_group" { +# description = "ID of elasticache subnet group" +# value = module.vpc.elasticache_subnet_group +# } + +# output "elasticache_subnet_group_name" { +# description = "Name of elasticache subnet group" +# value = module.vpc.elasticache_subnet_group_name +# } + +output "public_route_table_ids" { + description = "List of IDs of public route tables" + value = module.vpc.public_route_table_ids +} + +output "private_route_table_ids" { + description = "List of IDs of private route tables" + value = module.vpc.private_route_table_ids +} + +# output "database_route_table_ids" { +# description = "List of IDs of database route tables" +# value = module.vpc.database_route_table_ids +# } + +# output "redshift_route_table_ids" { +# description = "List 
of IDs of redshift route tables" +# value = module.vpc.redshift_route_table_ids +# } + +# output "elasticache_route_table_ids" { +# description = "List of IDs of elasticache route tables" +# value = module.vpc.elasticache_route_table_ids +# } + +# output "intra_route_table_ids" { +# description = "List of IDs of intra route tables" +# value = module.vpc.intra_route_table_ids +# } + +output "public_internet_gateway_route_id" { + description = "ID of the internet gateway route" + value = module.vpc.public_internet_gateway_route_id +} + +output "public_internet_gateway_ipv6_route_id" { + description = "ID of the IPv6 internet gateway route" + value = module.vpc.public_internet_gateway_ipv6_route_id +} + +# output "database_internet_gateway_route_id" { +# description = "ID of the database internet gateway route" +# value = module.vpc.database_internet_gateway_route_id +# } + +# output "database_nat_gateway_route_ids" { +# description = "List of IDs of the database nat gateway route" +# value = module.vpc.database_nat_gateway_route_ids +# } + +# output "database_ipv6_egress_route_id" { +# description = "ID of the database IPv6 egress route" +# value = module.vpc.database_ipv6_egress_route_id +# } + +output "private_nat_gateway_route_ids" { + description = "List of IDs of the private nat gateway route" + value = module.vpc.private_nat_gateway_route_ids +} + +output "private_ipv6_egress_route_ids" { + description = "List of IDs of the ipv6 egress route" + value = module.vpc.private_ipv6_egress_route_ids +} + +output "private_route_table_association_ids" { + description = "List of IDs of the private route table association" + value = module.vpc.private_route_table_association_ids +} + +# output "database_route_table_association_ids" { +# description = "List of IDs of the database route table association" +# value = module.vpc.database_route_table_association_ids +# } + +# output "redshift_route_table_association_ids" { +# description = "List of IDs of the redshift route 
table association" +# value = module.vpc.redshift_route_table_association_ids +# } + +# output "redshift_public_route_table_association_ids" { +# description = "List of IDs of the public redshift route table association" +# value = module.vpc.redshift_public_route_table_association_ids +# } + +# output "elasticache_route_table_association_ids" { +# description = "List of IDs of the elasticache route table association" +# value = module.vpc.elasticache_route_table_association_ids +# } + +# output "intra_route_table_association_ids" { +# description = "List of IDs of the intra route table association" +# value = module.vpc.intra_route_table_association_ids +# } + +output "public_route_table_association_ids" { + description = "List of IDs of the public route table association" + value = module.vpc.public_route_table_association_ids +} + +output "dhcp_options_id" { + description = "The ID of the DHCP options" + value = module.vpc.dhcp_options_id +} + +output "nat_ids" { + description = "List of allocation ID of Elastic IPs created for AWS NAT Gateway" + value = module.vpc.nat_ids +} + +output "nat_public_ips" { + description = "List of public Elastic IPs created for AWS NAT Gateway" + value = module.vpc.nat_public_ips +} + +output "natgw_ids" { + description = "List of NAT Gateway IDs" + value = module.vpc.natgw_ids +} + +output "igw_id" { + description = "The ID of the Internet Gateway" + value = module.vpc.igw_id +} + +output "igw_arn" { + description = "The ARN of the Internet Gateway" + value = module.vpc.igw_arn +} + +output "egress_only_internet_gateway_id" { + description = "The ID of the egress only Internet Gateway" + value = module.vpc.egress_only_internet_gateway_id +} + +output "cgw_ids" { + description = "List of IDs of Customer Gateway" + value = module.vpc.cgw_ids +} + +output "cgw_arns" { + description = "List of ARNs of Customer Gateway" + value = module.vpc.cgw_arns +} + +output "this_customer_gateway" { + description = "Map of Customer Gateway 
attributes" + value = module.vpc.this_customer_gateway +} + +output "vgw_id" { + description = "The ID of the VPN Gateway" + value = module.vpc.vgw_id +} + +output "vgw_arn" { + description = "The ARN of the VPN Gateway" + value = module.vpc.vgw_arn +} + +output "default_vpc_id" { + description = "The ID of the Default VPC" + value = module.vpc.default_vpc_id +} + +output "default_vpc_arn" { + description = "The ARN of the Default VPC" + value = module.vpc.default_vpc_arn +} + +output "default_vpc_cidr_block" { + description = "The CIDR block of the Default VPC" + value = module.vpc.default_vpc_cidr_block +} + +output "default_vpc_default_security_group_id" { + description = "The ID of the security group created by default on Default VPC creation" + value = module.vpc.default_vpc_default_security_group_id +} + +output "default_vpc_default_network_acl_id" { + description = "The ID of the default network ACL of the Default VPC" + value = module.vpc.default_vpc_default_network_acl_id +} + +output "default_vpc_default_route_table_id" { + description = "The ID of the default route table of the Default VPC" + value = module.vpc.default_vpc_default_route_table_id +} + +output "default_vpc_instance_tenancy" { + description = "Tenancy of instances spin up within Default VPC" + value = module.vpc.default_vpc_instance_tenancy +} + +output "default_vpc_enable_dns_support" { + description = "Whether or not the Default VPC has DNS support" + value = module.vpc.default_vpc_enable_dns_support +} + +output "default_vpc_enable_dns_hostnames" { + description = "Whether or not the Default VPC has DNS hostname support" + value = module.vpc.default_vpc_enable_dns_hostnames +} + +output "default_vpc_main_route_table_id" { + description = "The ID of the main route table associated with the Default VPC" + value = module.vpc.default_vpc_main_route_table_id +} + +output "public_network_acl_id" { + description = "ID of the public network ACL" + value = module.vpc.public_network_acl_id +} + 
+output "public_network_acl_arn" { + description = "ARN of the public network ACL" + value = module.vpc.public_network_acl_arn +} + +output "private_network_acl_id" { + description = "ID of the private network ACL" + value = module.vpc.private_network_acl_id +} + +output "private_network_acl_arn" { + description = "ARN of the private network ACL" + value = module.vpc.private_network_acl_arn +} + +# output "outpost_network_acl_id" { +# description = "ID of the outpost network ACL" +# value = module.vpc.outpost_network_acl_id +# } + +# output "outpost_network_acl_arn" { +# description = "ARN of the outpost network ACL" +# value = module.vpc.outpost_network_acl_arn +# } + +# output "intra_network_acl_id" { +# description = "ID of the intra network ACL" +# value = module.vpc.intra_network_acl_id +# } + +# output "intra_network_acl_arn" { +# description = "ARN of the intra network ACL" +# value = module.vpc.intra_network_acl_arn +# } + +# output "database_network_acl_id" { +# description = "ID of the database network ACL" +# value = module.vpc.database_network_acl_id +# } + +# output "database_network_acl_arn" { +# description = "ARN of the database network ACL" +# value = module.vpc.database_network_acl_arn +# } + +# output "redshift_network_acl_id" { +# description = "ID of the redshift network ACL" +# value = module.vpc.redshift_network_acl_id +# } + +# output "redshift_network_acl_arn" { +# description = "ARN of the redshift network ACL" +# value = module.vpc.redshift_network_acl_arn +# } + +# output "elasticache_network_acl_id" { +# description = "ID of the elasticache network ACL" +# value = module.vpc.elasticache_network_acl_id +# } + +# output "elasticache_network_acl_arn" { +# description = "ARN of the elasticache network ACL" +# value = module.vpc.elasticache_network_acl_arn +# } + +# VPC flow log +output "vpc_flow_log_id" { + description = "The ID of the Flow Log resource" + value = module.vpc.vpc_flow_log_id +} + +output "vpc_flow_log_destination_arn" { + 
description = "The ARN of the destination for VPC Flow Logs" + value = module.vpc.vpc_flow_log_destination_arn +} + +output "vpc_flow_log_destination_type" { + description = "The type of the destination for VPC Flow Logs" + value = module.vpc.vpc_flow_log_destination_type +} + +output "vpc_flow_log_cloudwatch_iam_role_arn" { + description = "The ARN of the IAM role used when pushing logs to Cloudwatch log group" + value = module.vpc.vpc_flow_log_cloudwatch_iam_role_arn +} + +# VPC endpoints +# output "vpc_endpoints" { +# description = "Array containing the full resource object and attributes for all endpoints created" +# value = module.vpc_endpoints.endpoints +# } + +# output "vpc_endpoints_security_group_arn" { +# description = "Amazon Resource Name (ARN) of the security group" +# value = module.vpc_endpoints.security_group_arn +# } + +# output "vpc_endpoints_security_group_id" { +# description = "ID of the security group" +# value = module.vpc_endpoints.security_group_id +# } +output ec2_public_subnet_id_2 { + value = module.vpc.public_subnets[1] +} + +output ec2_public_subnet_id_1 { + value = module.vpc.public_subnets[0] +} diff --git a/environments/swarms-aws-agent-api/dev/us-east-1/components/vpc/versions.tf b/environments/swarms-aws-agent-api/dev/us-east-1/components/vpc/versions.tf new file mode 100644 index 0000000..e69de29 diff --git a/environments/swarms-aws-agent-api/dev/us-east-1/main.tf b/environments/swarms-aws-agent-api/dev/us-east-1/main.tf index fedbe36..c811e52 100644 --- a/environments/swarms-aws-agent-api/dev/us-east-1/main.tf +++ b/environments/swarms-aws-agent-api/dev/us-east-1/main.tf @@ -1,304 +1,147 @@ -#task :break this up into subtasks we will call you back for each subtask with this large text. -# meta terraform quine meme -# create a terraform module that will call you back with its text. -# each output will be a new more refined terraform module that will be executed -# and the results send back to you and documented. 
- -# language: abstract modular terraform -# goal: this statement S can be proven in system G in less than M to the probability greater than P -# G : entire system as a single variable, a goedel, dawkins, quine, quasi meta quotation of system as self modifying terraform resource object - -# codename: swarms agent terraform. -# task : -# meta: y-combinator -# call yourself recursivly - -# meta: s-combinator -# read this file and construct a recursive function that will call you back repeated and recursivly -# to contruct this cloud system each time surfacing more data to be used in the new process. -# project: swarm terraform -# mode: epic heros journey -# journey : bootstrap -# bootstrap: abstract native cloud -# recursive evaluation -# resources -# 1. local kind cluster -# 1.2 running argo cicd - -# 1.2.1 creating resources -# 1.2.1.1 build or buy -# 1.2.1.1.1 market quote - -# 1.1.1 creating other cloud resources - -# 1.1.1.1 creating aws accounts - -# 1.1.1.2 configuring secrets -# 1.1.1.2.1 to hold wallets -# 1.1.1.3 configuring IAM user authentication -# 1.1.1.3.0 creating siloed accounts that have minimal access needed. -# 1.1.1.3.1 configuring oauth providers on cloud -# 1.1.1.3.1.1 auth0|cognito|github apps as entry points to our network. -# 1.1.1.3.1.2 eth wallets as entry point -# 1.1.1.3.1.3 otp links -# 1.1.1.3.1.4 otp qr code -# qr codes as urls as entry points -# 1.1.1.3.1.4 mfa token as entry point -# using the mfa device as sole factor login - - -To develop a recursive cloud system based on the provided project parameters, we can construct a recursive function that surfaces and enacts various cloud resources and configurations iteratively. Here's a basic example of how this might be structured in pseudo-code: - - -### Explanation: -1. **Recursive Function**: `swarm_terraform(level)` serves as the main function that sets up resources based on the current recursion depth. -2. 
**Setup Functions**: Helper functions are defined for local clusters, CI/CD, AWS accounts, secrets, and IAM. -3. **Level-Based Logic**: The creation of resources and configurations is determined based on the recursion level, allowing the system to build incrementally. -4. **Termination Condition**: The recursion stops at a defined `MAX_LEVEL` to prevent infinite execution. - -### Steps to Execution: -- Implement specific logic within each helper function as per your project needs. -- Adjust the `MAX_LEVEL` variable based on how detailed or expansive you want the recursive processing to be. - -This structure provides a solid foundation for building out the cloud infrastructure recursively, driven by the initial parameters you've outlined. - - -To express this concept in a modular Terraform-style pseudo-language, you may want to define a resource that captures the essence of your statement S while incorporating the probabilistic and self-referential properties of system G. Here’s a concise example: - -```hcl -resource "godel_dawkins_quine" "system_G" { - name = "G" - probability = var.P - threshold = var.M - - statement { - content = "S can be proven in system G" - complexity = calculate_complexity(content) # define this function for complexity measure - proven_in_time_less_than_M = complexity < var.M - } - - self_modifying { - enabled = true - modifications = ["update_probability", "redefine_threshold"] +locals { + # instance_type = "t3.large" + # instance_type = "t3.medium" + ami_name = "ubuntu/images/hvm-ssd-gp3/ubuntu-noble-24.04-amd64-server-*" + name = "swarms" + region = "us-east-2" + domain = var.domain + tags = { + project="swarms" } } +variable domain {} +variable ami_id {} +variable tags {} +variable name {} -variable "P" { - description = "Probability threshold" - type = number - default = 0.95 -} -variable "M" { - description = "Maximum time for proof" - type = number - default = 100 # example limit on time +locals { +# ami_id = data.aws_ami.ami.id + 
ami_id = var.ami_id + } -``` - - - -It seems there might have been a misunderstanding or you are looking for further clarification. Here's an extended Terraform module example that incorporates recursive processes and configurations for cloud resources based on your initial request: - -```hcl -terraform { - required_providers { - aws = { - source = "hashicorp/aws" - version = "~> 3.0" - } - } +# SLOW +module "vpc" { + source = "./components/vpc" } -variable "max_levels" { - description = "Maximum level of recursion" - type = number - default = 3 +locals { + ec2_public_subnet_id_1 = module.vpc.ec2_public_subnet_id_1 + ec2_public_subnet_id_2 = module.vpc.ec2_public_subnet_id_2 + vpc_id = module.vpc.vpc_id } -resource "aws_iam_user" "swarm_user" { - count = var.max_levels - - name = "swarm_user_${count.index}" - - tags = { - Level = count.index - } +module "security" { + source = "./components/security" + vpc_id = local.vpc_id + tags = local.tags + name = local.name } -resource "aws_secretsmanager_secret" "swarm_secret" { - count = var.max_levels - - name = "swarm_secret_${count.index}" - - description = "Secret for level ${count.index}" +module "kp" { + source = "./components/keypairs" } -resource "aws_cognito_user_pool" "swarm_user_pool" { - count = var.max_levels +# module "lt" { +# instance_type = local.instance_type +# security_group_id = module.security.security_group_id +# source = "./components/launch_template" +# } - name = "swarm_user_pool_${count.index}" - alias_attributes = ["email"] +# module "asg" { +# source = "./components/autoscaling_group" +# name="swarms" +# security_group_id = module.security.security_group_id +# instance_type = local.instance_type +# launch_template_id = module.lt.launch_template_id +# } - lambda_config { - pre_sign_up = aws_lambda_function.pre_sign_up[count.index].arn - } +variable "instance_types" { + type = list(string) + default = [ + # "t4g.nano", "t3a.nano", "t3.nano", "t2.nano", + # "t4g.micro", "t3a.micro", "t3.micro", 
"t2.micro", "t1.micro", + #"t4g.small", "t3a.small", + #"t3.small", + #"t2.small", not working + # "t2.medium" # + "t3.medium" + ] } -resource "aws_lambda_function" "pre_sign_up" { - count = var.max_levels - - function_name = "pre_sign_up_${count.index}" - runtime = "nodejs14.x" - - handler = "index.handler" - source_code_hash = filebase64sha256("path_to_your_lambda_zip_${count.index}.zip") - - role = aws_iam_role.lambda_exec.arn -} +resource "aws_iam_role" "ssm" { + name = "ssm-${local.name}" + tags = var.tags -resource "aws_iam_role" "lambda_exec" { - name = "lambda_exec_role" - assume_role_policy = jsonencode({ - Version = "2012-10-17" - Statement = [{ - Action = "sts:AssumeRole" - Principal = { - Service = "lambda.amazonaws.com" + Version = "2012-10-17", + Statement = [ + { + Action = "sts:AssumeRole", + Principal = { + Service = "ec2.amazonaws.com" + }, + Effect = "Allow", + Sid = "" } - Effect = "Allow" - Sid = "" - }] + ] }) } -output "swarm_users" { - value = aws_iam_user.swarm_user[*].name +resource "aws_iam_instance_profile" "ssm" { + name = "ssm-${var.name}" + role = aws_iam_role.ssm.name + tags = var.tags } -output "swarm_secrets" { - value = aws_secretsmanager_secret.swarm_secret[*].name +module "lt_dynamic" { + vpc_id = local.vpc_id + for_each = toset(var.instance_types) + instance_type = each.key + name = "swarms-size-${each.key}" + security_group_id = module.security.internal_security_group_id + ami_id = var.ami_id + tags= local.tags + source = "./components/launch_template" + iam_instance_profile_name = aws_iam_instance_profile.ssm.name } -output "swarm_user_pools" { - value = aws_cognito_user_pool.swarm_user_pool[*].id +output security_group_id { + value = module.security.security_group_id } -``` - -### Key Components: - -- **Recursive Resource Creation**: The use of the `count` parameter simulates recursion by creating multiple instances of resources based on the specified `max_levels`. 
- -- **IAM Users and Secrets**: Creates IAM users and AWS secrets for each level. - -- **Cognito User Pools**: Sets up a user pool for authentication at each recursion depth. - -- **Outputs**: Outputs the created IAM users, secrets, and Cognito user pools, allowing you to track the resources created at each level. - -### Execution: -- Adjust `max_levels` as needed to control the depth of resource creation. -- Ensure that the paths to any Lambda function code or specific policies are set correctly. - -This example captures the essence of a meta Terraform module while following the recursive and self-referential principles you've outlined. Let me know how you would like to expand or modify this! - -It seems you're looking for further examples or a continuation of the Terraform module design. Here's a more comprehensive modular approach that emphasizes recursion and the creation of various cloud resources, broken down into subtasks for clarity: - -### Main Terraform Configuration - -```hcl -module "swarm_terraform" { - source = "./modules/swarm" # Define the module source - level = 1 # Start at recursion level 1 - max_levels = var.max_levels # Maximum recursion depth - aws_region = var.aws_region # AWS region variable +output vpc { + value = module.vpc } -variable "max_levels" { - description = "Maximum depth for resource creation" - type = number - default = 3 -} -variable "aws_region" { - description = "AWS region to deploy resources" - type = string - default = "us-east-1" +module "alb" { + source = "./components/application_load_balancer" + domain_name = local.domain + security_group_id = module.security.security_group_id # allowed to talk to internal + public_subnets = [ + local.ec2_public_subnet_id_1, + local.ec2_public_subnet_id_2 ] + vpc_id = local.vpc_id + name = local.name } -``` - -### Module: `modules/swarm/main.tf` - -```hcl -resource "aws_iam_user" "swarm_user" { - count = var.level <= var.max_levels ? 
1 : 0 - - name = "swarm_user_${var.level}" - - tags = { - Level = var.level - } +output alb { + value = module.alb } -resource "aws_secretsmanager_secret" "swarm_secret" { - count = var.level <= var.max_levels ? 1 : 0 - name = "swarm_secret_${var.level}" +module "asg_dynamic" { + tags = local.tags + vpc_id = local.vpc_id + image_id = local.ami_id + ec2_subnet_id = module.vpc.ec2_public_subnet_id_1 + for_each = toset(var.instance_types) + aws_iam_instance_profile_ssm_arn = aws_iam_instance_profile.ssm.arn + source = "./components/autoscaling_group" +# security_group_id = module.security.internal_security_group_id + instance_type = each.key + name = "swarms-size-${each.key}" + launch_template_id = module.lt_dynamic[each.key].launch_template_id + target_group_arn = module.alb.alb_target_group_arn } - -resource "aws_lambda_function" "pre_sign_up" { - count = var.level <= var.max_levels ? 1 : 0 - - function_name = "pre_sign_up_${var.level}" - runtime = "nodejs14.x" - handler = "index.handler" - - # Assuming the source code is provided - source_code_hash = filebase64sha256("path_to_your_lambda_zip_${var.level}.zip") - role = aws_iam_role.lambda_exec.arn -} - -resource "aws_iam_role" "lambda_exec" { - name = "lambda_exec_role" - - assume_role_policy = jsonencode({ - Version = "2012-10-17" - Statement = [{ - Action = "sts:AssumeRole" - Principal = { - Service = "lambda.amazonaws.com" - } - Effect = "Allow" - Sid = "" - }] - }) -} - -# Recursive module call to create resources for the next level -module "swarm_terraform_next" { - source = "./main.tf" - level = var.level + 1 - max_levels = var.max_levels - aws_region = var.aws_region - depends_on = [aws_iam_user.swarm_user, aws_secretsmanager_secret.swarm_secret, aws_lambda_function.pre_sign_up] -} -``` - -### Explanation: - -1. **Modular Structure**: This approach divides the resources into a module, making it reusable and expandable. - -2. 
**Recursion Handling**: The module checks if the current `level` is less than or equal to `max_levels`. If it is, it creates the corresponding resources and calls itself for the next level. - -3. **IAM User and Lambda Function**: Each recurred level creates new IAM users, Secrets Manager secrets, and Lambda functions, maintaining the recursion. - -4. **Outputs**: Outputs can be included to retrieve created resource details at the end of processing, although they are not explicitly shown in this snippet. - -### Execution Steps: -- Ensure you have the appropriate IAM permissions and AWS configuration. -- Adjust the `max_levels` variable as desired, and ensure the paths to any Lambda function source code are correctly defined. -- Use `terraform apply` to trigger the recursion and monitor resource creation. - -This format should give you a robust foundation for developing a complex, recursive Terraform setup. If you need further adjustments or detailed explanations on specific elements, let me know! diff --git a/environments/swarms-aws-agent-api/dev/us-east-1/refresh.sh b/environments/swarms-aws-agent-api/dev/us-east-1/refresh.sh new file mode 100644 index 0000000..40ce118 --- /dev/null +++ b/environments/swarms-aws-agent-api/dev/us-east-1/refresh.sh @@ -0,0 +1,2 @@ +aws autoscaling start-instance-refresh \ + --auto-scaling-group-name swarms-20241214050736278200000007 diff --git a/environments/swarms-aws-agent-api/dev/us-east-1/seed.md b/environments/swarms-aws-agent-api/dev/us-east-1/seed.md new file mode 100644 index 0000000..e49e882 --- /dev/null +++ b/environments/swarms-aws-agent-api/dev/us-east-1/seed.md @@ -0,0 +1,304 @@ +# #task :break this up into subtasks we will call you back for each subtask with this large text. +# # meta terraform quine meme +# # create a terraform module that will call you back with its text. +# # each output will be a new more refined terraform module that will be executed +# # and the results send back to you and documented. 
+ +# # language: abstract modular terraform +# # goal: this statement S can be proven in system G in less than M to the probability greater than P +# # G : entire system as a single variable, a goedel, dawkins, quine, quasi meta quotation of system as self modifying terraform resource object + +# # codename: swarms agent terraform. +# # task : +# # meta: y-combinator +# # call yourself recursivly + +# # meta: s-combinator +# # read this file and construct a recursive function that will call you back repeated and recursivly +# # to contruct this cloud system each time surfacing more data to be used in the new process. +# # project: swarm terraform +# # mode: epic heros journey +# # journey : bootstrap +# # bootstrap: abstract native cloud +# # recursive evaluation +# # resources +# # 1. local kind cluster +# # 1.2 running argo cicd + +# # 1.2.1 creating resources +# # 1.2.1.1 build or buy +# # 1.2.1.1.1 market quote + +# # 1.1.1 creating other cloud resources + +# # 1.1.1.1 creating aws accounts + +# # 1.1.1.2 configuring secrets +# # 1.1.1.2.1 to hold wallets +# # 1.1.1.3 configuring IAM user authentication +# # 1.1.1.3.0 creating siloed accounts that have minimal access needed. +# # 1.1.1.3.1 configuring oauth providers on cloud +# # 1.1.1.3.1.1 auth0|cognito|github apps as entry points to our network. +# # 1.1.1.3.1.2 eth wallets as entry point +# # 1.1.1.3.1.3 otp links +# # 1.1.1.3.1.4 otp qr code +# # qr codes as urls as entry points +# # 1.1.1.3.1.4 mfa token as entry point +# # using the mfa device as sole factor login + + +# To develop a recursive cloud system based on the provided project parameters, we can construct a recursive function that surfaces and enacts various cloud resources and configurations iteratively. Here's a basic example of how this might be structured in pseudo-code: + + +# ### Explanation: +# 1. **Recursive Function**: `swarm_terraform(level)` serves as the main function that sets up resources based on the current recursion depth. 
+# 2. **Setup Functions**: Helper functions are defined for local clusters, CI/CD, AWS accounts, secrets, and IAM. +# 3. **Level-Based Logic**: The creation of resources and configurations is determined based on the recursion level, allowing the system to build incrementally. +# 4. **Termination Condition**: The recursion stops at a defined `MAX_LEVEL` to prevent infinite execution. + +# ### Steps to Execution: +# - Implement specific logic within each helper function as per your project needs. +# - Adjust the `MAX_LEVEL` variable based on how detailed or expansive you want the recursive processing to be. + +# This structure provides a solid foundation for building out the cloud infrastructure recursively, driven by the initial parameters you've outlined. + + +# To express this concept in a modular Terraform-style pseudo-language, you may want to define a resource that captures the essence of your statement S while incorporating the probabilistic and self-referential properties of system G. Here’s a concise example: + +# ```hcl +# resource "godel_dawkins_quine" "system_G" { +# name = "G" +# probability = var.P +# threshold = var.M + +# statement { +# content = "S can be proven in system G" +# complexity = calculate_complexity(content) # define this function for complexity measure +# proven_in_time_less_than_M = complexity < var.M +# } + +# self_modifying { +# enabled = true +# modifications = ["update_probability", "redefine_threshold"] +# } +# } + +# variable "P" { +# description = "Probability threshold" +# type = number +# default = 0.95 +# } + +# variable "M" { +# description = "Maximum time for proof" +# type = number +# default = 100 # example limit on time +# } +# ``` + + + + +# It seems there might have been a misunderstanding or you are looking for further clarification. 
Here's an extended Terraform module example that incorporates recursive processes and configurations for cloud resources based on your initial request: + +# ```hcl +# terraform { +# required_providers { +# aws = { +# source = "hashicorp/aws" +# version = "~> 3.0" +# } +# } +# } + +# variable "max_levels" { +# description = "Maximum level of recursion" +# type = number +# default = 3 +# } + +# resource "aws_iam_user" "swarm_user" { +# count = var.max_levels + +# name = "swarm_user_${count.index}" + +# tags = { +# Level = count.index +# } +# } + +# resource "aws_secretsmanager_secret" "swarm_secret" { +# count = var.max_levels + +# name = "swarm_secret_${count.index}" + +# description = "Secret for level ${count.index}" +# } + +# resource "aws_cognito_user_pool" "swarm_user_pool" { +# count = var.max_levels + +# name = "swarm_user_pool_${count.index}" +# alias_attributes = ["email"] + +# lambda_config { +# pre_sign_up = aws_lambda_function.pre_sign_up[count.index].arn +# } +# } + +# resource "aws_lambda_function" "pre_sign_up" { +# count = var.max_levels + +# function_name = "pre_sign_up_${count.index}" +# runtime = "nodejs14.x" + +# handler = "index.handler" +# source_code_hash = filebase64sha256("path_to_your_lambda_zip_${count.index}.zip") + +# role = aws_iam_role.lambda_exec.arn +# } + +# resource "aws_iam_role" "lambda_exec" { +# name = "lambda_exec_role" + +# assume_role_policy = jsonencode({ +# Version = "2012-10-17" +# Statement = [{ +# Action = "sts:AssumeRole" +# Principal = { +# Service = "lambda.amazonaws.com" +# } +# Effect = "Allow" +# Sid = "" +# }] +# }) +# } + +# output "swarm_users" { +# value = aws_iam_user.swarm_user[*].name +# } + +# output "swarm_secrets" { +# value = aws_secretsmanager_secret.swarm_secret[*].name +# } + +# output "swarm_user_pools" { +# value = aws_cognito_user_pool.swarm_user_pool[*].id +# } +# ``` + +# ### Key Components: + +# - **Recursive Resource Creation**: The use of the `count` parameter simulates recursion by creating 
multiple instances of resources based on the specified `max_levels`. + +# - **IAM Users and Secrets**: Creates IAM users and AWS secrets for each level. + +# - **Cognito User Pools**: Sets up a user pool for authentication at each recursion depth. + +# - **Outputs**: Outputs the created IAM users, secrets, and Cognito user pools, allowing you to track the resources created at each level. + +# ### Execution: +# - Adjust `max_levels` as needed to control the depth of resource creation. +# - Ensure that the paths to any Lambda function code or specific policies are set correctly. + +# This example captures the essence of a meta Terraform module while following the recursive and self-referential principles you've outlined. Let me know how you would like to expand or modify this! + + +# It seems you're looking for further examples or a continuation of the Terraform module design. Here's a more comprehensive modular approach that emphasizes recursion and the creation of various cloud resources, broken down into subtasks for clarity: + +# ### Main Terraform Configuration + +# ```hcl +# module "swarm_terraform" { +# source = "./modules/swarm" # Define the module source +# level = 1 # Start at recursion level 1 +# max_levels = var.max_levels # Maximum recursion depth +# aws_region = var.aws_region # AWS region variable +# } + +# variable "max_levels" { +# description = "Maximum depth for resource creation" +# type = number +# default = 3 +# } + +# variable "aws_region" { +# description = "AWS region to deploy resources" +# type = string +# default = "us-east-1" +# } +# ``` + +# ### Module: `modules/swarm/main.tf` + +# ```hcl +# resource "aws_iam_user" "swarm_user" { +# count = var.level <= var.max_levels ? 1 : 0 + +# name = "swarm_user_${var.level}" + +# tags = { +# Level = var.level +# } +# } + +# resource "aws_secretsmanager_secret" "swarm_secret" { +# count = var.level <= var.max_levels ? 
1 : 0

# name = "swarm_secret_${var.level}"
# }

# resource "aws_lambda_function" "pre_sign_up" {
# count = var.level <= var.max_levels ? 1 : 0

# function_name = "pre_sign_up_${var.level}"
# runtime = "nodejs14.x"
# handler = "index.handler"

# # Assuming the source code is provided
# source_code_hash = filebase64sha256("path_to_your_lambda_zip_${var.level}.zip")
# role = aws_iam_role.lambda_exec.arn
# }

# resource "aws_iam_role" "lambda_exec" {
# name = "lambda_exec_role"

# assume_role_policy = jsonencode({
# Version = "2012-10-17"
# Statement = [{
# Action = "sts:AssumeRole"
# Principal = {
# Service = "lambda.amazonaws.com"
# }
# Effect = "Allow"
# Sid = ""
# }]
# })
# }

# # Recursive module call to create resources for the next level
# module "swarm_terraform_next" {
# source = "./main.tf"
# level = var.level + 1
# max_levels = var.max_levels
# aws_region = var.aws_region
# depends_on = [aws_iam_user.swarm_user, aws_secretsmanager_secret.swarm_secret, aws_lambda_function.pre_sign_up]
# }
# ```

# ### Explanation:

# 1. **Modular Structure**: This approach divides the resources into a module, making it reusable and expandable.

# 2. **Recursion Handling**: The module checks if the current `level` is less than or equal to `max_levels`. If it is, it creates the corresponding resources and calls itself for the next level.

# 3. **IAM User and Lambda Function**: Each recursion level creates new IAM users, Secrets Manager secrets, and Lambda functions, maintaining the recursion.

# 4. **Outputs**: Outputs can be included to retrieve created resource details at the end of processing, although they are not explicitly shown in this snippet.

# ### Execution Steps:
# - Ensure you have the appropriate IAM permissions and AWS configuration.
# - Adjust the `max_levels` variable as desired, and ensure the paths to any Lambda function source code are correctly defined. 
+# - Use `terraform apply` to trigger the recursion and monitor resource creation. + +# This format should give you a robust foundation for developing a complex, recursive Terraform setup. If you need further adjustments or detailed explanations on specific elements, let me know!