diff --git a/.github/workflows/call-swarms.yml b/.github/workflows/call-swarms.yml new file mode 100644 index 0000000..1797f9f --- /dev/null +++ b/.github/workflows/call-swarms.yml @@ -0,0 +1,27 @@ +on: + workflow_dispatch: + push: + branches: + - "feature/docker" +# pull_request: +# branches: +# - "main" +# paths: +# - ".github/workflows/call-swarms.yaml" + +name: CallSwarms +jobs: + caller-identity: + name: Return the IAM user + permissions: + contents: read + id-token: write + runs-on: ubuntu-latest + steps: + - name: Configure AWS credentials + uses: aws-actions/configure-aws-credentials@v4 + with: + aws-region: ${{ secrets.AWS_REGION }} + role-to-assume: arn:aws:iam::${{ secrets.AWS_ACCOUNT_ID }}:role/github + - uses: actions/checkout@v2 + - run: bash -x -e ./actions/call_swarms.sh diff --git a/.gitignore b/.gitignore index 0f90f0f..cbff7d2 100644 --- a/.gitignore +++ b/.gitignore @@ -88,3 +88,5 @@ flycheck_*.el # network security /network-security.data +/logs/logs/ +.aider* diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..de928aa --- /dev/null +++ b/Makefile @@ -0,0 +1,2 @@ +test: + find -name \*.tf -exec grep -r provider {} \; -print diff --git a/README.md b/README.md index afa820f..e7df890 100644 --- a/README.md +++ b/README.md @@ -1,13 +1,140 @@ +# one click installer plan + +Using cloudformation +https://github.com/meta-introspector/cfn-tf-meta-introspector/issues/1 still need someone to try this + +we want a step by step instructions to setup your aws +and then they click on this +https://console.aws.amazon.com/cloudformation/home?region=us-east-1#/stacks/new?stackName=tfcfn-custom-type-resources&templateURL=https://s3.amazonaws.com/ianmckay-us-east-1/cfn-tf-custom-types/template.yml + +and then we port our repo https://github.com/meta-introspector/ai-agent-terraform/tree/feature/aitokenteam into running inside of cloudformation + +TODO : Great asset + transform our terraform resources into cloudformation yaml to use tf provider + we 
could deploy azure or gcp from aws in terraform. + +For the mean time, we will deploy terraform. + +Another alternative is +https://runatlantis.io/ + +# solfun meme branch of ai-agent terraform + +This will deploy the : +1. solfunmeme dao agent to work for the solfunmeme dao +1.1. (eliza) + +2. solfunmeme web server(done) +2.1 to vercel +2.1.1. (done, manually not terraformed) +2.2 to aws (TBD) +https://codeberg.org/introspector/SOLFUNMEME/issues/25 + +stakeholders will login via the phantom wallet compatible web app with wallet and be able to vote on important decisions : + +3. terraform to admin discord : +who is admin in + telegram + discord + +what versions of what bots with what permissions do we have in telegram +what software projects are important to work on +what marketing and listing should we persue +etc. +This will be the core of the dao, I will use this as my oracle for every day decisions and try and act in the best interest of the team and hope all of us will. + +We are going to build something amazing, I have tons of work ready, many ideas ready to launch when we have this running. + +3. the dns records to namecheap. +3.1 done manually +3.2 add terraform + +4. cognito with web3 Integration +https://codeberg.org/introspector/SOLFUNMEME/issues/5 + +## agent spec : + +What the agent will do + +resolve tickets : + in codeberg and + in github and + later in gitlab, jira + +but talk in: + discord, + telegram, + twitter it should + +the agent will update the main web page as well. +we will deploy the main page to terraform as well. + +LANGUAGE API: +Deploy open lite llm server, connect to multiple backends. 
+ # terraform-template +``` +git clone https://github.com/aitokenteam/ai-agent-terraform.git +cd ai-agent-terraform/accounts/AI_Token_Team/ +git checkout feature/aitokenteam + +terraform init + +aws ecr create-repository --repository-name agent/eliza +aws ecr create-repository --repository-name nodemodules/sql-lite-vec + +aws ssm put-parameter --name "agent_openai_key" --value "${OPENAI_API_KEY}" --type String + +terraform apply --target module.ssm_observer.aws_ssm_parameter.cw_agent_config --target module.ssm_observer.aws_ssm_parameter.cw_agent_config_details +terraform apply +``` + Terraform project template for deploying infrastructure across multiple environments and regions, following best practices with modular structure and automated syntax checks (GitHub Actions) +## Installation + +1. setup aws account +2. create user with administrator privledges for terraform/tofu + +Directly attach Policy name `AdministratorAccess` to user +replace in the following url +`https://us-east-1.console.aws.amazon.com/iam/home?region=us-east-2#/users/details/?section=permissions` + +create access key +https://us-east-1.console.aws.amazon.com/iam/home?region=us-east-2#/users/details/mdupont/create-access-key + +~/.aws/credentials + +aws cli install +https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html + +`aws configure` + +opentofu install +https://opentofu.org/docs/intro/install/ + +Go to ami catalog in your region: +https://us-west-1.console.aws.amazon.com/ec2/home?region=us-west-1#AMICatalog: + +Search for "ubuntu-noble-24.04-amd64-minimal" in the third tab "aws marketplace amis" + +Accept license. + +## Status report + +Currently we are building out a dev infrastructure using our existing code +and adding in a docker target, this has the advantage that we can build and test +the docker outside of aws and then deploy it easily. 
+ ## Repo structure ``` terraform-template/ # Root directory of the Terraform template repository ├── README.md # Project documentation and overview -├── environments +├── accounts +│ ├── swarms # the main swarms account +├── environments │ ├── swarms-aws-agent-api │ │ ├── dev # Development environment configuration for the demo-azure-vm setup │ │ │ └── us-east-1 @@ -15,3 +142,113 @@ terraform-template/ # Root directory of the Terraform template │ ├── swarms ``` +to ss to the server +`aws ssm start-session --target i-0e156165e86473c93 --profile mdupont --region us-east-2` + +to install secret +`aws ssm put-parameter --name "agent_openai_key" --value "${OPENAI_API_KEY}" --type String` + +If you get this error: +``` +│ Error: reading SSM Parameter (arn:aws:ssm:us-east-1:AKIA4SYAMCQ5MMLC6NU3:parameter/cloudwatch-agent/config/details): operation error SSM: GetParameter, https response error StatusCode: 400, RequestID: 159177cb-91f0-4c2e-a354-07cdc2e64041, api error ValidationException: Invalid Account Id in: arn:aws:ssm:us-east-1:AKIA4SYAMCQ5MMLC6NU3:parameter/cloudwatch-agent/config/details +│ with module.eliza_server.module.eliza.module.lt_docker["t3a.small"].data.aws_ssm_parameter.cw_agent_config, +│ on ../../environments/eliza-agent-api/components/launch_template_docker_mcs/main.tf line 77, in data "aws_ssm_parameter" "cw_agent_config": +│ 77: data "aws_ssm_parameter" "cw_agent_config" { +``` +we need to apply this first: +`tofu apply --target module.ssm_observer.aws_ssm_parameter.cw_agent_config --target module.ssm_observer.aws_ssm_parameter.cw_agent_config_details` + +check the ECR images +`aws ecr list-images --region us-east-2 --repository-name swarms/mcs` + +To login from docker to ecr +`docker login -u AWS -p $(aws ecr get-login-password --region us-east-2) 767503528736.dkr.ecr.us-east-2.amazonaws.com' 767503528736.dkr.ecr.us-east-2.amazonaws.com/agent/eliza:latest` + +# setup +we dont use terraform for the ecr or the secrets because they will change so often 
here are the simple aws scripts for them. + +`aws ecr create-repository --repository-name agent/eliza` + +`set_secrets.sh` calls +`aws ssm put-parameter --name "agent_openai_key" --value "${OPENAI_API_KEY}" --type String` + +## connecting with server +`ssh-ssm.py` to find the server + +for example: +`aws ssm start-session --target i-0e156165e86473c93 --profile mdupont --region us-east-2` + +## terraform-aws-oidc-github +https://github.com/jmikedupont2/terraform-aws-oidc-github + +in my time 2024/12/18/terraform-aws-oidc-github on branch +run the tofu apply in the example after editing the variables and files. + +# debug +`pnpm start:debug --characters=./characters/eliza.character.json` +start direct client here + +# set the az + #~/terraform/accounts/AI_Token_Team/main.tf +edit + aws_availability_zones = ["us-west-1a","us-west-1b","us-west-1c"] + +if you ge the errror: +00000003] +module.ssm_setup.module.ssm.aws_s3_bucket_lifecycle_configuration.access_log_bucket: Creation complete after 32s [id=ai-token-team-session-access-logs-20250113203757298300000002] +╷ +│ Error: creating EC2 Subnet: operation error EC2: CreateSubnet, https response error StatusCode: 400, RequestID: 05eeaa25-0855-4d71-8945-04b0d6233520, api error InvalidParameterValue: Value (us-west-1b) for parameter availabilityZone is invalid. Subnets can currently only be created in the following availability zones: us-east-1a, us-east-1b, us-east-1c, us-east-1d, us-east-1e, us-east-1f. +│ +│ with module.eliza_server.module.vpc.module.vpc.aws_subnet.public[1], +│ on .terraform/modules/eliza_server.vpc.vpc/main.tf line 98, in resource "aws_subnet" "public": +│ 98: resource "aws_subnet" "public" { +│ + + +nter a value: yes + +# key pair +module.eliza_server.module.eliza.module.asg["t3a.small"].module.autoscaling.aws_autoscaling_group.this[0]: Creating... 
+╷ +│ Error: creating Auto Scaling Group (docker-agent-ami-t3a.small-20250113205357496400000001): operation error Auto Scaling: CreateAutoScalingGroup, https response error StatusCode: 400, RequestID: 380d0f57-bf60-40fd-ace6-d86563107c85, api error ValidationError: You must use a valid fully-formed launch template. The key pair does not exist +│ +│ with module.eliza_server.module.eliza.module.asg["t3a.small"].module.autoscaling.aws_autoscaling_group.this[0], +│ on .terraform/modules/eliza_server.eliza.asg.autoscaling/main.tf line 347, in resource "aws_autoscaling_group" "this": +│ 347: resource "aws_autoscaling_group" "this" { +│ +edit +~/terraform/environments/eliza-agent-api/components/keypairs/main.tf + +resource "aws_key_pair" "deployer" { + key_name = "ai-token-deployer-key" + public_key = " ns-864.awsdns-44.net. +2. NS api -> ns-1595.awsdns-07.co.uk. +3. NS api -> ns-1331.awsdns-38.org. +4. NS api -> ns-463.awsdns-57.com. + + + + +see forum https://repost.aws/questions/QULXL3STgjQtefiJ_q0BixXA/configure-godaddy-subdomain-to-route53 + +it says ns records need fqdn! + +``` +dig NS api.swarms.ai @97.74.103.14 + +; <<>> DiG 9.18.28-0ubuntu0.22.04.1-Ubuntu <<>> NS api.swarms.ai @97.74.103.14 +;; global options: +cmd +;; Got answer: +;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: 42722 +;; flags: qr rd; QUERY: 1, ANSWER: 0, AUTHORITY: 4, ADDITIONAL: 1 +;; WARNING: recursion requested but not available + +;; OPT PSEUDOSECTION: +; EDNS: version: 0, flags:; udp: 1232 +;; QUESTION SECTION: +;api.swarms.ai. IN NS + +;; AUTHORITY SECTION: +api.swarms.ai. 3600 IN NS ns-1912.awsdns-47.co.uk. +api.swarms.ai. 3600 IN NS ns-184.awsdns-23.com. +api.swarms.ai. 3600 IN NS ns-598.awsdns-10.net. +api.swarms.ai. 3600 IN NS ns-1175.awsdns-18.org. 
+ +;; Query time: 5 msec +;; SERVER: 97.74.103.14#53(97.74.103.14) (UDP) +;; WHEN: Wed Dec 18 09:20:45 EST 2024 +;; MSG SIZE rcvd: 182 +``` + +``` +mdupont@mdupont-G470:~/swarms-terraform/accounts/swarms$ dig NS api.swarms.ai +trace + +; <<>> DiG 9.18.28-0ubuntu0.22.04.1-Ubuntu <<>> NS api.swarms.ai +trace +;; global options: +cmd +. 198100 IN NS b.root-servers.net. +. 198100 IN NS j.root-servers.net. +. 198100 IN NS a.root-servers.net. +. 198100 IN NS d.root-servers.net. +. 198100 IN NS c.root-servers.net. +. 198100 IN NS g.root-servers.net. +. 198100 IN NS l.root-servers.net. +. 198100 IN NS i.root-servers.net. +. 198100 IN NS h.root-servers.net. +. 198100 IN NS f.root-servers.net. +. 198100 IN NS e.root-servers.net. +. 198100 IN NS m.root-servers.net. +. 198100 IN NS k.root-servers.net. +;; Received 811 bytes from 127.0.0.53#53(127.0.0.53) in 10 ms + +ai. 172800 IN NS v0n0.nic.ai. +ai. 172800 IN NS v0n1.nic.ai. +ai. 172800 IN NS v0n2.nic.ai. +ai. 172800 IN NS v0n3.nic.ai. +ai. 172800 IN NS v2n0.nic.ai. +ai. 172800 IN NS v2n1.nic.ai. +ai. 86400 IN DS 44137 8 2 7886BD35ED745DCA983D951A643495B929B3A7676A88C682EF88EB6F EDBDB253 +ai. 86400 IN DS 3799 8 2 8A8030D4661AE6FCF417349682AC058648371002E70E717E4CF2F11F 83543385 +ai. 86400 IN RRSIG DS 8 1 86400 20241231050000 20241218040000 61050 . 
chqcZJHy4mAsB6DryQAHcvFBsUDVkhHQStDq65NbEXoeo+sfNsRWVpGV qyibbDL8nLY0QDOifh5EXu1Mnf6ZXqs8NPaPBEwCpA9oVmRA0t3vG2th jrDhKY77f4iL4ovMQLBSYbF5x61HnFZXcgyI22YDbbChsC6rCwmNJnwj sldGSNknyRy4ytEwbsWYquRmXIzSHJ2O9lMw1l/vUHpw9/xo6k26TyhZ 3bydt6Sg/e56zwevU0oW1sRpR9aKwn4x/0X0txKmUo+2wWtJr/GXLJ28 uWIuEF71Tvg2QKM0XqZ2CLeURCkU3v4sV92vKQ3rY0GkMiKKlWYFaC8Q Ev4+0A== +;; Received 807 bytes from 199.7.91.13#53(d.root-servers.net) in 11 ms + +;; communications error to 2001:500:a4::1#53: timed out +;; communications error to 2001:500:a4::1#53: timed out +;; communications error to 2001:500:a4::1#53: timed out +;; communications error to 2001:500:a1::1#53: timed out +;; communications error to 2001:500:a2::1#53: timed out +;; communications error to 2001:500:a5::1#53: timed out +swarms.ai. 3600 IN NS ns27.domaincontrol.com. +swarms.ai. 3600 IN NS ns28.domaincontrol.com. +58cj07tk4r4uuu6m10c83sia655jfil6.ai. 86400 IN NSEC3 1 1 0 73 58MDDLU23QVIIIQ5GPLB3A6K7OB4F5JH NS SOA TXT RRSIG DNSKEY NSEC3PARAM ZONEMD +jib3vggauf3u1alb3kfuqrcjo6a0v2hq.ai. 86400 IN NSEC3 1 1 0 73 JIGGLMUFEJ6D5CFLQAC5CFQICTP7IJTE NS DS RRSIG +58cj07tk4r4uuu6m10c83sia655jfil6.ai. 86400 IN RRSIG NSEC3 8 2 86400 20250108141827 20241218131827 6279 ai. r8VEiuIyhowQ2sXxszJEgCBMnMEkyboj418iO/jJfUKxWM408IJTSiuO aALz97JNhHMyzPxScRCO+Vcr3EOuoBknhiO5oO9w7UDnuzxNRyPuevV6 WdloLDUc3GRKSPxWom4/Dh+yaMTBXr2xiDDpIvmAElU5q1oGceB+5wWf 4i4= +jib3vggauf3u1alb3kfuqrcjo6a0v2hq.ai. 86400 IN RRSIG NSEC3 8 2 86400 20250107151955 20241217141955 6279 ai. vyYgTKyNXo+kYzRoc0zYeR544efw1GPI4br3GtS4lRaUwzc3sEFKtoyo /nNGBWKgnYxlWyhrAgTvCQTLO1Qt6uJWyHVcog+6hcVcbeFsL6whp/u8 LKHOtSFg2C/FzqP3JktiSPO5CcQh6WiBik2KXhkD00lMjXfStciqk9nP osk= +;; Received 583 bytes from 199.115.156.1#53(v2n0.nic.ai) in 22 ms + +api.swarms.ai. 3600 IN NS ns-1912.awsdns-47.co.uk. +api.swarms.ai. 3600 IN NS ns-184.awsdns-23.com. +api.swarms.ai. 3600 IN NS ns-598.awsdns-10.net. +api.swarms.ai. 3600 IN NS ns-1175.awsdns-18.org. 
+;; Received 182 bytes from 173.201.71.14#53(ns28.domaincontrol.com) in 5 ms + +;; Received 31 bytes from 205.251.199.120#53(ns-1912.awsdns-47.co.uk) in 4 ms +``` + +https://toolbox.googleapps.com/apps/dig/#ANY/ returns +for swarms.ai this: +``` +id 44700 +opcode QUERY +rcode NOERROR +flags QR RD RA +;QUESTION +swarms.ai. IN ANY +;ANSWER +swarms.ai. 3600 IN A 15.197.225.128 +swarms.ai. 3600 IN A 3.33.251.168 +swarms.ai. 3600 IN NS ns28.domaincontrol.com. +swarms.ai. 3600 IN NS ns27.domaincontrol.com. +swarms.ai. 3600 IN SOA ns27.domaincontrol.com. dns.jomax.net. 2024121702 28800 7200 604800 600 +swarms.ai. 3600 IN TXT "google-site-verification=VlUvNHJo0LQzJzm7SIwMzYLB7-Rexx4yxcSJKh0VtjE" +;AUTHORITY +;ADDITIONAL +``` + +# tofu apply +The new apply + +``` +tofu plan -var "github_token=$(gh auth token)" +tofu apply -var "github_token=$(gh auth token)" + +``` + +# oidc + +see this branch for the pike generated policies +https://github.com/jmikedupont2/terraform-aws-oidc-github/pull/1 + +Latest repos added here: +https://github.com/unfunco/terraform-aws-oidc-github/commit/041576683835e6db4b2a62da7db93f15e52b29ee + +see usage here: +https://github.com/jmikedupont2/swarms-terraform/blob/feature/call_swarms/.github/workflows/call-swarms.yml + +``` +tofu apply -target "module.swarms_api.module.asg_dynamic_new_ami_dev[\"t3.small\"].module.autoscaling.aws_autoscaling_group.this[0]" +``` + +check the build +`aws ecr list-images --profile swarms --region us-east-2 --repository-name swarms/mcs` + + +# policy +see ~/2024/12/18/terraform-aws-oidc-github/examples/complete +arn:aws:iam::916723593639:policy/terraform_pike20241218205424118000000001 +iam_role_arn = "arn:aws:iam::916723593639:role/github diff --git a/accounts/swarms/github/main.tf b/accounts/swarms/github/main.tf new file mode 100644 index 0000000..af0c4af --- /dev/null +++ b/accounts/swarms/github/main.tf @@ -0,0 +1,26 @@ +variable "aws_region" {} +variable "aws_account_id" {} +variable "repos" {} + +terraform { + 
required_providers { + github = { + source = "integrations/github" + version = "6.4.0" + } + } +} + +resource "github_actions_secret" "region" { + for_each = var.repos + repository = each.key # + secret_name = "AWS_REGION" + plaintext_value = var.aws_region +} + +resource "github_actions_secret" "account" { + for_each = var.repos + repository = each.key + secret_name = "AWS_ACCOUNT_ID" + plaintext_value = var.aws_account_id +} diff --git a/accounts/swarms/main.tf b/accounts/swarms/main.tf new file mode 100644 index 0000000..9609b8d --- /dev/null +++ b/accounts/swarms/main.tf @@ -0,0 +1,133 @@ +locals { + #ami_name = "ubuntu/images/hvm-ssd-gp3/ubuntu-noble-24.04-amd64-server-*" + ami_name = "ubuntu-minimal/images/hvm-ssd-gp3/ubuntu-noble-24.04-amd64-minimal-*" # useast2id= + dns = "api.swarms.ai" + account = "916723593639" + region = "us-east-2" +} + + +variable "owner" { + description = "GitHub owner used to configure the provider" + default = "jmikedupont2" +} + +variable "github_token" { + description = "GitHub access token used to configure the provider" + type = string + default = "" +} + +provider "github" { + owner = var.owner + token = var.github_token +} + +#resource aws_route53_zone test{ +# name = local.dns +#} + +provider "aws" { + region = "us-east-2" + profile = "swarms" +} +#output dns { +# value = local.dns +#} + +#output profile { +# value = "swarms" +#} + +#output account { +# value = "916723593639" +#} + +#output region { +# value = "us-east-2" +#} + +#SLOW +#data "aws_ami" "ami" { # slow +# most_recent = true +# name_regex = "^${local.ami_name}" +# } +locals { + us_east_2_swarms_ami_id = "ami-0325b9a2dfb474b2d" + us_east_2_ami_id = "ami-0325b9a2dfb474b2d" +} + +module "swarms_api" { + aws_account_id = local.account + region = local.region + source = "../../environments/swarms-aws-agent-api/dev/us-east-1" + domain = local.dns + #ami_id = data.aws_ami.ami.id + ami_id = local.us_east_2_swarms_ami_id + + + name = "swarms" + tags = { project = 
"swarms" } + +} + +module "swarmdeploy" { + count = 0 + source = "../../environments/swarms-deploy/dev/us-east-1" + domain = local.dns + #ami_id = data.aws_ami.ami.id + ami_id = local.us_east_2_swarms_ami_id + name = "swarmdeploy" + tags = { project = "swarmdeploy" } + vpc_id = "vpc-0b4cedd083227068d" + subnet_id = "subnet-04b3bdd4b0dc877f0" + ssm_profile_arn = "arn:aws:iam::916723593639:instance-profile/ssm-swarms-profile" + ssm_profile_name = "ssm-swarms-profile" +} + +#output api { +# value = module.swarms_api +#} + + +# setup the github tokens +module "github" { + source = "./github" + aws_account_id = local.account + aws_region = local.region + # github_token = var.github_token + repos = toset([ + "terraform-aws-oidc-github", + "swarms", + "swarms-terraform" + ]) +} + + +# now create the ssm document +module "call_swarms" { + source = "../../environments/call-swarms" + +} + +module "ssm_observer" { + source = "../../modules/aws/ssm/observability" + ami_id = local.us_east_2_ami_id +} + +module "ssm_setup" { + source = "../../modules/aws/ssm/setup" +} + +output "user_data_new" { + value = module.swarms_api.user_data_new +} + + +module "pdev" { + source = "./pdev" +} + +module "mdupont" { + source = "./mdupont" +} diff --git a/accounts/swarms/main.txt b/accounts/swarms/main.txt new file mode 100644 index 0000000..65f2ed4 --- /dev/null +++ b/accounts/swarms/main.txt @@ -0,0 +1,1769 @@ +mdupont@mdupont-G470:~/2024/12/swarms-terraform/accounts/swarms$ tofu apply +module.swarms_api.module.vpc.data.aws_availability_zones.available: Reading... +module.swarms_api.module.alb.module.alb.data.aws_partition.current: Reading... +module.swarms_api.module.asg_dynamic["t3.medium"].module.autoscaling.data.aws_partition.current: Reading... +module.swarms_api.module.kp.aws_key_pair.mdupont-deployer: Refreshing state... [id=mdupont-deployer-key] +module.swarms_api.data.aws_ami.ami: Reading... 
+module.swarms_api.module.alb.module.acm.aws_route53_record.validation[0]: Refreshing state... [id=Z05433953QQ7Q4A095TXN__2d35674ee842f81e6c1d4b1ff81bf202.api.swarms.ai._CNAME] +module.swarms_api.module.alb.aws_route53_zone.primary: Refreshing state... [id=Z05433953QQ7Q4A095TXN] +module.swarms_api.module.vpc.module.vpc.aws_vpc.this[0]: Refreshing state... [id=vpc-0474ec5cd8a0dd94b] +module.swarms_api.module.alb.module.alb.data.aws_partition.current: Read complete after 0s [id=aws] +module.swarms_api.module.asg_dynamic["t3.medium"].module.autoscaling.data.aws_partition.current: Read complete after 0s [id=aws] +module.swarms_api.module.alb.module.acm.aws_acm_certificate.this[0]: Refreshing state... [id=arn:aws:acm:us-east-2:767503528736:certificate/e4c7475f-9b0a-471f-b02c-134759b27668] +module.swarms_api.module.alb.data.aws_availability_zones.available: Reading... +module.swarms_api.module.asg_dynamic["t3.medium"].aws_iam_role.ssm: Refreshing state... [id=ssm-swarms-size-t3.medium] +module.swarms_api.module.asg_dynamic["t3.medium"].module.autoscaling.data.aws_iam_policy_document.assume_role_policy[0]: Reading... +module.swarms_api.module.asg_dynamic["t3.medium"].module.autoscaling.data.aws_iam_policy_document.assume_role_policy[0]: Read complete after 0s [id=1256122602] +module.swarms_api.module.asg_dynamic["t3.medium"].module.autoscaling.aws_iam_role.this[0]: Refreshing state... [id=ssm-swarms-size-t3.medium-20241216190828467900000002] +module.swarms_api.module.alb.data.aws_availability_zones.available: Read complete after 0s [id=us-east-2] +module.swarms_api.module.vpc.data.aws_availability_zones.available: Read complete after 0s [id=us-east-2] +module.swarms_api.module.asg_dynamic["t3.medium"].aws_iam_instance_profile.ssm: Refreshing state... [id=ssm-swarms-size-t3.medium] +module.swarms_api.module.asg_dynamic["t3.medium"].module.autoscaling.aws_iam_instance_profile.this[0]: Refreshing state... 
[id=ssm-swarms-size-t3.medium-20241216190828732900000006] +module.swarms_api.module.asg_dynamic["t3.medium"].module.autoscaling.aws_iam_role_policy_attachment.this["AmazonSSMManagedInstanceCore"]: Refreshing state... [id=ssm-swarms-size-t3.medium-20241216190828467900000002-20241216190828805000000007] +module.swarms_api.module.vpc.module.vpc.aws_default_route_table.default[0]: Refreshing state... [id=rtb-0834ea0a1cb0b3e20] +module.swarms_api.module.vpc.module.vpc.aws_default_security_group.this[0]: Refreshing state... [id=sg-09e45cb7d4cd6e40f] +module.swarms_api.module.vpc.module.vpc.aws_internet_gateway.this[0]: Refreshing state... [id=igw-094056be43e3ff84b] +module.swarms_api.module.vpc.module.vpc.aws_default_network_acl.this[0]: Refreshing state... [id=acl-0757b0776826bfbf8] +module.swarms_api.module.vpc.module.vpc.aws_route_table.public[0]: Refreshing state... [id=rtb-01ab4dcce11853b1d] +module.swarms_api.module.vpc.module.vpc.aws_subnet.public[1]: Refreshing state... [id=subnet-0bc4a9913a3ce2359] +module.swarms_api.module.vpc.module.vpc.aws_subnet.public[2]: Refreshing state... [id=subnet-02678c830b74a5b5d] +module.swarms_api.module.vpc.module.vpc.aws_subnet.public[0]: Refreshing state... [id=subnet-0c0af7a5f74ac54cc] +module.swarms_api.module.vpc.module.vpc.aws_route_table.private[0]: Refreshing state... [id=rtb-078545b8b8efb0295] +module.swarms_api.module.vpc.module.vpc.aws_route_table.private[2]: Refreshing state... [id=rtb-062a89232fc0355b2] +module.swarms_api.module.vpc.module.vpc.aws_route_table.private[1]: Refreshing state... [id=rtb-0513c8c9e488570c3] +module.swarms_api.module.vpc.module.vpc.aws_subnet.private[0]: Refreshing state... [id=subnet-03f08e37b2201be9c] +module.swarms_api.module.vpc.module.vpc.aws_subnet.private[1]: Refreshing state... [id=subnet-0e6c2fef4377b5590] +module.swarms_api.module.vpc.module.vpc.aws_subnet.private[2]: Refreshing state... 
[id=subnet-0bbfe21680e03572b] +module.swarms_api.module.vpc.module.vpc.aws_route.public_internet_gateway[0]: Refreshing state... [id=r-rtb-01ab4dcce11853b1d1080289494] +module.swarms_api.module.alb.aws_lb_target_group.this: Refreshing state... [id=arn:aws:elasticloadbalancing:us-east-2:767503528736:targetgroup/swarms2024121619084133320000000c/001ec925380d4bcd] +module.swarms_api.module.vpc.module.vpc.aws_route_table_association.public[2]: Refreshing state... [id=rtbassoc-037ae6ac8e6a6692b] +module.swarms_api.module.vpc.module.vpc.aws_route_table_association.public[0]: Refreshing state... [id=rtbassoc-07c9642c84441a2c1] +module.swarms_api.module.vpc.module.vpc.aws_route_table_association.public[1]: Refreshing state... [id=rtbassoc-08546098c2d85fd6f] +module.swarms_api.module.vpc.module.vpc.aws_route_table_association.private[0]: Refreshing state... [id=rtbassoc-02e71f6004f433e3e] +module.swarms_api.module.vpc.module.vpc.aws_route_table_association.private[2]: Refreshing state... [id=rtbassoc-082b7f543579d5f38] +module.swarms_api.module.vpc.module.vpc.aws_route_table_association.private[1]: Refreshing state... [id=rtbassoc-05cfcd90f6d653fe1] +module.swarms_api.data.aws_ami.ami: Still reading... [10s elapsed] +module.swarms_api.data.aws_ami.ami: Read complete after 13s [id=ami-0ad5d6c7069ce56ac] + +OpenTofu used the selected providers to generate the following execution plan. 
Resource actions are indicated with +the following symbols: + + create + - destroy + +OpenTofu will perform the following actions: + + # module.swarms_api.module.lt_dynamic["t3.medium"].aws_launch_template.ec2_launch_template will be created + + resource "aws_launch_template" "ec2_launch_template" { + + arn = (known after apply) + + default_version = (known after apply) + + id = (known after apply) + + image_id = + + instance_type = "t3.medium" + + key_name = "mdupont-deployer-key" + + latest_version = (known after apply) + + name = (known after apply) + + name_prefix = "swarms-size-t3.medium-launch-template-" + + tags = { + + "instance_type" = "t3.medium" + + "name" = "swarms-size-t3.medium" + + "project" = "swarms" + } + + tags_all = { + + "instance_type" = "t3.medium" + + "name" = "swarms-size-t3.medium" + + "project" = "swarms" + } + + user_data = "IyEvYmluL2Jhc2gKZXhwb3J0IEhPTUU9L3Jvb3QKYXB0IHVwZGF0ZQphcHQtZ2V0IGluc3RhbGwgLXkgZWMyLWluc3RhbmNlLWNvbm5lY3QgZ2l0IHZpcnR1YWxlbnYKc25hcCBpbnN0YWxsIGFtYXpvbi1zc20tYWdlbnQgLS1jbGFzc2ljIHx8IGVjaG8gb29wczEKc25hcCBzdGFydCBhbWF6b24tc3NtLWFnZW50IHx8IGVjaG8gb29wczIKYXB0LWdldCBpbnN0YWxsIC15IC0tbm8taW5zdGFsbC1yZWNvbW1lbmRzIGNhLWNlcnRpZmljYXRlcz0yMDIzMDMxMSBjdXJsPTcuODguMS0xMCtkZWIxMnU3IHwgIGVjaG8gb29wcwpjdXJsIC1PICJodHRwczovL3MzLmFtYXpvbmF3cy5jb20vYW1hem9uY2xvdWR3YXRjaC1hZ2VudC91YnVudHUvJChkcGtnIC0tcHJpbnQtYXJjaGl0ZWN0dXJlKS9sYXRlc3QvYW1hem9uLWNsb3Vkd2F0Y2gtYWdlbnQuZGViIgpkcGtnIC1pIC1FIGFtYXpvbi1jbG91ZHdhdGNoLWFnZW50LmRlYgogCmlmIFsgISAtZCAiL29wdC9zd2FybXMvIiBdOyB0aGVuCiAgZ2l0IGNsb25lIGh0dHBzOi8vZ2l0aHViLmNvbS9qbWlrZWR1cG9udDIvc3dhcm1zICIvb3B0L3N3YXJtcy8iCmZpCmNkICIvb3B0L3N3YXJtcy8iIHx8IGV4aXQgMQpleHBvcnQgQlJBTkNIPWZlYXR1cmUvZWMyCmdpdCBzdGFzaApnaXQgY2hlY2tvdXQgLS1mb3JjZSAkQlJBTkNICmJhc2ggLXggL29wdC9zd2FybXMvYXBpL2luc3RhbGwuc2gK" + + + block_device_mappings { + + device_name = "/dev/sda1" + + + ebs { + + encrypted = "true" + + iops = (known after apply) + + throughput = (known after apply) + + volume_size = 30 + + volume_type = 
"gp3" + } + } + + + iam_instance_profile { + + name = "swarms-20241213150629570500000003" + } + + + network_interfaces { + + associate_public_ip_address = "true" + + delete_on_termination = "true" + + security_groups = (known after apply) + } + } + + # module.swarms_api.module.alb.module.acm.aws_acm_certificate.this[0] will be destroyed + # (because aws_acm_certificate.this is not in configuration) + - resource "aws_acm_certificate" "this" { + - arn = "arn:aws:acm:us-east-2:767503528736:certificate/e4c7475f-9b0a-471f-b02c-134759b27668" -> null + - domain_name = "api.swarms.ai" -> null + - domain_validation_options = [ + - { + - domain_name = "*.api.swarms.ai" + - resource_record_name = "_2d35674ee842f81e6c1d4b1ff81bf202.api.swarms.ai." + - resource_record_type = "CNAME" + - resource_record_value = "_14001873a994d4ba61251844ed588611.zfyfvmchrl.acm-validations.aws." + }, + - { + - domain_name = "api.swarms.ai" + - resource_record_name = "_2d35674ee842f81e6c1d4b1ff81bf202.api.swarms.ai." + - resource_record_type = "CNAME" + - resource_record_value = "_14001873a994d4ba61251844ed588611.zfyfvmchrl.acm-validations.aws." 
+ }, + ] -> null + - id = "arn:aws:acm:us-east-2:767503528736:certificate/e4c7475f-9b0a-471f-b02c-134759b27668" -> null + - key_algorithm = "RSA_2048" -> null + - pending_renewal = false -> null + - renewal_eligibility = "INELIGIBLE" -> null + - renewal_summary = [] -> null + - status = "PENDING_VALIDATION" -> null + - subject_alternative_names = [ + - "*.api.swarms.ai", + - "api.swarms.ai", + ] -> null + - tags = {} -> null + - tags_all = {} -> null + - type = "AMAZON_ISSUED" -> null + - validation_emails = [] -> null + - validation_method = "DNS" -> null + + - options { + - certificate_transparency_logging_preference = "ENABLED" -> null + } + } + + # module.swarms_api.module.alb.module.acm.aws_route53_record.validation[0] will be destroyed + # (because aws_route53_record.validation is not in configuration) + - resource "aws_route53_record" "validation" { + - allow_overwrite = true -> null + - fqdn = "_2d35674ee842f81e6c1d4b1ff81bf202.api.swarms.ai" -> null + - id = "Z05433953QQ7Q4A095TXN__2d35674ee842f81e6c1d4b1ff81bf202.api.swarms.ai._CNAME" -> null + - multivalue_answer_routing_policy = false -> null + - name = "_2d35674ee842f81e6c1d4b1ff81bf202.api.swarms.ai" -> null + - records = [ + - "_14001873a994d4ba61251844ed588611.zfyfvmchrl.acm-validations.aws.", + ] -> null + - ttl = 60 -> null + - type = "CNAME" -> null + - zone_id = "Z05433953QQ7Q4A095TXN" -> null + } + + # module.swarms_api.module.alb.module.alb.aws_lb.this[0] will be created + + resource "aws_lb" "this" { + + arn = (known after apply) + + arn_suffix = (known after apply) + + client_keep_alive = 7200 + + desync_mitigation_mode = "defensive" + + dns_name = (known after apply) + + drop_invalid_header_fields = true + + enable_deletion_protection = false + + enable_http2 = true + + enable_tls_version_and_cipher_suite_headers = false + + enable_waf_fail_open = false + + enable_xff_client_port = false + + enforce_security_group_inbound_rules_on_private_link_traffic = (known after apply) + + id = (known 
after apply) + + idle_timeout = 60 + + internal = (known after apply) + + ip_address_type = (known after apply) + + load_balancer_type = "application" + + name = "swarms-api" + + name_prefix = (known after apply) + + preserve_host_header = false + + security_groups = (known after apply) + + subnets = [ + + "1", + ] + + tags = { + + "Example" = "ex-swarms" + + "Name" = "ex-swarms" + + "Repository" = "https://github.com/terraform-aws-modules/terraform-aws-alb" + + "terraform-aws-modules" = "alb" + } + + tags_all = { + + "Example" = "ex-swarms" + + "Name" = "ex-swarms" + + "Repository" = "https://github.com/terraform-aws-modules/terraform-aws-alb" + + "terraform-aws-modules" = "alb" + } + + vpc_id = (known after apply) + + xff_header_processing_mode = "append" + + zone_id = (known after apply) + + + timeouts {} + } + + # module.swarms_api.module.asg_dynamic["t3.medium"].module.autoscaling.aws_autoscaling_group.this[0] will be created + + resource "aws_autoscaling_group" "this" { + + arn = (known after apply) + + availability_zones = (known after apply) + + default_cooldown = (known after apply) + + desired_capacity = 1 + + force_delete = false + + force_delete_warm_pool = false + + health_check_grace_period = 300 + + health_check_type = "EC2" + + id = (known after apply) + + ignore_failed_scaling_activities = false + + load_balancers = (known after apply) + + max_size = 5 + + metrics_granularity = "1Minute" + + min_size = 1 + + name = (known after apply) + + name_prefix = "swarms-size-t3.medium-" + + predicted_capacity = (known after apply) + + protect_from_scale_in = false + + service_linked_role_arn = (known after apply) + + target_group_arns = (known after apply) + + termination_policies = [] + + vpc_zone_identifier = [ + + "subnet-057c90cfe7b2e5646", + ] + + wait_for_capacity_timeout = "10m" + + warm_pool_size = (known after apply) + + + launch_template { + + id = (known after apply) + + name = (known after apply) + + version = "$Latest" + } + + + tag { + + key = 
"Name" + + propagate_at_launch = true + + value = "swarms-size-t3.medium" + } + + + timeouts {} + } + + # module.swarms_api.module.asg_dynamic["t3.medium"].module.autoscaling.aws_autoscaling_traffic_source_attachment.this["ex-alb"] will be created + + resource "aws_autoscaling_traffic_source_attachment" "this" { + + autoscaling_group_name = (known after apply) + + id = (known after apply) + + + traffic_source { + + identifier = "arn:aws:elasticloadbalancing:us-east-2:767503528736:targetgroup/swarms2024121619084133320000000c/001ec925380d4bcd" + + type = "elbv2" + } + } + + # module.swarms_api.module.security.module.asg_sg.aws_security_group.this_name_prefix[0] will be created + + resource "aws_security_group" "this_name_prefix" { + + arn = (known after apply) + + description = "external group" + + egress = (known after apply) + + id = (known after apply) + + ingress = (known after apply) + + name = (known after apply) + + name_prefix = "swarms-external-" + + owner_id = (known after apply) + + revoke_rules_on_delete = false + + tags = { + + "Name" = "swarms-external" + + "project" = "swarms" + } + + tags_all = { + + "Name" = "swarms-external" + + "project" = "swarms" + } + + vpc_id = "vpc-04f28c9347af48b55" + + + timeouts { + + create = "10m" + + delete = "15m" + } + } + + # module.swarms_api.module.security.module.asg_sg.aws_security_group_rule.egress_rules[0] will be created + + resource "aws_security_group_rule" "egress_rules" { + + cidr_blocks = [ + + "0.0.0.0/0", + ] + + description = "All protocols" + + from_port = -1 + + id = (known after apply) + + ipv6_cidr_blocks = [ + + "::/0", + ] + + prefix_list_ids = [] + + protocol = "-1" + + security_group_id = (known after apply) + + security_group_rule_id = (known after apply) + + self = false + + source_security_group_id = (known after apply) + + to_port = -1 + + type = "egress" + } + + # module.swarms_api.module.security.module.asg_sg.aws_security_group_rule.ingress_rules[0] will be created + + resource 
"aws_security_group_rule" "ingress_rules" { + + cidr_blocks = [ + + "0.0.0.0/0", + ] + + description = "HTTPS" + + from_port = 443 + + id = (known after apply) + + ipv6_cidr_blocks = [] + + prefix_list_ids = [] + + protocol = "tcp" + + security_group_id = (known after apply) + + security_group_rule_id = (known after apply) + + self = false + + source_security_group_id = (known after apply) + + to_port = 443 + + type = "ingress" + } + + # module.swarms_api.module.security.module.asg_sg.aws_security_group_rule.ingress_rules[1] will be created + + resource "aws_security_group_rule" "ingress_rules" { + + cidr_blocks = [ + + "0.0.0.0/0", + ] + + description = "HTTP" + + from_port = 80 + + id = (known after apply) + + ipv6_cidr_blocks = [] + + prefix_list_ids = [] + + protocol = "tcp" + + security_group_id = (known after apply) + + security_group_rule_id = (known after apply) + + self = false + + source_security_group_id = (known after apply) + + to_port = 80 + + type = "ingress" + } + + # module.swarms_api.module.security.module.asg_sg_internal.aws_security_group.this_name_prefix[0] will be created + + resource "aws_security_group" "this_name_prefix" { + + arn = (known after apply) + + description = "An internal security group" + + egress = (known after apply) + + id = (known after apply) + + ingress = (known after apply) + + name = (known after apply) + + name_prefix = "swarms-internal-" + + owner_id = (known after apply) + + revoke_rules_on_delete = false + + tags = { + + "Name" = "swarms-internal" + + "project" = "swarms" + } + + tags_all = { + + "Name" = "swarms-internal" + + "project" = "swarms" + } + + vpc_id = "vpc-04f28c9347af48b55" + + + timeouts { + + create = "10m" + + delete = "15m" + } + } + + # module.swarms_api.module.security.module.asg_sg_internal.aws_security_group_rule.egress_rules[0] will be created + + resource "aws_security_group_rule" "egress_rules" { + + cidr_blocks = [ + + "0.0.0.0/0", + ] + + description = "All protocols" + + from_port = -1 + + 
id = (known after apply) + + ipv6_cidr_blocks = [ + + "::/0", + ] + + prefix_list_ids = [] + + protocol = "-1" + + security_group_id = (known after apply) + + security_group_rule_id = (known after apply) + + self = false + + source_security_group_id = (known after apply) + + to_port = -1 + + type = "egress" + } + + # module.swarms_api.module.security.module.asg_sg_internal.aws_security_group_rule.ingress_with_source_security_group_id[0] will be created + + resource "aws_security_group_rule" "ingress_with_source_security_group_id" { + + description = "Ingress Rule" + + from_port = 80 + + id = (known after apply) + + prefix_list_ids = [] + + protocol = "tcp" + + security_group_id = (known after apply) + + security_group_rule_id = (known after apply) + + self = false + + source_security_group_id = (known after apply) + + to_port = 80 + + type = "ingress" + } + + + + +mdupont@mdupont-G470:~/2024/12/swarms-terraform/accounts/swarms$ tofu apply -auto-approve +module.swarms_api.module.alb.module.route53.data.aws_route53_zone.primary: Reading... +module.swarms_api.module.alb.module.alb.data.aws_partition.current: Reading... +module.swarms_api.module.vpc.data.aws_availability_zones.available: Reading... +module.swarms_api.module.alb.data.aws_availability_zones.available: Reading... +module.swarms_api.module.kp.aws_key_pair.mdupont-deployer: Refreshing state... [id=mdupont-deployer-key] +module.swarms_api.module.vpc.module.vpc.aws_vpc.this[0]: Refreshing state... 
[id=vpc-0b4cedd083227068d] +module.swarms_api.module.alb.module.alb.data.aws_partition.current: Read complete after 0s [id=aws] +module.swarms_api.module.alb.data.aws_availability_zones.available: Read complete after 0s [id=us-east-2] +module.swarms_api.module.vpc.data.aws_availability_zones.available: Read complete after 0s [id=us-east-2] +module.swarms_api.module.alb.module.route53.data.aws_route53_zone.primary: Read complete after 0s [id=Z04162952OP7P14Z97UWY] +module.swarms_api.module.vpc.module.vpc.aws_default_route_table.default[0]: Refreshing state... [id=rtb-053dc8c3d37f2f58f] +module.swarms_api.module.vpc.module.vpc.aws_default_security_group.this[0]: Refreshing state... [id=sg-0a42c3ef338285431] +module.swarms_api.module.vpc.module.vpc.aws_default_network_acl.this[0]: Refreshing state... [id=acl-07d9084294e0492bd] +module.swarms_api.module.vpc.module.vpc.aws_route_table.private[1]: Refreshing state... [id=rtb-0bbc3caa1cdc40cd0] +module.swarms_api.module.vpc.module.vpc.aws_route_table.private[0]: Refreshing state... [id=rtb-033eec802f743baca] +module.swarms_api.module.vpc.module.vpc.aws_route_table.private[2]: Refreshing state... [id=rtb-081f2bb265d2703ee] +module.swarms_api.module.vpc.module.vpc.aws_subnet.public[2]: Refreshing state... [id=subnet-027f4abd09a6543f0] +module.swarms_api.module.vpc.module.vpc.aws_route_table.public[0]: Refreshing state... [id=rtb-0445a2ea1c510657b] +module.swarms_api.module.vpc.module.vpc.aws_subnet.public[0]: Refreshing state... [id=subnet-04b3bdd4b0dc877f0] +module.swarms_api.module.vpc.module.vpc.aws_subnet.public[1]: Refreshing state... [id=subnet-051fecdee9028af79] +module.swarms_api.module.vpc.module.vpc.aws_subnet.private[1]: Refreshing state... [id=subnet-0de17633cfb166d29] +module.swarms_api.module.vpc.module.vpc.aws_subnet.private[0]: Refreshing state... [id=subnet-0ea44af2f97e12b1a] +module.swarms_api.module.vpc.module.vpc.aws_subnet.private[2]: Refreshing state... 
[id=subnet-0e926a2b5ae3f5acc] +module.swarms_api.module.vpc.module.vpc.aws_internet_gateway.this[0]: Refreshing state... [id=igw-022da11de86f41ae0] +module.swarms_api.module.security.module.asg_sg_internal.aws_security_group.this_name_prefix[0]: Refreshing state... [id=sg-0516329c97694b300] +module.swarms_api.module.security.module.asg_sg.aws_security_group.this_name_prefix[0]: Refreshing state... [id=sg-09e0227357b33ab1e] +module.swarms_api.module.alb.module.tg.aws_lb_target_group.this: Refreshing state... [id=arn:aws:elasticloadbalancing:us-east-2:916723593639:targetgroup/swarms2024121620041957850000000a/e915743119e96771] +module.swarms_api.module.vpc.module.vpc.aws_route_table_association.public[1]: Refreshing state... [id=rtbassoc-000d8799150d904e6] +module.swarms_api.module.vpc.module.vpc.aws_route_table_association.public[2]: Refreshing state... [id=rtbassoc-0b047c79fe554852d] +module.swarms_api.module.vpc.module.vpc.aws_route_table_association.public[0]: Refreshing state... [id=rtbassoc-033c4fa608f79a594] +module.swarms_api.module.vpc.module.vpc.aws_route.public_internet_gateway[0]: Refreshing state... [id=r-rtb-0445a2ea1c510657b1080289494] +module.swarms_api.module.vpc.module.vpc.aws_route_table_association.private[0]: Refreshing state... [id=rtbassoc-06f87a2b8106090d9] +module.swarms_api.module.vpc.module.vpc.aws_route_table_association.private[2]: Refreshing state... [id=rtbassoc-0657562cf0177710a] +module.swarms_api.module.vpc.module.vpc.aws_route_table_association.private[1]: Refreshing state... [id=rtbassoc-085a04c2858a3164e] +module.swarms_api.module.security.module.asg_sg_internal.aws_security_group_rule.egress_rules[0]: Refreshing state... [id=sgrule-1938904650] +module.swarms_api.module.security.module.asg_sg.aws_security_group_rule.ingress_rules[0]: Refreshing state... [id=sgrule-2188377888] +module.swarms_api.module.security.module.asg_sg.aws_security_group_rule.ingress_rules[1]: Refreshing state... 
[id=sgrule-1557830365] +module.swarms_api.module.security.module.asg_sg.aws_security_group_rule.egress_rules[0]: Refreshing state... [id=sgrule-2310235202] +module.swarms_api.module.security.module.asg_sg_internal.aws_security_group_rule.ingress_with_source_security_group_id[0]: Refreshing state... [id=sgrule-513653582] +module.swarms_api.module.lt_dynamic["t3.medium"].aws_launch_template.ec2_launch_template: Refreshing state... [id=lt-0ef5358bfbb143f2d] + +OpenTofu used the selected providers to generate the following execution plan. Resource actions are indicated with +the following symbols: + + create + +OpenTofu will perform the following actions: + + # module.swarms_api.module.alb.module.alb.aws_lb.this[0] will be created + + resource "aws_lb" "this" { + + arn = (known after apply) + + arn_suffix = (known after apply) + + client_keep_alive = 7200 + + desync_mitigation_mode = "defensive" + + dns_name = (known after apply) + + drop_invalid_header_fields = true + + enable_deletion_protection = false + + enable_http2 = true + + enable_tls_version_and_cipher_suite_headers = false + + enable_waf_fail_open = false + + enable_xff_client_port = false + + enforce_security_group_inbound_rules_on_private_link_traffic = (known after apply) + + id = (known after apply) + + idle_timeout = 60 + + internal = (known after apply) + + ip_address_type = (known after apply) + + load_balancer_type = "application" + + name = "swarms-api" + + name_prefix = (known after apply) + + preserve_host_header = false + + security_groups = [ + + "sg-09e0227357b33ab1e", + ] + + subnets = [ + + "1", + ] + + tags = { + + "Example" = "ex-swarms" + + "Name" = "ex-swarms" + + "Repository" = "https://github.com/terraform-aws-modules/terraform-aws-alb" + + "terraform-aws-modules" = "alb" + } + + tags_all = { + + "Example" = "ex-swarms" + + "Name" = "ex-swarms" + + "Repository" = "https://github.com/terraform-aws-modules/terraform-aws-alb" + + "terraform-aws-modules" = "alb" + } + + vpc_id = (known 
after apply) + + xff_header_processing_mode = "append" + + zone_id = (known after apply) + + + timeouts {} + } + + # module.swarms_api.module.alb.module.route53.aws_route53_record.api-cname will be created + + resource "aws_route53_record" "api-cname" { + + allow_overwrite = (known after apply) + + fqdn = (known after apply) + + id = (known after apply) + + name = "api.swarms.ai" + + records = (known after apply) + + ttl = 5 + + type = "CNAME" + + zone_id = "Z04162952OP7P14Z97UWY" + } + +Plan: 2 to add, 0 to change, 0 to destroy. +module.swarms_api.module.alb.module.alb.aws_lb.this[0]: Creating... +╷ +│ Error: creating ELBv2 application Load Balancer (swarms-api): operation error Elastic Load Balancing v2: CreateLoadBalancer, https response error StatusCode: 400, RequestID: 33d4b4ca-900e-4148-989a-932978bd7bfc, InvalidSubnet: The subnet ID '1' is not valid +│ +│ with module.swarms_api.module.alb.module.alb.aws_lb.this[0], +│ on .terraform/modules/swarms_api.alb.alb/main.tf line 12, in resource "aws_lb" "this": +│ 12: resource "aws_lb" "this" { +│ +╵ +mdupont@mdupont-G470:~/2024/12/swarms-terraform/accounts/swarms$ tofu apply -auto-approve +╷ +│ Error: Reference to undeclared module +│ +│ on ../../environments/swarms-aws-agent-api/dev/us-east-1/main.tf line 107, in output "alb": +│ 107: value = module.alb +│ +│ No module call named "alb" is declared in module.swarms_api. +╵ +mdupont@mdupont-G470:~/2024/12/swarms-terraform/accounts/swarms$ tofu apply -auto-approve +╷ +│ Error: Reference to undeclared module +│ +│ on ../../environments/swarms-aws-agent-api/dev/us-east-1/main.tf line 107, in output "alb": +│ 107: value = module.alb +│ +│ No module call named "alb" is declared in module.swarms_api. +╵ +mdupont@mdupont-G470:~/2024/12/swarms-terraform/accounts/swarms$ +mdupont@mdupont-G470:~/2024/12/swarms-terraform/accounts/swarms$ tofu apply -auto-approve +module.swarms_api.module.vpc.data.aws_availability_zones.available: Reading... 
+module.swarms_api.module.kp.aws_key_pair.mdupont-deployer: Refreshing state... [id=mdupont-deployer-key] +module.swarms_api.module.vpc.module.vpc.aws_vpc.this[0]: Refreshing state... [id=vpc-0b4cedd083227068d] +module.swarms_api.module.alb.module.tg.aws_lb_target_group.this: Refreshing state... [id=arn:aws:elasticloadbalancing:us-east-2:916723593639:targetgroup/swarms2024121620041957850000000a/e915743119e96771] +module.swarms_api.module.vpc.data.aws_availability_zones.available: Read complete after 0s [id=us-east-2] +module.swarms_api.module.vpc.module.vpc.aws_default_security_group.this[0]: Refreshing state... [id=sg-0a42c3ef338285431] +module.swarms_api.module.vpc.module.vpc.aws_default_route_table.default[0]: Refreshing state... [id=rtb-053dc8c3d37f2f58f] +module.swarms_api.module.vpc.module.vpc.aws_route_table.public[0]: Refreshing state... [id=rtb-0445a2ea1c510657b] +module.swarms_api.module.vpc.module.vpc.aws_default_network_acl.this[0]: Refreshing state... [id=acl-07d9084294e0492bd] +module.swarms_api.module.vpc.module.vpc.aws_subnet.public[1]: Refreshing state... [id=subnet-051fecdee9028af79] +module.swarms_api.module.vpc.module.vpc.aws_subnet.public[2]: Refreshing state... [id=subnet-027f4abd09a6543f0] +module.swarms_api.module.vpc.module.vpc.aws_subnet.public[0]: Refreshing state... [id=subnet-04b3bdd4b0dc877f0] +module.swarms_api.module.vpc.module.vpc.aws_route_table.private[0]: Refreshing state... [id=rtb-033eec802f743baca] +module.swarms_api.module.vpc.module.vpc.aws_route_table.private[2]: Refreshing state... [id=rtb-081f2bb265d2703ee] +module.swarms_api.module.vpc.module.vpc.aws_route_table.private[1]: Refreshing state... [id=rtb-0bbc3caa1cdc40cd0] +module.swarms_api.module.vpc.module.vpc.aws_internet_gateway.this[0]: Refreshing state... [id=igw-022da11de86f41ae0] +module.swarms_api.module.vpc.module.vpc.aws_subnet.private[0]: Refreshing state... 
[id=subnet-0ea44af2f97e12b1a] +module.swarms_api.module.vpc.module.vpc.aws_subnet.private[1]: Refreshing state... [id=subnet-0de17633cfb166d29] +module.swarms_api.module.vpc.module.vpc.aws_subnet.private[2]: Refreshing state... [id=subnet-0e926a2b5ae3f5acc] +module.swarms_api.module.security.module.asg_sg_internal.aws_security_group.this_name_prefix[0]: Refreshing state... [id=sg-0516329c97694b300] +module.swarms_api.module.security.module.asg_sg.aws_security_group.this_name_prefix[0]: Refreshing state... [id=sg-09e0227357b33ab1e] +module.swarms_api.module.vpc.module.vpc.aws_route.public_internet_gateway[0]: Refreshing state... [id=r-rtb-0445a2ea1c510657b1080289494] +module.swarms_api.module.vpc.module.vpc.aws_route_table_association.public[2]: Refreshing state... [id=rtbassoc-0b047c79fe554852d] +module.swarms_api.module.vpc.module.vpc.aws_route_table_association.public[1]: Refreshing state... [id=rtbassoc-000d8799150d904e6] +module.swarms_api.module.vpc.module.vpc.aws_route_table_association.public[0]: Refreshing state... [id=rtbassoc-033c4fa608f79a594] +module.swarms_api.module.vpc.module.vpc.aws_route_table_association.private[2]: Refreshing state... [id=rtbassoc-0657562cf0177710a] +module.swarms_api.module.vpc.module.vpc.aws_route_table_association.private[0]: Refreshing state... [id=rtbassoc-06f87a2b8106090d9] +module.swarms_api.module.vpc.module.vpc.aws_route_table_association.private[1]: Refreshing state... [id=rtbassoc-085a04c2858a3164e] +module.swarms_api.module.security.module.asg_sg_internal.aws_security_group_rule.egress_rules[0]: Refreshing state... [id=sgrule-1938904650] +module.swarms_api.module.security.module.asg_sg.aws_security_group_rule.ingress_rules[1]: Refreshing state... [id=sgrule-1557830365] +module.swarms_api.module.security.module.asg_sg.aws_security_group_rule.ingress_rules[0]: Refreshing state... [id=sgrule-2188377888] +module.swarms_api.module.security.module.asg_sg.aws_security_group_rule.egress_rules[0]: Refreshing state... 
[id=sgrule-2310235202] +module.swarms_api.module.security.module.asg_sg_internal.aws_security_group_rule.ingress_with_source_security_group_id[0]: Refreshing state... [id=sgrule-513653582] +module.swarms_api.module.lt_dynamic["t3.medium"].aws_launch_template.ec2_launch_template: Refreshing state... [id=lt-0ef5358bfbb143f2d] + +OpenTofu used the selected providers to generate the following execution plan. Resource actions are indicated with +the following symbols: + - destroy + +OpenTofu will perform the following actions: + + # module.swarms_api.module.alb.module.tg.aws_lb_target_group.this will be destroyed + # (because aws_lb_target_group.this is not in configuration) + - resource "aws_lb_target_group" "this" { + - arn = "arn:aws:elasticloadbalancing:us-east-2:916723593639:targetgroup/swarms2024121620041957850000000a/e915743119e96771" -> null + - arn_suffix = "targetgroup/swarms2024121620041957850000000a/e915743119e96771" -> null + - deregistration_delay = "10" -> null + - id = "arn:aws:elasticloadbalancing:us-east-2:916723593639:targetgroup/swarms2024121620041957850000000a/e915743119e96771" -> null + - ip_address_type = "ipv4" -> null + - lambda_multi_value_headers_enabled = false -> null + - load_balancer_arns = [] -> null + - load_balancing_algorithm_type = "round_robin" -> null + - load_balancing_anomaly_mitigation = "off" -> null + - load_balancing_cross_zone_enabled = "use_load_balancer_configuration" -> null + - name = "swarms2024121620041957850000000a" -> null + - name_prefix = "swarms" -> null + - port = 80 -> null + - protocol = "HTTP" -> null + - protocol_version = "HTTP1" -> null + - proxy_protocol_v2 = false -> null + - slow_start = 0 -> null + - tags = {} -> null + - tags_all = {} -> null + - target_type = "instance" -> null + - vpc_id = "vpc-0b4cedd083227068d" -> null + + - health_check { + - enabled = true -> null + - healthy_threshold = 10 -> null + - interval = 130 -> null + - matcher = "200" -> null + - path = "/v1/docs" -> null + - port = 
"traffic-port" -> null + - protocol = "HTTP" -> null + - timeout = 120 -> null + - unhealthy_threshold = 10 -> null + } + + - stickiness { + - cookie_duration = 86400 -> null + - enabled = false -> null + - type = "lb_cookie" -> null + } + + - target_failover {} + + - target_group_health { + - dns_failover { + - minimum_healthy_targets_count = "1" -> null + - minimum_healthy_targets_percentage = "off" -> null + } + - unhealthy_state_routing { + - minimum_healthy_targets_count = 1 -> null + - minimum_healthy_targets_percentage = "off" -> null + } + } + + - target_health_state {} + } + +Plan: 0 to add, 0 to change, 1 to destroy. +module.swarms_api.module.alb.module.tg.aws_lb_target_group.this: Destroying... [id=arn:aws:elasticloadbalancing:us-east-2:916723593639:targetgroup/swarms2024121620041957850000000a/e915743119e96771] +module.swarms_api.module.alb.module.tg.aws_lb_target_group.this: Destruction complete after 1s + +Apply complete! Resources: 0 added, 0 changed, 1 destroyed. + +Outputs: + +account = "916723593639" +dns = "api.swarms.ai" +profile = "swarms" +region = "us-east-2" +mdupont@mdupont-G470:~/2024/12/swarms-terraform/accounts/swarms$ tofu output +account = "916723593639" +dns = "api.swarms.ai" +profile = "swarms" +region = "us-east-2" +mdupont@mdupont-G470:~/2024/12/swarms-terraform/accounts/swarms$ tofu output +account = "916723593639" +dns = "api.swarms.ai" +profile = "swarms" +region = "us-east-2" +mdupont@mdupont-G470:~/2024/12/swarms-terraform/accounts/swarms$ tofu apply +module.swarms_api.module.vpc.data.aws_availability_zones.available: Reading... +module.swarms_api.module.kp.aws_key_pair.mdupont-deployer: Refreshing state... [id=mdupont-deployer-key] +module.swarms_api.module.vpc.module.vpc.aws_vpc.this[0]: Refreshing state... 
[id=vpc-0b4cedd083227068d] +module.swarms_api.module.vpc.data.aws_availability_zones.available: Read complete after 0s [id=us-east-2] +module.swarms_api.module.vpc.module.vpc.aws_default_route_table.default[0]: Refreshing state... [id=rtb-053dc8c3d37f2f58f] +module.swarms_api.module.vpc.module.vpc.aws_default_security_group.this[0]: Refreshing state... [id=sg-0a42c3ef338285431] +module.swarms_api.module.vpc.module.vpc.aws_route_table.public[0]: Refreshing state... [id=rtb-0445a2ea1c510657b] +module.swarms_api.module.vpc.module.vpc.aws_default_network_acl.this[0]: Refreshing state... [id=acl-07d9084294e0492bd] +module.swarms_api.module.vpc.module.vpc.aws_subnet.private[2]: Refreshing state... [id=subnet-0e926a2b5ae3f5acc] +module.swarms_api.module.vpc.module.vpc.aws_subnet.public[2]: Refreshing state... [id=subnet-027f4abd09a6543f0] +module.swarms_api.module.vpc.module.vpc.aws_subnet.private[0]: Refreshing state... [id=subnet-0ea44af2f97e12b1a] +module.swarms_api.module.vpc.module.vpc.aws_subnet.public[0]: Refreshing state... [id=subnet-04b3bdd4b0dc877f0] +module.swarms_api.module.vpc.module.vpc.aws_subnet.public[1]: Refreshing state... [id=subnet-051fecdee9028af79] +module.swarms_api.module.vpc.module.vpc.aws_subnet.private[1]: Refreshing state... [id=subnet-0de17633cfb166d29] +module.swarms_api.module.vpc.module.vpc.aws_internet_gateway.this[0]: Refreshing state... [id=igw-022da11de86f41ae0] +module.swarms_api.module.vpc.module.vpc.aws_route_table.private[0]: Refreshing state... [id=rtb-033eec802f743baca] +module.swarms_api.module.vpc.module.vpc.aws_route_table.private[1]: Refreshing state... [id=rtb-0bbc3caa1cdc40cd0] +module.swarms_api.module.vpc.module.vpc.aws_route_table.private[2]: Refreshing state... [id=rtb-081f2bb265d2703ee] +module.swarms_api.module.security.module.asg_sg_internal.aws_security_group.this_name_prefix[0]: Refreshing state... 
[id=sg-0516329c97694b300] +module.swarms_api.module.security.module.asg_sg.aws_security_group.this_name_prefix[0]: Refreshing state... [id=sg-09e0227357b33ab1e] +module.swarms_api.module.vpc.module.vpc.aws_route_table_association.public[1]: Refreshing state... [id=rtbassoc-000d8799150d904e6] +module.swarms_api.module.vpc.module.vpc.aws_route_table_association.public[2]: Refreshing state... [id=rtbassoc-0b047c79fe554852d] +module.swarms_api.module.vpc.module.vpc.aws_route_table_association.public[0]: Refreshing state... [id=rtbassoc-033c4fa608f79a594] +module.swarms_api.module.vpc.module.vpc.aws_route.public_internet_gateway[0]: Refreshing state... [id=r-rtb-0445a2ea1c510657b1080289494] +module.swarms_api.module.vpc.module.vpc.aws_route_table_association.private[0]: Refreshing state... [id=rtbassoc-06f87a2b8106090d9] +module.swarms_api.module.vpc.module.vpc.aws_route_table_association.private[2]: Refreshing state... [id=rtbassoc-0657562cf0177710a] +module.swarms_api.module.vpc.module.vpc.aws_route_table_association.private[1]: Refreshing state... [id=rtbassoc-085a04c2858a3164e] +module.swarms_api.module.security.module.asg_sg_internal.aws_security_group_rule.egress_rules[0]: Refreshing state... [id=sgrule-1938904650] +module.swarms_api.module.lt_dynamic["t3.medium"].aws_launch_template.ec2_launch_template: Refreshing state... [id=lt-0ef5358bfbb143f2d] +module.swarms_api.module.security.module.asg_sg.aws_security_group_rule.ingress_rules[0]: Refreshing state... [id=sgrule-2188377888] +module.swarms_api.module.security.module.asg_sg.aws_security_group_rule.ingress_rules[1]: Refreshing state... [id=sgrule-1557830365] +module.swarms_api.module.security.module.asg_sg.aws_security_group_rule.egress_rules[0]: Refreshing state... [id=sgrule-2310235202] +module.swarms_api.module.security.module.asg_sg_internal.aws_security_group_rule.ingress_with_source_security_group_id[0]: Refreshing state... 
[id=sgrule-513653582] + +Changes to Outputs: + + api = { + + security_group_id = "sg-09e0227357b33ab1e" + } + +You can apply this plan to save these new output values to the OpenTofu state, without changing any real +infrastructure. + +Do you want to perform these actions? + OpenTofu will perform the actions described above. + Only 'yes' will be accepted to approve. + + Enter a value: yes + + +Apply complete! Resources: 0 added, 0 changed, 0 destroyed. + +Outputs: + +account = "916723593639" +api = { + "security_group_id" = "sg-09e0227357b33ab1e" +} +dns = "api.swarms.ai" +profile = "swarms" +region = "us-east-2" +mdupont@mdupont-G470:~/2024/12/swarms-terraform/accounts/swarms$ tofu apply +module.swarms_api.module.vpc.data.aws_availability_zones.available: Reading... +module.swarms_api.module.kp.aws_key_pair.mdupont-deployer: Refreshing state... [id=mdupont-deployer-key] +module.swarms_api.module.vpc.module.vpc.aws_vpc.this[0]: Refreshing state... [id=vpc-0b4cedd083227068d] +module.swarms_api.module.vpc.data.aws_availability_zones.available: Read complete after 0s [id=us-east-2] +module.swarms_api.module.vpc.module.vpc.aws_default_route_table.default[0]: Refreshing state... [id=rtb-053dc8c3d37f2f58f] +module.swarms_api.module.vpc.module.vpc.aws_default_network_acl.this[0]: Refreshing state... [id=acl-07d9084294e0492bd] +module.swarms_api.module.vpc.module.vpc.aws_default_security_group.this[0]: Refreshing state... [id=sg-0a42c3ef338285431] +module.swarms_api.module.vpc.module.vpc.aws_subnet.public[2]: Refreshing state... [id=subnet-027f4abd09a6543f0] +module.swarms_api.module.vpc.module.vpc.aws_subnet.public[0]: Refreshing state... [id=subnet-04b3bdd4b0dc877f0] +module.swarms_api.module.vpc.module.vpc.aws_internet_gateway.this[0]: Refreshing state... [id=igw-022da11de86f41ae0] +module.swarms_api.module.vpc.module.vpc.aws_subnet.public[1]: Refreshing state... 
[id=subnet-051fecdee9028af79] +module.swarms_api.module.vpc.module.vpc.aws_route_table.public[0]: Refreshing state... [id=rtb-0445a2ea1c510657b] +module.swarms_api.module.vpc.module.vpc.aws_route_table.private[1]: Refreshing state... [id=rtb-0bbc3caa1cdc40cd0] +module.swarms_api.module.vpc.module.vpc.aws_route_table.private[0]: Refreshing state... [id=rtb-033eec802f743baca] +module.swarms_api.module.vpc.module.vpc.aws_route_table.private[2]: Refreshing state... [id=rtb-081f2bb265d2703ee] +module.swarms_api.module.vpc.module.vpc.aws_subnet.private[0]: Refreshing state... [id=subnet-0ea44af2f97e12b1a] +module.swarms_api.module.vpc.module.vpc.aws_subnet.private[2]: Refreshing state... [id=subnet-0e926a2b5ae3f5acc] +module.swarms_api.module.vpc.module.vpc.aws_subnet.private[1]: Refreshing state... [id=subnet-0de17633cfb166d29] +module.swarms_api.module.security.module.asg_sg.aws_security_group.this_name_prefix[0]: Refreshing state... [id=sg-09e0227357b33ab1e] +module.swarms_api.module.security.module.asg_sg_internal.aws_security_group.this_name_prefix[0]: Refreshing state... [id=sg-0516329c97694b300] +module.swarms_api.module.vpc.module.vpc.aws_route.public_internet_gateway[0]: Refreshing state... [id=r-rtb-0445a2ea1c510657b1080289494] +module.swarms_api.module.vpc.module.vpc.aws_route_table_association.public[0]: Refreshing state... [id=rtbassoc-033c4fa608f79a594] +module.swarms_api.module.vpc.module.vpc.aws_route_table_association.public[2]: Refreshing state... [id=rtbassoc-0b047c79fe554852d] +module.swarms_api.module.vpc.module.vpc.aws_route_table_association.public[1]: Refreshing state... [id=rtbassoc-000d8799150d904e6] +module.swarms_api.module.vpc.module.vpc.aws_route_table_association.private[1]: Refreshing state... [id=rtbassoc-085a04c2858a3164e] +module.swarms_api.module.vpc.module.vpc.aws_route_table_association.private[0]: Refreshing state... 
[id=rtbassoc-06f87a2b8106090d9] +module.swarms_api.module.vpc.module.vpc.aws_route_table_association.private[2]: Refreshing state... [id=rtbassoc-0657562cf0177710a] +module.swarms_api.module.security.module.asg_sg_internal.aws_security_group_rule.egress_rules[0]: Refreshing state... [id=sgrule-1938904650] +module.swarms_api.module.security.module.asg_sg.aws_security_group_rule.ingress_rules[1]: Refreshing state... [id=sgrule-1557830365] +module.swarms_api.module.security.module.asg_sg.aws_security_group_rule.ingress_rules[0]: Refreshing state... [id=sgrule-2188377888] +module.swarms_api.module.security.module.asg_sg.aws_security_group_rule.egress_rules[0]: Refreshing state... [id=sgrule-2310235202] +module.swarms_api.module.security.module.asg_sg_internal.aws_security_group_rule.ingress_with_source_security_group_id[0]: Refreshing state... [id=sgrule-513653582] +module.swarms_api.module.lt_dynamic["t3.medium"].aws_launch_template.ec2_launch_template: Refreshing state... [id=lt-0ef5358bfbb143f2d] + +No changes. Your infrastructure matches the configuration. + +OpenTofu has compared your real infrastructure against your configuration and found no differences, so no changes are +needed. + +Apply complete! Resources: 0 added, 0 changed, 0 destroyed. + +Outputs: + +account = "916723593639" +api = { + "security_group_id" = "sg-09e0227357b33ab1e" +} +dns = "api.swarms.ai" +profile = "swarms" +region = "us-east-2" +mdupont@mdupont-G470:~/2024/12/swarms-terraform/accounts/swarms$ tofu apply +module.swarms_api.module.vpc.data.aws_availability_zones.available: Reading... +module.swarms_api.module.kp.aws_key_pair.mdupont-deployer: Refreshing state... [id=mdupont-deployer-key] +module.swarms_api.module.vpc.module.vpc.aws_vpc.this[0]: Refreshing state... 
[id=vpc-0b4cedd083227068d] +module.swarms_api.module.vpc.data.aws_availability_zones.available: Read complete after 0s [id=us-east-2] +module.swarms_api.module.vpc.module.vpc.aws_default_route_table.default[0]: Refreshing state... [id=rtb-053dc8c3d37f2f58f] +module.swarms_api.module.vpc.module.vpc.aws_default_security_group.this[0]: Refreshing state... [id=sg-0a42c3ef338285431] +module.swarms_api.module.vpc.module.vpc.aws_default_network_acl.this[0]: Refreshing state... [id=acl-07d9084294e0492bd] +module.swarms_api.module.vpc.module.vpc.aws_subnet.private[2]: Refreshing state... [id=subnet-0e926a2b5ae3f5acc] +module.swarms_api.module.vpc.module.vpc.aws_subnet.private[1]: Refreshing state... [id=subnet-0de17633cfb166d29] +module.swarms_api.module.vpc.module.vpc.aws_subnet.private[0]: Refreshing state... [id=subnet-0ea44af2f97e12b1a] +module.swarms_api.module.vpc.module.vpc.aws_subnet.public[0]: Refreshing state... [id=subnet-04b3bdd4b0dc877f0] +module.swarms_api.module.vpc.module.vpc.aws_subnet.public[1]: Refreshing state... [id=subnet-051fecdee9028af79] +module.swarms_api.module.vpc.module.vpc.aws_subnet.public[2]: Refreshing state... [id=subnet-027f4abd09a6543f0] +module.swarms_api.module.vpc.module.vpc.aws_route_table.private[2]: Refreshing state... [id=rtb-081f2bb265d2703ee] +module.swarms_api.module.vpc.module.vpc.aws_route_table.private[0]: Refreshing state... [id=rtb-033eec802f743baca] +module.swarms_api.module.vpc.module.vpc.aws_route_table.private[1]: Refreshing state... [id=rtb-0bbc3caa1cdc40cd0] +module.swarms_api.module.vpc.module.vpc.aws_internet_gateway.this[0]: Refreshing state... [id=igw-022da11de86f41ae0] +module.swarms_api.module.vpc.module.vpc.aws_route_table.public[0]: Refreshing state... [id=rtb-0445a2ea1c510657b] +module.swarms_api.module.security.module.asg_sg_internal.aws_security_group.this_name_prefix[0]: Refreshing state... 
[id=sg-0516329c97694b300] +module.swarms_api.module.security.module.asg_sg.aws_security_group.this_name_prefix[0]: Refreshing state... [id=sg-09e0227357b33ab1e] +module.swarms_api.module.vpc.module.vpc.aws_route_table_association.private[0]: Refreshing state... [id=rtbassoc-06f87a2b8106090d9] +module.swarms_api.module.vpc.module.vpc.aws_route_table_association.private[2]: Refreshing state... [id=rtbassoc-0657562cf0177710a] +module.swarms_api.module.vpc.module.vpc.aws_route_table_association.private[1]: Refreshing state... [id=rtbassoc-085a04c2858a3164e] +module.swarms_api.module.vpc.module.vpc.aws_route.public_internet_gateway[0]: Refreshing state... [id=r-rtb-0445a2ea1c510657b1080289494] +module.swarms_api.module.vpc.module.vpc.aws_route_table_association.public[0]: Refreshing state... [id=rtbassoc-033c4fa608f79a594] +module.swarms_api.module.vpc.module.vpc.aws_route_table_association.public[1]: Refreshing state... [id=rtbassoc-000d8799150d904e6] +module.swarms_api.module.vpc.module.vpc.aws_route_table_association.public[2]: Refreshing state... [id=rtbassoc-0b047c79fe554852d] +module.swarms_api.module.security.module.asg_sg_internal.aws_security_group_rule.egress_rules[0]: Refreshing state... [id=sgrule-1938904650] +module.swarms_api.module.security.module.asg_sg.aws_security_group_rule.egress_rules[0]: Refreshing state... [id=sgrule-2310235202] +module.swarms_api.module.security.module.asg_sg_internal.aws_security_group_rule.ingress_with_source_security_group_id[0]: Refreshing state... [id=sgrule-513653582] +module.swarms_api.module.security.module.asg_sg.aws_security_group_rule.ingress_rules[0]: Refreshing state... [id=sgrule-2188377888] +module.swarms_api.module.lt_dynamic["t3.medium"].aws_launch_template.ec2_launch_template: Refreshing state... [id=lt-0ef5358bfbb143f2d] +module.swarms_api.module.security.module.asg_sg.aws_security_group_rule.ingress_rules[1]: Refreshing state... 
[id=sgrule-1557830365] + +Changes to Outputs: + ~ api = { + + vpc = { + + cgw_arns = [] + + cgw_ids = [] + + default_network_acl_id = "acl-07d9084294e0492bd" + + default_route_table_id = "rtb-053dc8c3d37f2f58f" + + default_security_group_id = "sg-0a42c3ef338285431" + + default_vpc_arn = null + + default_vpc_cidr_block = null + + default_vpc_default_network_acl_id = null + + default_vpc_default_route_table_id = null + + default_vpc_default_security_group_id = null + + default_vpc_enable_dns_hostnames = null + + default_vpc_enable_dns_support = null + + default_vpc_id = null + + default_vpc_instance_tenancy = null + + default_vpc_main_route_table_id = null + + dhcp_options_id = null + + ec2_public_subnet_id_1 = 1 + + ec2_public_subnet_id_2 = 1 + + egress_only_internet_gateway_id = null + + igw_arn = "arn:aws:ec2:us-east-2:916723593639:internet-gateway/igw-022da11de86f41ae0" + + igw_id = "igw-022da11de86f41ae0" + + nat_ids = [] + + nat_public_ips = [] + + natgw_ids = [] + + private_ipv6_egress_route_ids = [] + + private_nat_gateway_route_ids = [] + + private_network_acl_arn = null + + private_network_acl_id = null + + private_route_table_association_ids = [ + + "rtbassoc-06f87a2b8106090d9", + + "rtbassoc-085a04c2858a3164e", + + "rtbassoc-0657562cf0177710a", + ] + + private_route_table_ids = [ + + "rtb-033eec802f743baca", + + "rtb-0bbc3caa1cdc40cd0", + + "rtb-081f2bb265d2703ee", + ] + + private_subnet_arns = [ + + "arn:aws:ec2:us-east-2:916723593639:subnet/subnet-0ea44af2f97e12b1a", + + "arn:aws:ec2:us-east-2:916723593639:subnet/subnet-0de17633cfb166d29", + + "arn:aws:ec2:us-east-2:916723593639:subnet/subnet-0e926a2b5ae3f5acc", + ] + + private_subnets = [ + + "subnet-0ea44af2f97e12b1a", + + "subnet-0de17633cfb166d29", + + "subnet-0e926a2b5ae3f5acc", + ] + + private_subnets_cidr_blocks = [ + + "10.0.0.0/24", + + "10.0.1.0/24", + + "10.0.2.0/24", + ] + + private_subnets_ipv6_cidr_blocks = [] + + public_internet_gateway_ipv6_route_id = null + + 
public_internet_gateway_route_id = "r-rtb-0445a2ea1c510657b1080289494" + + public_network_acl_arn = null + + public_network_acl_id = null + + public_route_table_association_ids = [ + + "rtbassoc-033c4fa608f79a594", + + "rtbassoc-000d8799150d904e6", + + "rtbassoc-0b047c79fe554852d", + ] + + public_route_table_ids = [ + + "rtb-0445a2ea1c510657b", + ] + + public_subnet_arns = [ + + "arn:aws:ec2:us-east-2:916723593639:subnet/subnet-04b3bdd4b0dc877f0", + + "arn:aws:ec2:us-east-2:916723593639:subnet/subnet-051fecdee9028af79", + + "arn:aws:ec2:us-east-2:916723593639:subnet/subnet-027f4abd09a6543f0", + ] + + public_subnets = [ + + "subnet-04b3bdd4b0dc877f0", + + "subnet-051fecdee9028af79", + + "subnet-027f4abd09a6543f0", + ] + + public_subnets_cidr_blocks = [ + + "10.0.4.0/24", + + "10.0.5.0/24", + + "10.0.6.0/24", + ] + + public_subnets_ipv6_cidr_blocks = [] + + this_customer_gateway = {} + + vgw_arn = null + + vgw_id = null + + vpc = { + + azs = [ + + "us-east-2a", + + "us-east-2b", + + "us-east-2c", + ] + + cgw_arns = [] + + cgw_ids = [] + + database_internet_gateway_route_id = null + + database_ipv6_egress_route_id = null + + database_nat_gateway_route_ids = [] + + database_network_acl_arn = null + + database_network_acl_id = null + + database_route_table_association_ids = [] + + database_route_table_ids = [ + + "rtb-033eec802f743baca", + + "rtb-0bbc3caa1cdc40cd0", + + "rtb-081f2bb265d2703ee", + ] + + database_subnet_arns = [] + + database_subnet_group = null + + database_subnet_group_name = null + + database_subnet_objects = [] + + database_subnets = [] + + database_subnets_cidr_blocks = [] + + database_subnets_ipv6_cidr_blocks = [] + + default_network_acl_id = "acl-07d9084294e0492bd" + + default_route_table_id = "rtb-053dc8c3d37f2f58f" + + default_security_group_id = "sg-0a42c3ef338285431" + + default_vpc_arn = null + + default_vpc_cidr_block = null + + default_vpc_default_network_acl_id = null + + default_vpc_default_route_table_id = null + + 
default_vpc_default_security_group_id = null + + default_vpc_enable_dns_hostnames = null + + default_vpc_enable_dns_support = null + + default_vpc_id = null + + default_vpc_instance_tenancy = null + + default_vpc_main_route_table_id = null + + dhcp_options_id = null + + egress_only_internet_gateway_id = null + + elasticache_network_acl_arn = null + + elasticache_network_acl_id = null + + elasticache_route_table_association_ids = [] + + elasticache_route_table_ids = [ + + "rtb-033eec802f743baca", + + "rtb-0bbc3caa1cdc40cd0", + + "rtb-081f2bb265d2703ee", + ] + + elasticache_subnet_arns = [] + + elasticache_subnet_group = null + + elasticache_subnet_group_name = null + + elasticache_subnet_objects = [] + + elasticache_subnets = [] + + elasticache_subnets_cidr_blocks = [] + + elasticache_subnets_ipv6_cidr_blocks = [] + + igw_arn = "arn:aws:ec2:us-east-2:916723593639:internet-gateway/igw-022da11de86f41ae0" + + igw_id = "igw-022da11de86f41ae0" + + intra_network_acl_arn = null + + intra_network_acl_id = null + + intra_route_table_association_ids = [] + + intra_route_table_ids = [] + + intra_subnet_arns = [] + + intra_subnet_objects = [] + + intra_subnets = [] + + intra_subnets_cidr_blocks = [] + + intra_subnets_ipv6_cidr_blocks = [] + + name = "swarms" + + nat_ids = [] + + nat_public_ips = [] + + natgw_ids = [] + + natgw_interface_ids = [] + + outpost_network_acl_arn = null + + outpost_network_acl_id = null + + outpost_subnet_arns = [] + + outpost_subnet_objects = [] + + outpost_subnets = [] + + outpost_subnets_cidr_blocks = [] + + outpost_subnets_ipv6_cidr_blocks = [] + + private_ipv6_egress_route_ids = [] + + private_nat_gateway_route_ids = [] + + private_network_acl_arn = null + + private_network_acl_id = null + + private_route_table_association_ids = [ + + "rtbassoc-06f87a2b8106090d9", + + "rtbassoc-085a04c2858a3164e", + + "rtbassoc-0657562cf0177710a", + ] + + private_route_table_ids = [ + + "rtb-033eec802f743baca", + + "rtb-0bbc3caa1cdc40cd0", + + 
"rtb-081f2bb265d2703ee", + ] + + private_subnet_arns = [ + + "arn:aws:ec2:us-east-2:916723593639:subnet/subnet-0ea44af2f97e12b1a", + + "arn:aws:ec2:us-east-2:916723593639:subnet/subnet-0de17633cfb166d29", + + "arn:aws:ec2:us-east-2:916723593639:subnet/subnet-0e926a2b5ae3f5acc", + ] + + private_subnet_objects = [ + + { + + arn = "arn:aws:ec2:us-east-2:916723593639:subnet/subnet-0ea44af2f97e12b1a" + + assign_ipv6_address_on_creation = false + + availability_zone = "us-east-2a" + + availability_zone_id = "use2-az1" + + cidr_block = "10.0.0.0/24" + + customer_owned_ipv4_pool = "" + + enable_dns64 = false + + enable_lni_at_device_index = 0 + + enable_resource_name_dns_a_record_on_launch = false + + enable_resource_name_dns_aaaa_record_on_launch = false + + id = "subnet-0ea44af2f97e12b1a" + + ipv6_cidr_block = "" + + ipv6_cidr_block_association_id = "" + + ipv6_native = false + + map_customer_owned_ip_on_launch = false + + map_public_ip_on_launch = false + + outpost_arn = "" + + owner_id = "916723593639" + + private_dns_hostname_type_on_launch = "ip-name" + + tags = { + + Name = "Private Subnet One" + + project = "swarms" + } + + tags_all = { + + Name = "Private Subnet One" + + project = "swarms" + } + + timeouts = null + + vpc_id = "vpc-0b4cedd083227068d" + }, + + { + + arn = "arn:aws:ec2:us-east-2:916723593639:subnet/subnet-0de17633cfb166d29" + + assign_ipv6_address_on_creation = false + + availability_zone = "us-east-2b" + + availability_zone_id = "use2-az2" + + cidr_block = "10.0.1.0/24" + + customer_owned_ipv4_pool = "" + + enable_dns64 = false + + enable_lni_at_device_index = 0 + + enable_resource_name_dns_a_record_on_launch = false + + enable_resource_name_dns_aaaa_record_on_launch = false + + id = "subnet-0de17633cfb166d29" + + ipv6_cidr_block = "" + + ipv6_cidr_block_association_id = "" + + ipv6_native = false + + map_customer_owned_ip_on_launch = false + + map_public_ip_on_launch = false + + outpost_arn = "" + + owner_id = "916723593639" + + 
private_dns_hostname_type_on_launch = "ip-name" + + tags = { + + Name = "Private Subnet Two" + + project = "swarms" + } + + tags_all = { + + Name = "Private Subnet Two" + + project = "swarms" + } + + timeouts = null + + vpc_id = "vpc-0b4cedd083227068d" + }, + + { + + arn = "arn:aws:ec2:us-east-2:916723593639:subnet/subnet-0e926a2b5ae3f5acc" + + assign_ipv6_address_on_creation = false + + availability_zone = "us-east-2c" + + availability_zone_id = "use2-az3" + + cidr_block = "10.0.2.0/24" + + customer_owned_ipv4_pool = "" + + enable_dns64 = false + + enable_lni_at_device_index = 0 + + enable_resource_name_dns_a_record_on_launch = false + + enable_resource_name_dns_aaaa_record_on_launch = false + + id = "subnet-0e926a2b5ae3f5acc" + + ipv6_cidr_block = "" + + ipv6_cidr_block_association_id = "" + + ipv6_native = false + + map_customer_owned_ip_on_launch = false + + map_public_ip_on_launch = false + + outpost_arn = "" + + owner_id = "916723593639" + + private_dns_hostname_type_on_launch = "ip-name" + + tags = { + + Name = "swarms-private-us-east-2c" + + project = "swarms" + } + + tags_all = { + + Name = "swarms-private-us-east-2c" + + project = "swarms" + } + + timeouts = null + + vpc_id = "vpc-0b4cedd083227068d" + }, + ] + + private_subnets = [ + + "subnet-0ea44af2f97e12b1a", + + "subnet-0de17633cfb166d29", + + "subnet-0e926a2b5ae3f5acc", + ] + + private_subnets_cidr_blocks = [ + + "10.0.0.0/24", + + "10.0.1.0/24", + + "10.0.2.0/24", + ] + + private_subnets_ipv6_cidr_blocks = [] + + public_internet_gateway_ipv6_route_id = null + + public_internet_gateway_route_id = "r-rtb-0445a2ea1c510657b1080289494" + + public_network_acl_arn = null + + public_network_acl_id = null + + public_route_table_association_ids = [ + + "rtbassoc-033c4fa608f79a594", + + "rtbassoc-000d8799150d904e6", + + "rtbassoc-0b047c79fe554852d", + ] + + public_route_table_ids = [ + + "rtb-0445a2ea1c510657b", + ] + + public_subnet_arns = [ + + 
"arn:aws:ec2:us-east-2:916723593639:subnet/subnet-04b3bdd4b0dc877f0", + + "arn:aws:ec2:us-east-2:916723593639:subnet/subnet-051fecdee9028af79", + + "arn:aws:ec2:us-east-2:916723593639:subnet/subnet-027f4abd09a6543f0", + ] + + public_subnet_objects = [ + + { + + arn = "arn:aws:ec2:us-east-2:916723593639:subnet/subnet-04b3bdd4b0dc877f0" + + assign_ipv6_address_on_creation = false + + availability_zone = "us-east-2a" + + availability_zone_id = "use2-az1" + + cidr_block = "10.0.4.0/24" + + customer_owned_ipv4_pool = "" + + enable_dns64 = false + + enable_lni_at_device_index = 0 + + enable_resource_name_dns_a_record_on_launch = false + + enable_resource_name_dns_aaaa_record_on_launch = false + + id = "subnet-04b3bdd4b0dc877f0" + + ipv6_cidr_block = "" + + ipv6_cidr_block_association_id = "" + + ipv6_native = false + + map_customer_owned_ip_on_launch = false + + map_public_ip_on_launch = false + + outpost_arn = "" + + owner_id = "916723593639" + + private_dns_hostname_type_on_launch = "ip-name" + + tags = { + + Name = "swarms-public-us-east-2a" + + project = "swarms" + } + + tags_all = { + + Name = "swarms-public-us-east-2a" + + project = "swarms" + } + + timeouts = null + + vpc_id = "vpc-0b4cedd083227068d" + }, + + { + + arn = "arn:aws:ec2:us-east-2:916723593639:subnet/subnet-051fecdee9028af79" + + assign_ipv6_address_on_creation = false + + availability_zone = "us-east-2b" + + availability_zone_id = "use2-az2" + + cidr_block = "10.0.5.0/24" + + customer_owned_ipv4_pool = "" + + enable_dns64 = false + + enable_lni_at_device_index = 0 + + enable_resource_name_dns_a_record_on_launch = false + + enable_resource_name_dns_aaaa_record_on_launch = false + + id = "subnet-051fecdee9028af79" + + ipv6_cidr_block = "" + + ipv6_cidr_block_association_id = "" + + ipv6_native = false + + map_customer_owned_ip_on_launch = false + + map_public_ip_on_launch = false + + outpost_arn = "" + + owner_id = "916723593639" + + private_dns_hostname_type_on_launch = "ip-name" + + tags = { + + Name 
= "swarms-public-us-east-2b" + + project = "swarms" + } + + tags_all = { + + Name = "swarms-public-us-east-2b" + + project = "swarms" + } + + timeouts = null + + vpc_id = "vpc-0b4cedd083227068d" + }, + + { + + arn = "arn:aws:ec2:us-east-2:916723593639:subnet/subnet-027f4abd09a6543f0" + + assign_ipv6_address_on_creation = false + + availability_zone = "us-east-2c" + + availability_zone_id = "use2-az3" + + cidr_block = "10.0.6.0/24" + + customer_owned_ipv4_pool = "" + + enable_dns64 = false + + enable_lni_at_device_index = 0 + + enable_resource_name_dns_a_record_on_launch = false + + enable_resource_name_dns_aaaa_record_on_launch = false + + id = "subnet-027f4abd09a6543f0" + + ipv6_cidr_block = "" + + ipv6_cidr_block_association_id = "" + + ipv6_native = false + + map_customer_owned_ip_on_launch = false + + map_public_ip_on_launch = false + + outpost_arn = "" + + owner_id = "916723593639" + + private_dns_hostname_type_on_launch = "ip-name" + + tags = { + + Name = "swarms-public-us-east-2c" + + project = "swarms" + } + + tags_all = { + + Name = "swarms-public-us-east-2c" + + project = "swarms" + } + + timeouts = null + + vpc_id = "vpc-0b4cedd083227068d" + }, + ] + + public_subnets = [ + + "subnet-04b3bdd4b0dc877f0", + + "subnet-051fecdee9028af79", + + "subnet-027f4abd09a6543f0", + ] + + public_subnets_cidr_blocks = [ + + "10.0.4.0/24", + + "10.0.5.0/24", + + "10.0.6.0/24", + ] + + public_subnets_ipv6_cidr_blocks = [] + + redshift_network_acl_arn = null + + redshift_network_acl_id = null + + redshift_public_route_table_association_ids = [] + + redshift_route_table_association_ids = [] + + redshift_route_table_ids = [ + + "rtb-033eec802f743baca", + + "rtb-0bbc3caa1cdc40cd0", + + "rtb-081f2bb265d2703ee", + ] + + redshift_subnet_arns = [] + + redshift_subnet_group = null + + redshift_subnet_objects = [] + + redshift_subnets = [] + + redshift_subnets_cidr_blocks = [] + + redshift_subnets_ipv6_cidr_blocks = [] + + this_customer_gateway = {} + + vgw_arn = null + + vgw_id = 
null + + vpc_arn = "arn:aws:ec2:us-east-2:916723593639:vpc/vpc-0b4cedd083227068d" + + vpc_cidr_block = "10.0.0.0/16" + + vpc_enable_dns_hostnames = true + + vpc_enable_dns_support = true + + vpc_flow_log_cloudwatch_iam_role_arn = "" + + vpc_flow_log_deliver_cross_account_role = null + + vpc_flow_log_destination_arn = "" + + vpc_flow_log_destination_type = "cloud-watch-logs" + + vpc_flow_log_id = null + + vpc_id = "vpc-0b4cedd083227068d" + + vpc_instance_tenancy = "default" + + vpc_ipv6_association_id = "" + + vpc_ipv6_cidr_block = "" + + vpc_main_route_table_id = "rtb-053dc8c3d37f2f58f" + + vpc_owner_id = "916723593639" + + vpc_secondary_cidr_blocks = [] + } + + vpc_arn = "arn:aws:ec2:us-east-2:916723593639:vpc/vpc-0b4cedd083227068d" + + vpc_cidr_block = "10.0.0.0/16" + + vpc_enable_dns_hostnames = true + + vpc_enable_dns_support = true + + vpc_flow_log_cloudwatch_iam_role_arn = "" + + vpc_flow_log_destination_arn = "" + + vpc_flow_log_destination_type = "cloud-watch-logs" + + vpc_flow_log_id = null + + vpc_id = "vpc-0b4cedd083227068d" + + vpc_instance_tenancy = "default" + + vpc_ipv6_association_id = "" + + vpc_ipv6_cidr_block = "" + + vpc_main_route_table_id = "rtb-053dc8c3d37f2f58f" + + vpc_owner_id = "916723593639" + + vpc_secondary_cidr_blocks = [] + } + # (1 unchanged attribute hidden) + } + +You can apply this plan to save these new output values to the OpenTofu state, without changing any real +infrastructure. + +Do you want to perform these actions? + OpenTofu will perform the actions described above. + Only 'yes' will be accepted to approve. + + Enter a value: yes + + +Apply complete! Resources: 0 added, 0 changed, 0 destroyed. 
+ +Outputs: + +account = "916723593639" +api = { + "security_group_id" = "sg-09e0227357b33ab1e" + "vpc" = { + "cgw_arns" = [] + "cgw_ids" = [] + "default_network_acl_id" = "acl-07d9084294e0492bd" + "default_route_table_id" = "rtb-053dc8c3d37f2f58f" + "default_security_group_id" = "sg-0a42c3ef338285431" + "default_vpc_arn" = null + "default_vpc_cidr_block" = null + "default_vpc_default_network_acl_id" = null + "default_vpc_default_route_table_id" = null + "default_vpc_default_security_group_id" = null + "default_vpc_enable_dns_hostnames" = null + "default_vpc_enable_dns_support" = null + "default_vpc_id" = null + "default_vpc_instance_tenancy" = null + "default_vpc_main_route_table_id" = null + "dhcp_options_id" = null + "ec2_public_subnet_id_1" = 1 + "ec2_public_subnet_id_2" = 1 + "egress_only_internet_gateway_id" = null + "igw_arn" = "arn:aws:ec2:us-east-2:916723593639:internet-gateway/igw-022da11de86f41ae0" + "igw_id" = "igw-022da11de86f41ae0" + "nat_ids" = [] + "nat_public_ips" = tolist([]) + "natgw_ids" = [] + "private_ipv6_egress_route_ids" = [] + "private_nat_gateway_route_ids" = [] + "private_network_acl_arn" = null + "private_network_acl_id" = null + "private_route_table_association_ids" = [ + "rtbassoc-06f87a2b8106090d9", + "rtbassoc-085a04c2858a3164e", + "rtbassoc-0657562cf0177710a", + ] + "private_route_table_ids" = [ + "rtb-033eec802f743baca", + "rtb-0bbc3caa1cdc40cd0", + "rtb-081f2bb265d2703ee", + ] + "private_subnet_arns" = [ + "arn:aws:ec2:us-east-2:916723593639:subnet/subnet-0ea44af2f97e12b1a", + "arn:aws:ec2:us-east-2:916723593639:subnet/subnet-0de17633cfb166d29", + "arn:aws:ec2:us-east-2:916723593639:subnet/subnet-0e926a2b5ae3f5acc", + ] + "private_subnets" = [ + "subnet-0ea44af2f97e12b1a", + "subnet-0de17633cfb166d29", + "subnet-0e926a2b5ae3f5acc", + ] + "private_subnets_cidr_blocks" = tolist([ + "10.0.0.0/24", + "10.0.1.0/24", + "10.0.2.0/24", + ]) + "private_subnets_ipv6_cidr_blocks" = tolist([]) + "public_internet_gateway_ipv6_route_id" = null 
+ "public_internet_gateway_route_id" = "r-rtb-0445a2ea1c510657b1080289494" + "public_network_acl_arn" = null + "public_network_acl_id" = null + "public_route_table_association_ids" = [ + "rtbassoc-033c4fa608f79a594", + "rtbassoc-000d8799150d904e6", + "rtbassoc-0b047c79fe554852d", + ] + "public_route_table_ids" = [ + "rtb-0445a2ea1c510657b", + ] + "public_subnet_arns" = [ + "arn:aws:ec2:us-east-2:916723593639:subnet/subnet-04b3bdd4b0dc877f0", + "arn:aws:ec2:us-east-2:916723593639:subnet/subnet-051fecdee9028af79", + "arn:aws:ec2:us-east-2:916723593639:subnet/subnet-027f4abd09a6543f0", + ] + "public_subnets" = [ + "subnet-04b3bdd4b0dc877f0", + "subnet-051fecdee9028af79", + "subnet-027f4abd09a6543f0", + ] + "public_subnets_cidr_blocks" = tolist([ + "10.0.4.0/24", + "10.0.5.0/24", + "10.0.6.0/24", + ]) + "public_subnets_ipv6_cidr_blocks" = tolist([]) + "this_customer_gateway" = {} + "vgw_arn" = null + "vgw_id" = null + "vpc" = { + "azs" = tolist([ + "us-east-2a", + "us-east-2b", + "us-east-2c", + ]) + "cgw_arns" = [] + "cgw_ids" = [] + "database_internet_gateway_route_id" = null + "database_ipv6_egress_route_id" = null + "database_nat_gateway_route_ids" = [] + "database_network_acl_arn" = null + "database_network_acl_id" = null + "database_route_table_association_ids" = [] + "database_route_table_ids" = tolist([ + "rtb-033eec802f743baca", + "rtb-0bbc3caa1cdc40cd0", + "rtb-081f2bb265d2703ee", + ]) + "database_subnet_arns" = [] + "database_subnet_group" = null + "database_subnet_group_name" = null + "database_subnet_objects" = [] + "database_subnets" = [] + "database_subnets_cidr_blocks" = tolist([]) + "database_subnets_ipv6_cidr_blocks" = tolist([]) + "default_network_acl_id" = "acl-07d9084294e0492bd" + "default_route_table_id" = "rtb-053dc8c3d37f2f58f" + "default_security_group_id" = "sg-0a42c3ef338285431" + "default_vpc_arn" = null + "default_vpc_cidr_block" = null + "default_vpc_default_network_acl_id" = null + "default_vpc_default_route_table_id" = null + 
"default_vpc_default_security_group_id" = null + "default_vpc_enable_dns_hostnames" = null + "default_vpc_enable_dns_support" = null + "default_vpc_id" = null + "default_vpc_instance_tenancy" = null + "default_vpc_main_route_table_id" = null + "dhcp_options_id" = null + "egress_only_internet_gateway_id" = null + "elasticache_network_acl_arn" = null + "elasticache_network_acl_id" = null + "elasticache_route_table_association_ids" = [] + "elasticache_route_table_ids" = [ + "rtb-033eec802f743baca", + "rtb-0bbc3caa1cdc40cd0", + "rtb-081f2bb265d2703ee", + ] + "elasticache_subnet_arns" = [] + "elasticache_subnet_group" = null + "elasticache_subnet_group_name" = null + "elasticache_subnet_objects" = [] + "elasticache_subnets" = [] + "elasticache_subnets_cidr_blocks" = tolist([]) + "elasticache_subnets_ipv6_cidr_blocks" = tolist([]) + "igw_arn" = "arn:aws:ec2:us-east-2:916723593639:internet-gateway/igw-022da11de86f41ae0" + "igw_id" = "igw-022da11de86f41ae0" + "intra_network_acl_arn" = null + "intra_network_acl_id" = null + "intra_route_table_association_ids" = [] + "intra_route_table_ids" = [] + "intra_subnet_arns" = [] + "intra_subnet_objects" = [] + "intra_subnets" = [] + "intra_subnets_cidr_blocks" = tolist([]) + "intra_subnets_ipv6_cidr_blocks" = tolist([]) + "name" = "swarms" + "nat_ids" = [] + "nat_public_ips" = tolist([]) + "natgw_ids" = [] + "natgw_interface_ids" = [] + "outpost_network_acl_arn" = null + "outpost_network_acl_id" = null + "outpost_subnet_arns" = [] + "outpost_subnet_objects" = [] + "outpost_subnets" = [] + "outpost_subnets_cidr_blocks" = tolist([]) + "outpost_subnets_ipv6_cidr_blocks" = tolist([]) + "private_ipv6_egress_route_ids" = [] + "private_nat_gateway_route_ids" = [] + "private_network_acl_arn" = null + "private_network_acl_id" = null + "private_route_table_association_ids" = [ + "rtbassoc-06f87a2b8106090d9", + "rtbassoc-085a04c2858a3164e", + "rtbassoc-0657562cf0177710a", + ] + "private_route_table_ids" = [ + "rtb-033eec802f743baca", + 
"rtb-0bbc3caa1cdc40cd0", + "rtb-081f2bb265d2703ee", + ] + "private_subnet_arns" = [ + "arn:aws:ec2:us-east-2:916723593639:subnet/subnet-0ea44af2f97e12b1a", + "arn:aws:ec2:us-east-2:916723593639:subnet/subnet-0de17633cfb166d29", + "arn:aws:ec2:us-east-2:916723593639:subnet/subnet-0e926a2b5ae3f5acc", + ] + "private_subnet_objects" = [ + { + "arn" = "arn:aws:ec2:us-east-2:916723593639:subnet/subnet-0ea44af2f97e12b1a" + "assign_ipv6_address_on_creation" = false + "availability_zone" = "us-east-2a" + "availability_zone_id" = "use2-az1" + "cidr_block" = "10.0.0.0/24" + "customer_owned_ipv4_pool" = "" + "enable_dns64" = false + "enable_lni_at_device_index" = 0 + "enable_resource_name_dns_a_record_on_launch" = false + "enable_resource_name_dns_aaaa_record_on_launch" = false + "id" = "subnet-0ea44af2f97e12b1a" + "ipv6_cidr_block" = "" + "ipv6_cidr_block_association_id" = "" + "ipv6_native" = false + "map_customer_owned_ip_on_launch" = false + "map_public_ip_on_launch" = false + "outpost_arn" = "" + "owner_id" = "916723593639" + "private_dns_hostname_type_on_launch" = "ip-name" + "tags" = tomap({ + "Name" = "Private Subnet One" + "project" = "swarms" + }) + "tags_all" = tomap({ + "Name" = "Private Subnet One" + "project" = "swarms" + }) + "timeouts" = null /* object */ + "vpc_id" = "vpc-0b4cedd083227068d" + }, + { + "arn" = "arn:aws:ec2:us-east-2:916723593639:subnet/subnet-0de17633cfb166d29" + "assign_ipv6_address_on_creation" = false + "availability_zone" = "us-east-2b" + "availability_zone_id" = "use2-az2" + "cidr_block" = "10.0.1.0/24" + "customer_owned_ipv4_pool" = "" + "enable_dns64" = false + "enable_lni_at_device_index" = 0 + "enable_resource_name_dns_a_record_on_launch" = false + "enable_resource_name_dns_aaaa_record_on_launch" = false + "id" = "subnet-0de17633cfb166d29" + "ipv6_cidr_block" = "" + "ipv6_cidr_block_association_id" = "" + "ipv6_native" = false + "map_customer_owned_ip_on_launch" = false + "map_public_ip_on_launch" = false + "outpost_arn" = "" + 
"owner_id" = "916723593639" + "private_dns_hostname_type_on_launch" = "ip-name" + "tags" = tomap({ + "Name" = "Private Subnet Two" + "project" = "swarms" + }) + "tags_all" = tomap({ + "Name" = "Private Subnet Two" + "project" = "swarms" + }) + "timeouts" = null /* object */ + "vpc_id" = "vpc-0b4cedd083227068d" + }, + { + "arn" = "arn:aws:ec2:us-east-2:916723593639:subnet/subnet-0e926a2b5ae3f5acc" + "assign_ipv6_address_on_creation" = false + "availability_zone" = "us-east-2c" + "availability_zone_id" = "use2-az3" + "cidr_block" = "10.0.2.0/24" + "customer_owned_ipv4_pool" = "" + "enable_dns64" = false + "enable_lni_at_device_index" = 0 + "enable_resource_name_dns_a_record_on_launch" = false + "enable_resource_name_dns_aaaa_record_on_launch" = false + "id" = "subnet-0e926a2b5ae3f5acc" + "ipv6_cidr_block" = "" + "ipv6_cidr_block_association_id" = "" + "ipv6_native" = false + "map_customer_owned_ip_on_launch" = false + "map_public_ip_on_launch" = false + "outpost_arn" = "" + "owner_id" = "916723593639" + "private_dns_hostname_type_on_launch" = "ip-name" + "tags" = tomap({ + "Name" = "swarms-private-us-east-2c" + "project" = "swarms" + }) + "tags_all" = tomap({ + "Name" = "swarms-private-us-east-2c" + "project" = "swarms" + }) + "timeouts" = null /* object */ + "vpc_id" = "vpc-0b4cedd083227068d" + }, + ] + "private_subnets" = [ + "subnet-0ea44af2f97e12b1a", + "subnet-0de17633cfb166d29", + "subnet-0e926a2b5ae3f5acc", + ] + "private_subnets_cidr_blocks" = tolist([ + "10.0.0.0/24", + "10.0.1.0/24", + "10.0.2.0/24", + ]) + "private_subnets_ipv6_cidr_blocks" = tolist([]) + "public_internet_gateway_ipv6_route_id" = null + "public_internet_gateway_route_id" = "r-rtb-0445a2ea1c510657b1080289494" + "public_network_acl_arn" = null + "public_network_acl_id" = null + "public_route_table_association_ids" = [ + "rtbassoc-033c4fa608f79a594", + "rtbassoc-000d8799150d904e6", + "rtbassoc-0b047c79fe554852d", + ] + "public_route_table_ids" = [ + "rtb-0445a2ea1c510657b", + ] + 
"public_subnet_arns" = [ + "arn:aws:ec2:us-east-2:916723593639:subnet/subnet-04b3bdd4b0dc877f0", + "arn:aws:ec2:us-east-2:916723593639:subnet/subnet-051fecdee9028af79", + "arn:aws:ec2:us-east-2:916723593639:subnet/subnet-027f4abd09a6543f0", + ] + "public_subnet_objects" = [ + { + "arn" = "arn:aws:ec2:us-east-2:916723593639:subnet/subnet-04b3bdd4b0dc877f0" + "assign_ipv6_address_on_creation" = false + "availability_zone" = "us-east-2a" + "availability_zone_id" = "use2-az1" + "cidr_block" = "10.0.4.0/24" + "customer_owned_ipv4_pool" = "" + "enable_dns64" = false + "enable_lni_at_device_index" = 0 + "enable_resource_name_dns_a_record_on_launch" = false + "enable_resource_name_dns_aaaa_record_on_launch" = false + "id" = "subnet-04b3bdd4b0dc877f0" + "ipv6_cidr_block" = "" + "ipv6_cidr_block_association_id" = "" + "ipv6_native" = false + "map_customer_owned_ip_on_launch" = false + "map_public_ip_on_launch" = false + "outpost_arn" = "" + "owner_id" = "916723593639" + "private_dns_hostname_type_on_launch" = "ip-name" + "tags" = tomap({ + "Name" = "swarms-public-us-east-2a" + "project" = "swarms" + }) + "tags_all" = tomap({ + "Name" = "swarms-public-us-east-2a" + "project" = "swarms" + }) + "timeouts" = null /* object */ + "vpc_id" = "vpc-0b4cedd083227068d" + }, + { + "arn" = "arn:aws:ec2:us-east-2:916723593639:subnet/subnet-051fecdee9028af79" + "assign_ipv6_address_on_creation" = false + "availability_zone" = "us-east-2b" + "availability_zone_id" = "use2-az2" + "cidr_block" = "10.0.5.0/24" + "customer_owned_ipv4_pool" = "" + "enable_dns64" = false + "enable_lni_at_device_index" = 0 + "enable_resource_name_dns_a_record_on_launch" = false + "enable_resource_name_dns_aaaa_record_on_launch" = false + "id" = "subnet-051fecdee9028af79" + "ipv6_cidr_block" = "" + "ipv6_cidr_block_association_id" = "" + "ipv6_native" = false + "map_customer_owned_ip_on_launch" = false + "map_public_ip_on_launch" = false + "outpost_arn" = "" + "owner_id" = "916723593639" + 
"private_dns_hostname_type_on_launch" = "ip-name" + "tags" = tomap({ + "Name" = "swarms-public-us-east-2b" + "project" = "swarms" + }) + "tags_all" = tomap({ + "Name" = "swarms-public-us-east-2b" + "project" = "swarms" + }) + "timeouts" = null /* object */ + "vpc_id" = "vpc-0b4cedd083227068d" + }, + { + "arn" = "arn:aws:ec2:us-east-2:916723593639:subnet/subnet-027f4abd09a6543f0" + "assign_ipv6_address_on_creation" = false + "availability_zone" = "us-east-2c" + "availability_zone_id" = "use2-az3" + "cidr_block" = "10.0.6.0/24" + "customer_owned_ipv4_pool" = "" + "enable_dns64" = false + "enable_lni_at_device_index" = 0 + "enable_resource_name_dns_a_record_on_launch" = false + "enable_resource_name_dns_aaaa_record_on_launch" = false + "id" = "subnet-027f4abd09a6543f0" + "ipv6_cidr_block" = "" + "ipv6_cidr_block_association_id" = "" + "ipv6_native" = false + "map_customer_owned_ip_on_launch" = false + "map_public_ip_on_launch" = false + "outpost_arn" = "" + "owner_id" = "916723593639" + "private_dns_hostname_type_on_launch" = "ip-name" + "tags" = tomap({ + "Name" = "swarms-public-us-east-2c" + "project" = "swarms" + }) + "tags_all" = tomap({ + "Name" = "swarms-public-us-east-2c" + "project" = "swarms" + }) + "timeouts" = null /* object */ + "vpc_id" = "vpc-0b4cedd083227068d" + }, + ] + "public_subnets" = [ + "subnet-04b3bdd4b0dc877f0", + "subnet-051fecdee9028af79", + "subnet-027f4abd09a6543f0", + ] + "public_subnets_cidr_blocks" = tolist([ + "10.0.4.0/24", + "10.0.5.0/24", + "10.0.6.0/24", + ]) + "public_subnets_ipv6_cidr_blocks" = tolist([]) + "redshift_network_acl_arn" = null + "redshift_network_acl_id" = null + "redshift_public_route_table_association_ids" = [] + "redshift_route_table_association_ids" = [] + "redshift_route_table_ids" = tolist([ + "rtb-033eec802f743baca", + "rtb-0bbc3caa1cdc40cd0", + "rtb-081f2bb265d2703ee", + ]) + "redshift_subnet_arns" = [] + "redshift_subnet_group" = null + "redshift_subnet_objects" = [] + "redshift_subnets" = [] + 
"redshift_subnets_cidr_blocks" = tolist([]) + "redshift_subnets_ipv6_cidr_blocks" = tolist([]) + "this_customer_gateway" = {} + "vgw_arn" = null + "vgw_id" = null + "vpc_arn" = "arn:aws:ec2:us-east-2:916723593639:vpc/vpc-0b4cedd083227068d" + "vpc_cidr_block" = "10.0.0.0/16" + "vpc_enable_dns_hostnames" = true + "vpc_enable_dns_support" = true + "vpc_flow_log_cloudwatch_iam_role_arn" = "" + "vpc_flow_log_deliver_cross_account_role" = null + "vpc_flow_log_destination_arn" = "" + "vpc_flow_log_destination_type" = "cloud-watch-logs" + "vpc_flow_log_id" = null + "vpc_id" = "vpc-0b4cedd083227068d" + "vpc_instance_tenancy" = "default" + "vpc_ipv6_association_id" = "" + "vpc_ipv6_cidr_block" = "" + "vpc_main_route_table_id" = "rtb-053dc8c3d37f2f58f" + "vpc_owner_id" = "916723593639" + "vpc_secondary_cidr_blocks" = tolist([]) + } + "vpc_arn" = "arn:aws:ec2:us-east-2:916723593639:vpc/vpc-0b4cedd083227068d" + "vpc_cidr_block" = "10.0.0.0/16" + "vpc_enable_dns_hostnames" = true + "vpc_enable_dns_support" = true + "vpc_flow_log_cloudwatch_iam_role_arn" = "" + "vpc_flow_log_destination_arn" = "" + "vpc_flow_log_destination_type" = "cloud-watch-logs" + "vpc_flow_log_id" = null + "vpc_id" = "vpc-0b4cedd083227068d" + "vpc_instance_tenancy" = "default" + "vpc_ipv6_association_id" = "" + "vpc_ipv6_cidr_block" = "" + "vpc_main_route_table_id" = "rtb-053dc8c3d37f2f58f" + "vpc_owner_id" = "916723593639" + "vpc_secondary_cidr_blocks" = tolist([]) + } +} +dns = "api.swarms.ai" +profile = "swarms" +region = "us-east-2" +mdupont@mdupont-G470:~/2024/12/swarms-terraform/accounts/swarms$ diff --git a/accounts/swarms/mdupont/main.tf b/accounts/swarms/mdupont/main.tf new file mode 100644 index 0000000..e8eef16 --- /dev/null +++ b/accounts/swarms/mdupont/main.tf @@ -0,0 +1,21 @@ +# aws sts assume-role --role-arn arn:aws:iam::916723593639:role/github --profile mdupont --role-session-name mdupont +resource "aws_iam_role" "pdev" { + name = "mdupont_external" + description = "mdupont external role" + 
+ assume_role_policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Effect = "Allow" + Principal = { + #AWS = "arn:aws:iam::354918380242:root" + AWS = "arn:aws:iam::767503528736:user/mdupont" + } + Action = "sts:AssumeRole" + #"Condition": { "Bool": { "aws:MultiFactorAuthPresent": "true" } } + } + + ] + }) +} diff --git a/accounts/swarms/pdev/main.tf b/accounts/swarms/pdev/main.tf new file mode 100644 index 0000000..848b7ef --- /dev/null +++ b/accounts/swarms/pdev/main.tf @@ -0,0 +1,20 @@ + +resource "aws_iam_role" "pdev" { + name = "pdev" + description = "pdev role" + + assume_role_policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Effect = "Allow" + Principal = { + AWS = "arn:aws:iam::354918380242:root" + } + Action = "sts:AssumeRole" + # "Condition": { "Bool": { "aws:MultiFactorAuthPresent": "true" } } + } + + ] + }) +} diff --git a/actions/Makefile b/actions/Makefile new file mode 100644 index 0000000..838d5de --- /dev/null +++ b/actions/Makefile @@ -0,0 +1,2 @@ +test: + AWS_PROFILE=swarms bash ./call_swarms.sh diff --git a/actions/Readme.md b/actions/Readme.md new file mode 100644 index 0000000..99efffb --- /dev/null +++ b/actions/Readme.md @@ -0,0 +1,37 @@ +Welcome to the the call swarms ssm framework, +it allows invocation of the swarms agent on a remote ssm server via the aws api. +This can be almost any cloud server that is reachable via ssm, that can be on prem +and in theory on another cloud. + +The file actions/call_swarms.sh is a Bash script designed to interact with AWS +services. It sets up environment variables for AWS region, tag keys and values, +Git repository details, and other parameters. The script defines functions to +retrieve instance IDs, send commands to instances via AWS SSM, and fetch command +outputs. It iterates over the instances, sends commands, waits for execution, +and retrieves outputs, logging them to CloudWatch. 
The script is structured to +facilitate remote invocation of a swarms agent on cloud servers. + +The script called is defined in `environments/call-swarms/main.tf` and applied in accounts/swarms/ like +```bash +pushd ../../accounts/swarms/ +#tofu apply +# apply only the one change +tofu apply -auto-approve --target module.call_swarms.aws_ssm_document.deploy-docker +# terraform apply +popd +``` + +It includes the file +resource "aws_ssm_document" "deploy" { + content = file("${local.codebase_root_path}/environments/call-swarms/deploy.yaml") +it will call + sudo su -c "bash -e -x /var/swarms/agent_workspace/boot.sh" swarms + + +and resource "aws_ssm_document" "deploy-docker" { + ~/swarms-terraform/environments/call-swarms/deploy-docker.yaml + It will call + sudo bash -e -x /opt/swarms/api/docker-boot.sh # needs root access + +The permissions to call aws from github is applied here in this terraform code +https://github.com/jmikedupont2/terraform-aws-oidc-github/pull/1 diff --git a/actions/call_swarms.org b/actions/call_swarms.org new file mode 100644 index 0000000..bf11307 --- /dev/null +++ b/actions/call_swarms.org @@ -0,0 +1,409 @@ +mdupont@mdupont-G470:~/swarms-terraform/actions$ AWS_PROFILE=swarms bash ./call_swarms.sh + ++ export REGION=us-east-2 ++ REGION=us-east-2 ++ TAG_KEY=Name ++ TAG_VALUE=docker-swarms-ami-t3.medium ++ GIT_URL=https://github.com/jmikedupont/swarms ++ export GIT_NAME=mdupont ++ GIT_NAME=mdupont ++ export GIT_VERSION=feature/squash2-docker ++ GIT_VERSION=feature/squash2-docker ++ DOCUMENT_NAME=deploy-docker ++ DOCUMENT_VERSION=1 ++ TIMEOUT_SECONDS=600 ++ MAX_CONCURRENCY=50 ++ MAX_ERRORS=0 +++ get_instance_ids +++ aws ec2 describe-instances --filters Name=tag:Name,Values=docker-swarms-ami-t3.medium --query 'Reservations[*].Instances[*].InstanceId' --output text --region us-east-2 ++ for instance in $(get_instance_ids) ++ echo 'Instance ID: i-0a3dae164f8f3c09a' +Instance ID: i-0a3dae164f8f3c09a +++ send_command i-0a3dae164f8f3c09a +++ local 
instance_id=i-0a3dae164f8f3c09a +++ aws ssm send-command --document-name deploy-docker --document-version 1 --targets '[{"Key":"InstanceIds","Values":["i-0a3dae164f8f3c09a"]}]' --parameters '{"GitUrl":["https://github.com/jmikedupont/swarms"],"GitName":["mdupont"],"GitVersion":["feature/squash2-docker"]}' --timeout-seconds 600 --max-concurrency 50 --max-errors 0 --region us-east-2 --output-s3-bucket-name swarms-session-logs-20241221151754799300000003 --cloud-watch-output-config '{"CloudWatchOutputEnabled":true,"CloudWatchLogGroupName":"/ssm/session-logs-20241221151803393300000006"}' ++ result='{ + "Command": { + "CommandId": "82d43144-a4f4-4b6d-a507-23ad5179e0b4", + "DocumentName": "deploy-docker", + "DocumentVersion": "1", + "Comment": "", + "ExpiresAfter": 1735009432.928, + "Parameters": { + "GitName": [ + "mdupont" + ], + "GitUrl": [ + "https://github.com/jmikedupont/swarms" + ], + "GitVersion": [ + "feature/squash2-docker" + ] + }, + "InstanceIds": [], + "Targets": [ + { + "Key": "InstanceIds", + "Values": [ + "i-0a3dae164f8f3c09a" + ] + } + ], + "RequestedDateTime": 1735005232.928, + "Status": "Pending", + "StatusDetails": "Pending", + "OutputS3Region": "us-east-2", + "OutputS3BucketName": "swarms-session-logs-20241221151754799300000003", + "OutputS3KeyPrefix": "", + "MaxConcurrency": "50", + "MaxErrors": "0", + "TargetCount": 0, + "CompletedCount": 0, + "ErrorCount": 0, + "DeliveryTimedOutCount": 0, + "ServiceRole": "", + "NotificationConfig": { + "NotificationArn": "", + "NotificationEvents": [], + "NotificationType": "" + }, + "CloudWatchOutputConfig": { + "CloudWatchLogGroupName": "/ssm/session-logs-20241221151803393300000006", + "CloudWatchOutputEnabled": true + }, + "TimeoutSeconds": 600, + "AlarmConfiguration": { + "IgnorePollAlarmFailure": false, + "Alarms": [] + }, + "TriggeredAlarms": [] + } +}' +++ jq -r .Command.CommandId +++ echo '{' '"Command":' '{' '"CommandId":' '"82d43144-a4f4-4b6d-a507-23ad5179e0b4",' '"DocumentName":' '"deploy-docker",' 
'"DocumentVersion":' '"1",' '"Comment":' '"",' '"ExpiresAfter":' 1735009432.928, '"Parameters":' '{' '"GitName":' '[' '"mdupont"' '],' '"GitUrl":' '[' '"https://github.com/jmikedupont/swarms"' '],' '"GitVersion":' '[' '"feature/squash2-docker"' ']' '},' '"InstanceIds":' '[],' '"Targets":' '[' '{' '"Key":' '"InstanceIds",' '"Values":' '[' '"i-0a3dae164f8f3c09a"' ']' '}' '],' '"RequestedDateTime":' 1735005232.928, '"Status":' '"Pending",' '"StatusDetails":' '"Pending",' '"OutputS3Region":' '"us-east-2",' '"OutputS3BucketName":' '"swarms-session-logs-20241221151754799300000003",' '"OutputS3KeyPrefix":' '"",' '"MaxConcurrency":' '"50",' '"MaxErrors":' '"0",' '"TargetCount":' 0, '"CompletedCount":' 0, '"ErrorCount":' 0, '"DeliveryTimedOutCount":' 0, '"ServiceRole":' '"",' '"NotificationConfig":' '{' '"NotificationArn":' '"",' '"NotificationEvents":' '[],' '"NotificationType":' '""' '},' '"CloudWatchOutputConfig":' '{' '"CloudWatchLogGroupName":' '"/ssm/session-logs-20241221151803393300000006",' '"CloudWatchOutputEnabled":' true '},' '"TimeoutSeconds":' 600, '"AlarmConfiguration":' '{' '"IgnorePollAlarmFailure":' false, '"Alarms":' '[]' '},' '"TriggeredAlarms":' '[]' '}' '}' ++ command_id=82d43144-a4f4-4b6d-a507-23ad5179e0b4 ++ aws ssm wait command-executed --command-id 82d43144-a4f4-4b6d-a507-23ad5179e0b4 --region us-east-2 --instance i-0a3dae164f8f3c09a ++ fetch_command_output 82d43144-a4f4-4b6d-a507-23ad5179e0b4 ++ local command_id=82d43144-a4f4-4b6d-a507-23ad5179e0b4 ++ jq -r '.CommandInvocations[] | {InstanceId, Status, Output}' + +Lets get the log outputs and the cloudtrails and the cloudwatch logs for this command ++ aws ssm list-command-invocations --command-id 82d43144-a4f4-4b6d-a507-23ad5179e0b4 --details --region us-east-2 +{ + "InstanceId": "i-0a3dae164f8f3c09a", + "Status": "Success", + "Output": null +} + + +To obtain the log outputs, cloudtrail logs, and cloudwatch logs for the specified AWS Systems +Manager (SSM) command, follow these steps: + +*** Step 
1: Retrieve the Output from the Command Execution + +Since the output is ~null~ in your provided response, we'll first try to fetch the output directly. +#+BEGIN_SRC sh +aws ssm list-command-invocations --command-id 82d43144-a4f4-4b6d-a507-23ad5179e0b4 --details --region us-east-2 --profile swarms +#+END_SRC + +Lets fetch the results +#+RESULTS: +| { | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +| "CommandInvocations": | [ | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +| { | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +| "CommandId": | "82d43144-a4f4-4b6d-a507-23ad5179e0b4", | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +| "InstanceId": | "i-0a3dae164f8f3c09a", | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +| "InstanceName": | "ip-10-0-4-93.us-east-2.compute.internal", | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +| "Comment": | "", | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +| "DocumentName": | "deploy-docker", | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +| "DocumentVersion": | "1", | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +| "RequestedDateTime": | 1735005232.999, | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +| "Status": | "Success", | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +| "StatusDetails": | "Success", | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +| "StandardOutputUrl": | "https://s3.us-east-2.amazonaws.com/swarms-session-logs-20241221151754799300000003/82d43144-a4f4-4b6d-a507-23ad5179e0b4/i-0a3dae164f8f3c09a/awsrunShellScript/DeployDocker/stdout", | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +| "StandardErrorUrl": | "https://s3.us-east-2.amazonaws.com/swarms-session-logs-20241221151754799300000003/82d43144-a4f4-4b6d-a507-23ad5179e0b4/i-0a3dae164f8f3c09a/awsrunShellScript/DeployDocker/stderr", | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +| 
"CommandPlugins": | [ | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +| { | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +| "Name": | "DeployDocker", | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +| "Status": | "Success", | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +| "StatusDetails": | "Success", | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +| "ResponseCode": | 0, | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +| "ResponseStartDateTime": | 1735005233.167, | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +| "ResponseFinishDateTime": | 1735005237.002, | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +| "Output": | "oops\nYour | branch | is | up | to | date | with | 'origin/feature/squash2-docker'.\n-- | No | entries | --\n\n----------ERROR-------\nerror: | remote | mdupont | already | exists.\nAlready | on | 'feature/squash2-docker'\nbash: | /opt/swarms/api/docker-boot.sh: | No | such | file | or | directory\nFailed | to | restart | swarms-docker.service: | Unit | swarms-docker.service | not | found.\n", | +| "StandardOutputUrl": | "https://s3.us-east-2.amazonaws.com/swarms-session-logs-20241221151754799300000003/82d43144-a4f4-4b6d-a507-23ad5179e0b4/i-0a3dae164f8f3c09a/awsrunShellScript/DeployDocker/stdout", | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +| "StandardErrorUrl": | "https://s3.us-east-2.amazonaws.com/swarms-session-logs-20241221151754799300000003/82d43144-a4f4-4b6d-a507-23ad5179e0b4/i-0a3dae164f8f3c09a/awsrunShellScript/DeployDocker/stderr", | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +| "OutputS3Region": | "us-east-2", | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +| "OutputS3BucketName": | "swarms-session-logs-20241221151754799300000003", | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +| "OutputS3KeyPrefix": | 82d43144-a4f4-4b6d-a507-23ad5179e0b4/i-0a3dae164f8f3c09a/awsrunShellScript | | | 
| | | | | | | | | | | | | | | | | | | | | | | | | | | +| } | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +| ], | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +| "ServiceRole": | "", | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +| "NotificationConfig": | { | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +| "NotificationArn": | "", | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +| "NotificationEvents": | [], | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +| "NotificationType": | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +| }, | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +| "CloudWatchOutputConfig": | { | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +| "CloudWatchLogGroupName": | "/ssm/session-logs-20241221151803393300000006", | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +| "CloudWatchOutputEnabled": | true | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +| } | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +| } | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +| ] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +| } | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | + +If the ~--details~ flag doesn't provide the output, you may need to retrieve it using another +method. One common approach is to use CloudWatch Logs. + +*** Step 2: Retrieve Output from CloudWatch Logs + +Assuming the SSM command execution generated logs in CloudWatch Logs, you can find the specific log +group and stream associated with your command ID. + +1. *Get the Log Group Name*: + The log group name typically follows a pattern like ~/aws/ssm//~. You need to + determine which exact account ID is used for this output. + +2. *Search for Logs in CloudWatch Logs*: + Use the AWS Management Console or ~aws logs~ command line tool to search for logs in the + appropriate log group. 
+ +**** Using AWS CLI: +#+BEGIN_SRC sh +aws logs get-log-events --log-group-name /aws/ssm/us-east-2/123456789012 --start-time 1672531200000 +--end-time 1675123200000 +#+END_SRC + +Replace ~123456789012~ with your actual account ID and set the time range to match when the command +was executed. + +**** Using AWS Management Console: + +1. Open the CloudWatch Logs console. +2. Navigate to the log group ~/aws/ssm/us-east-2/123456789012~. +3. Use the search bar to filter logs for the command ID ~82d43144-a4f4-4b6d-a507-23ad5179e0b4~. + +*** Step 3: Retrieve CloudTrail Logs + +CloudTrail records all API calls made by AWS users. You can find the logs that correspond to your +SSM command execution. + +1. *Open the CloudTrail Console*: + Navigate to the CloudTrail console and select your trail or go directly to the event management + page. + +2. *Search for Events*: + Use the search bar to filter events based on ~EventName~ being ~StartCommand~, ~CancelCommand~, + etc., and use the command ID ~82d43144-a4f4-4b6d-a507-23ad5179e0b4~. 
+ + + + ++ aws ssm list-command-invocations --command-id 6c96c523-2970-4071-9d5d-f95a041703ef --details --region us-east-2 +{ + "CommandInvocations": [ + { + "CommandId": "6c96c523-2970-4071-9d5d-f95a041703ef", + "InstanceId": "i-0a3dae164f8f3c09a", + "InstanceName": "ip-10-0-4-93.us-east-2.compute.internal", + "Comment": "", + "DocumentName": "deploy-docker", + "DocumentVersion": "$LATEST", + "RequestedDateTime": 1735058288.722, + "Status": "Success", + "StatusDetails": "Success", + "StandardOutputUrl": "https://s3.us-east-2.amazonaws.com/swarms-session-logs-20241221151754799300000003/6c96c523-2970-4071-9d5d-f95a041703ef/i-0a3dae164f8f3c09a/awsrunShellScript/DeployDocker/stdout", + "StandardErrorUrl": "https://s3.us-east-2.amazonaws.com/swarms-session-logs-20241221151754799300000003/6c96c523-2970-4071-9d5d-f95a041703ef/i-0a3dae164f8f3c09a/awsrunShellScript/DeployDocker/stderr", + "CommandPlugins": [ + { + "Name": "DeployDocker", + "Status": "Success", + "StatusDetails": "Success", + "ResponseCode": 0, + "ResponseStartDateTime": 1735058288.981, + "ResponseFinishDateTime": 1735058293.354, + "Output": "cd /opt/swarms/\nexport HOME=/home/swarms/\ngit config --global user.email \"git-agent@swarms.ai\"\ngit config --global user.name \"git agent\"\ngit remote add mdupont https://github.com/jmikedupont2/swarms || echo oops\ngit remote set-url mdupont https://github.com/jmikedupont2/swarms || echo oops2\necho git url https://github.com/jmikedupont2/swarms\ngit fetch --all|| echo oops \ngit pull mdupont feature/squash2-docker|| echo oops \ngit checkout --force feature/squash2-docker || echo checkout\ngit log -3 || echo git fail\nls /opt/swarms/api/*.sh || echo no files\noops\ngit url https://github.com/jmikedupont2/swarms\nFetching origin\nFetching mdupont\nUpdating 62a7b307..68755906\nFast-forward\n api/docker-boot.sh | 16 +++++++++\n api/rundocker.sh | 76 +++++++++++++++++++++++++++++++++++++++\n api/systemd/swarms-docker.service | 25 +++++++++++++\n 3 files changed, 117 
insertions(+)\n create mode 100644 api/docker-boot.sh\n create mode 100644 api/rundocker.sh\n create mode 100644 api/systemd/swarms-docker.service\nYour branch is up to date with 'origin/feature/squash2-docker'.\ncommit 68755906be19d73deb0c8c5144323ecd3c203068\nAuthor: mike dupont \nD\n---Output truncated---\n----------ERROR-------\n+ chown -R swarms:swarms /opt/swarms\n+ tee /opt/swarms/update_docker_swarms.sh\n+ sudo chown swarms /opt/swarms/update_docker_swarms.sh\n+ su -c 'bash -e -x /opt/swarms/update_docker_swarms.sh' swarms\n+ cd /opt/swarms/\n+ export HOME=/home/swarms/\n+ HOME=/home/swarms/\n+ git config --global user.email git-agent@swarms.ai\n+ git config --global user.name 'git agent'\n+ git remote add mdupont https://github.com/jmikedupont2/swarms\nerror: remote mdupont already exists.\n+ echo oops\n+ git remote set-url mdupont https://github.com/jmikedupont2/swarms\n+ echo git url https://github.com/jmikedupont2/swarms\n+ git fetch --all\nFrom https://github.com/jmikedupont2/swarms\n 62a7b307..68755906 feature/squash2-docker -> origin/feature/squash2-docker\n+ git pull mdupont feature/squash2-docker\nFrom https://github.com/jmikedupont2/swarms\n * branch feature/squash2-docker -> FETCH_HEAD\n+ git checkout --force feature/squash2-docker\nAlready on 'feature/squash2-docker'\n+ git log -3\n+ ls /opt/swarms/api/boot.sh /opt/swarms/api/boot_fast.sh /opt/swarms/api/docker-boot.sh /opt/swarms/api/install.sh /opt/swarms/api/just_run.sh /opt/swarms/api/rerun-git.sh /opt/swarms/api/rerun.sh /opt/swarms/api/run_logs.sh /opt/swa\n---Error truncated----", + "StandardOutputUrl": "https://s3.us-east-2.amazonaws.com/swarms-session-logs-20241221151754799300000003/6c96c523-2970-4071-9d5d-f95a041703ef/i-0a3dae164f8f3c09a/awsrunShellScript/DeployDocker/stdout", + "StandardErrorUrl": " + "OutputS3Region": "us-east-2", + "OutputS3BucketName": "swarms-session-logs-20241221151754799300000003", + "OutputS3KeyPrefix": 
"6c96c523-2970-4071-9d5d-f95a041703ef/i-0a3dae164f8f3c09a/awsrunShellScript" + } + ], + "ServiceRole": "", + "NotificationConfig": { + "NotificationArn": "", + "NotificationEvents": [], + "NotificationType": "" + }, + "CloudWatchOutputConfig": { + "CloudWatchLogGroupName": "/ssm/session-logs-20241221151803393300000006", + "CloudWatchOutputEnabled": true + } + } + ] +} + +aws logs get-log-events --log-group-name /ssm/session-logs-20241221151803393300000006 +#aws logs describe-log-streams --log-group-name "CloudWatchLogGroupName": "/ssm/session-logs-20241221151803393300000006 --profile swarms --region us-east-2 +#aws logs describe-log-streams --log-group-name /ssm/session-logs-20241221151803393300000006 --profile swarms --region us-east-2 +aws logs describe-log-streams --log-group-name /ssm/session-logs-20241221151803393300000006 --profile swarms --region us-east-2 --output json | jq . > logstreams.json +aws logs get-log-events --log-group-name /ssm/session-logs-20241221151803393300000006 --log-stream-name $(jq ".logStreams[0].logStreamName" logstreams.json) --region us-east-2 --profile swarms + +#jq ".logStreams[0].logStreamName" logstreams.json + awslogs groups + + /aws/application-signals/data +/aws/appsignals/ec2 +/aws/appsignals/eks +/aws/appsignals/generic +/aws/appsignals/k8s + + /ec2/init +/ec2/secure +/ec2/system +/ssm/session-logs-20241221151803393300000006 +/swarms/nginx_error +/swarms/ngnix_access +/swarms/systemd + + awslogs get /swarms/systemd + + awslogs get /ssm/session-logs-20241221151803393300000006 + + + + + + mdupont@mdupont-G470:~/swarms-terraform/actions$ bash ./call_swarms.sh ++ export REGION=us-east-2 ++ REGION=us-east-2 ++ TAG_KEY=Name ++ TAG_VALUE=docker-swarms-ami-t3.medium ++ GIT_URL=https://github.com/jmikedupont2/swarms ++ export GIT_NAME=mdupont ++ GIT_NAME=mdupont ++ export GIT_VERSION=feature/squash2-docker ++ GIT_VERSION=feature/squash2-docker ++ DOCUMENT_NAME=deploy-docker ++ DOCUMENT_VERSION='$LATEST' ++ TIMEOUT_SECONDS=600 ++ 
MAX_CONCURRENCY=50 ++ MAX_ERRORS=0 +++ get_instance_ids +++ aws ec2 describe-instances --filters Name=tag:Name,Values=docker-swarms-ami-t3.medium --query 'Reservations[*].Instances[*].InstanceId' --output text --region us-east-2 ++ for instance in $(get_instance_ids) ++ echo 'Instance ID: i-0a3dae164f8f3c09a' +Instance ID: i-0a3dae164f8f3c09a +++ send_command i-0a3dae164f8f3c09a +++ local instance_id=i-0a3dae164f8f3c09a +++ aws ssm send-command --document-name deploy-docker --document-version '$LATEST' --targets '[{"Key":"InstanceIds","Values":["i-0a3dae164f8f3c09a"]}]' --parameters '{"GitUrl":["https://github.com/jmikedupont2/swarms"],"GitName":["mdupont"],"GitVersion":["feature/squash2-docker"]}' --timeout-seconds 600 --max-concurrency 50 --max-errors 0 --region us-east-2 --output-s3-bucket-name swarms-session-logs-20241221151754799300000003 --cloud-watch-output-config '{"CloudWatchOutputEnabled":true,"CloudWatchLogGroupName":"/ssm/session-logs-20241221151803393300000006"}' ++ result='{ + "Command": { + "CommandId": "11548a23-624f-4fac-a085-d813e91f5fff", + "DocumentName": "deploy-docker", + "DocumentVersion": "$LATEST", + "Comment": "", + "ExpiresAfter": 1735139787.028, + "Parameters": { + "GitName": [ + "mdupont" + ], + "GitUrl": [ + "https://github.com/jmikedupont2/swarms" + ], + "GitVersion": [ + "feature/squash2-docker" + ] + }, + "InstanceIds": [], + "Targets": [ + { + "Key": "InstanceIds", + "Values": [ + "i-0a3dae164f8f3c09a" + ] + } + ], + "RequestedDateTime": 1735135587.028, + "Status": "Pending", + "StatusDetails": "Pending", + "OutputS3Region": "us-east-2", + "OutputS3BucketName": "swarms-session-logs-20241221151754799300000003", + "OutputS3KeyPrefix": "", + "MaxConcurrency": "50", + "MaxErrors": "0", + "TargetCount": 0, + "CompletedCount": 0, + "ErrorCount": 0, + "DeliveryTimedOutCount": 0, + "ServiceRole": "", + "NotificationConfig": { + "NotificationArn": "", + "NotificationEvents": [], + "NotificationType": "" + }, + "CloudWatchOutputConfig": { + 
"CloudWatchLogGroupName": "/ssm/session-logs-20241221151803393300000006", + "CloudWatchOutputEnabled": true + }, + "TimeoutSeconds": 600, + "AlarmConfiguration": { + "IgnorePollAlarmFailure": false, + "Alarms": [] + }, + "TriggeredAlarms": [] + } +}' +++ jq -r .Command.CommandId +lets fetch the full log +++ echo '{' '"Command":' '{' '"CommandId":' '"11548a23-624f-4fac-a085-d813e91f5fff",' '"DocumentName":' '"deploy-docker",' '"DocumentVersion":' '"$LATEST",' '"Comment":' '"",' '"ExpiresAfter":' 1735139787.028, '"Parameters":' '{' '"GitName":' '[' '"mdupont"' '],' '"GitUrl":' '[' '"https://github.com/jmikedupont2/swarms"' '],' '"GitVersion":' '[' '"feature/squash2-docker"' ']' '},' '"InstanceIds":' '[],' '"Targets":' '[' '{' '"Key":' '"InstanceIds",' '"Values":' '[' '"i-0a3dae164f8f3c09a"' ']' '}' '],' '"RequestedDateTime":' 1735135587.028, '"Status":' '"Pending",' '"StatusDetails":' '"Pending",' '"OutputS3Region":' '"us-east-2",' '"OutputS3BucketName":' '"swarms-session-logs-20241221151754799300000003",' '"OutputS3KeyPrefix":' '"",' '"MaxConcurrency":' '"50",' '"MaxErrors":' '"0",' '"TargetCount":' 0, '"CompletedCount":' 0, '"ErrorCount":' 0, '"DeliveryTimedOutCount":' 0, '"ServiceRole":' '"",' '"NotificationConfig":' '{' '"NotificationArn":' '"",' '"NotificationEvents":' '[],' '"NotificationType":' '""' '},' '"CloudWatchOutputConfig":' '{' '"CloudWatchLogGroupName":' '"/ssm/session-logs-20241221151803393300000006",' '"CloudWatchOutputEnabled":' true '},' '"TimeoutSeconds":' 600, '"AlarmConfiguration":' '{' '"IgnorePollAlarmFailure":' false, '"Alarms":' '[]' '},' '"TriggeredAlarms":' '[]' '}' '}' ++ command_id=11548a23-624f-4fac-a085-d813e91f5fff ++ aws ssm wait command-executed --command-id 11548a23-624f-4fac-a085-d813e91f5fff --region us-east-2 --instance i-0a3dae164f8f3c09a ++ fetch_command_output 11548a23-624f-4fac-a085-d813e91f5fff ++ local command_id=11548a23-624f-4fac-a085-d813e91f5fff ++ jq . 
++ aws ssm list-command-invocations --command-id 11548a23-624f-4fac-a085-d813e91f5fff --details --region us-east-2 +{ + "CommandInvocations": [ + { + "CommandId": "11548a23-624f-4fac-a085-d813e91f5fff", + "InstanceId": "i-0a3dae164f8f3c09a", + "InstanceName": "ip-10-0-4-93.us-east-2.compute.internal", + "Comment": "", + "DocumentName": "deploy-docker", + "DocumentVersion": "$LATEST", + "RequestedDateTime": 1735135587.107, + "Status": "Success", + "StatusDetails": "Success", + "StandardOutputUrl": "https://s3.us-east-2.amazonaws.com/swarms-session-logs-20241221151754799300000003/11548a23-624f-4fac-a085-d813e91f5fff/i-0a3dae164f8f3c09a/awsrunShellScript/DeployDocker/stdout", + "StandardErrorUrl": "https://s3.us-east-2.amazonaws.com/swarms-session-logs-20241221151754799300000003/11548a23-624f-4fac-a085-d813e91f5fff/i-0a3dae164f8f3c09a/awsrunShellScript/DeployDocker/stderr", + "CommandPlugins": [ + { + "Name": "DeployDocker", + "Status": "Success", + "StatusDetails": "Success", + "ResponseCode": 0, + "ResponseStartDateTime": 1735135587.335, + "ResponseFinishDateTime": 1735135668.743, + "Output": "cd /opt/swarms/\nexport HOME=/home/swarms/\ngit config --global user.email \"git-agent@swarms.ai\"\ngit config --global user.name \"git agent\"\ngit remote add mdupont https://github.com/jmikedupont2/swarms || echo oops\ngit remote set-url mdupont https://github.com/jmikedupont2/swarms || echo oops2\necho git url https://github.com/jmikedupont2/swarms\ngit fetch --all|| echo oops \ngit pull mdupont feature/squash2-docker|| echo oops \ngit checkout --force feature/squash2-docker || echo checkout\ngit log -3 || echo git fail\nls /opt/swarms/api/*.sh || echo no files\noops\ngit url https://github.com/jmikedupont2/swarms\nFetching origin\nFetching mdupont\nUpdating 68755906..3e0a7ead\nFast-forward\n api/docker-boot.sh | 19 +++++++++++++++----\n api/rundocker.sh | 12 ++++++++++--\n 2 files changed, 25 insertions(+), 6 deletions(-)\nYour branch is up to date with 
'origin/feature/squash2-docker'.\ncommit 3e0a7ead57e7f53cfd5ac0034b5ec40ac7cbfafc\nAuthor: mike dupont \nDate: Tue Dec 24 12:19:11 2024 -0500\n\n shell check\n\ncommit 24af710cb49da489ae8e1aa86a641e02a5e01f2c\nAuthor: mike dupont \nDate: Tue Dec 24 12:17:53 2024 -0500\n\n going to test\n\n\n---Output truncated---\n----------ERROR-------\n+ chown -R swarms:swarms /opt/swarms\n+ tee /opt/swarms/update_docker_swarms.sh\n+ sudo chown swarms /opt/swarms/update_docker_swarms.sh\n+ su -c 'bash -e -x /opt/swarms/update_docker_swarms.sh' swarms\n+ cd /opt/swarms/\n+ export HOME=/home/swarms/\n+ HOME=/home/swarms/\n+ git config --global user.email git-agent@swarms.ai\n+ git config --global user.name 'git agent'\n+ git remote add mdupont https://github.com/jmikedupont2/swarms\nerror: remote mdupont already exists.\n+ echo oops\n+ git remote set-url mdupont https://github.com/jmikedupont2/swarms\n+ echo git url https://github.com/jmikedupont2/swarms\n+ git fetch --all\nFrom https://github.com/jmikedupont2/swarms\n 68755906..3e0a7ead feature/squash2-docker -> origin/feature/squash2-docker\nFrom https://github.com/jmikedupont2/swarms\n 68755906..3e0a7ead feature/squash2-docker -> mdupont/feature/squash2-docker\n+ git pull mdupont feature/squash2-docker\nFrom https://github.com/jmikedupont2/swarms\n * branch feature/squash2-docker -> FETCH_HEAD\n+ git checkout --force feature/squash2-docker\nAlready on 'feature/squash2-docker'\n+ git log -3\n+ ls /opt/swarms/api/boot.sh /opt/swarms/api/boot_fast.sh /opt/swarms/api/docker-boot.sh /opt/swarms/api/insta\n---Error truncated----", + "StandardOutputUrl": "https://s3.us-east-2.amazonaws.com/swarms-session-logs-20241221151754799300000003/11548a23-624f-4fac-a085-d813e91f5fff/i-0a3dae164f8f3c09a/awsrunShellScript/DeployDocker/stdout", + "StandardErrorUrl": "https://s3.us-east-2.amazonaws.com/swarms-session-logs-20241221151754799300000003/11548a23-624f-4fac-a085-d813e91f5fff/i-0a3dae164f8f3c09a/awsrunShellScript/DeployDocker/stderr", + 
"OutputS3Region": "us-east-2", + "OutputS3BucketName": "swarms-session-logs-20241221151754799300000003", + "OutputS3KeyPrefix": "11548a23-624f-4fac-a085-d813e91f5fff/i-0a3dae164f8f3c09a/awsrunShellScript" + } + ], + "ServiceRole": "", + "NotificationConfig": { + "NotificationArn": "", + "NotificationEvents": [], + "NotificationType": "" + }, + "CloudWatchOutputConfig": { + "CloudWatchLogGroupName": "/ssm/session-logs-20241221151803393300000006", + "CloudWatchOutputEnabled": true + } + } + ] +} diff --git a/actions/call_swarms.sh b/actions/call_swarms.sh new file mode 100644 index 0000000..2e37d0e --- /dev/null +++ b/actions/call_swarms.sh @@ -0,0 +1,84 @@ +#!/bin/bash +# convert this to python boto3 +set -e +set -x +# Constants +export REGION="${REGION:-us-east-2}" +#export AWS_PROFILE="${AWS_PROFILE:-swarms}" +#export AWS_PROFILE="${AWS_PROFILE}" only needed for testing locally +TAG_KEY="${TAG_KEY:-Name}" + +# which servers to target +TAG_VALUE="${TAG_VALUE:-docker-swarms-ami-t3.medium}" + +#what git remote +GIT_URL="${GIT_URL:-https://github.com/jmikedupont2/swarms}" +#GIT_URL="${GIT_URL:-git@github.com:jmikedupont2/swarms.git}" +#git@github.com:username/reponame.git + +# what to name the repo +export GIT_NAME="${GIT_NAME:-mdupont}" + +# what version of swarms to deploy +export GIT_VERSION="${GIT_VERSION:-feature/squash2-docker}" + +# what script to call? 
+DOCUMENT_NAME="${DOCUMENT_NAME:-deploy-docker}" + +# aws ssm send-command --document-name "deploy-docker" --document-version "\$LATEST" --targets '[{"Key":"InstanceIds","Values":["i-0a3dae164f8f3c09a"]}]' --parameters '{"GitUrl":["https://github.com/jmikedupont/swarms"],"GitName":["mdupont"],"GitVersion":["feature/squash2-docker"]}' --timeout-seconds 600 --max-concurrency "50" --max-errors "0" --output-s3-bucket-name "swarms-session-logs-20241221151754799300000003" --cloud-watch-output-config '{"CloudWatchLogGroupName":"/ssm/session-logs-20241221151803393300000006","CloudWatchOutputEnabled":true}' --region us-east-2 + +# what version +DOCUMENT_VERSION="${DOCUMENT_VERSION:-\$LATEST}" + +TIMEOUT_SECONDS="${TIMEOUT_SECONDS:-600}" +MAX_CONCURRENCY="${MAX_CONCURRENCY:-50}" +MAX_ERRORS="${MAX_ERRORS:-0}" + +# Function to get instance IDs +get_instance_ids() { + aws ec2 describe-instances \ + --filters "Name=tag:$TAG_KEY,Values=$TAG_VALUE" \ + --query "Reservations[*].Instances[*].InstanceId" \ + --output text \ + --region $REGION +} + +# Function to send command to instance +send_command() { + local instance_id="$1" + aws ssm send-command \ + --document-name "$DOCUMENT_NAME" \ + --document-version "$DOCUMENT_VERSION" \ + --targets "[{\"Key\":\"InstanceIds\",\"Values\":[\"$instance_id\"]}]" \ + --parameters "{\"GitUrl\":[\"$GIT_URL\"],\"GitName\":[\"$GIT_NAME\"],\"GitVersion\":[\"$GIT_VERSION\"]}" \ + --timeout-seconds $TIMEOUT_SECONDS \ + --max-concurrency "$MAX_CONCURRENCY" \ + --max-errors "$MAX_ERRORS" \ + --region $REGION \ + --output-s3-bucket-name "swarms-session-logs-20241221151754799300000003" \ + --cloud-watch-output-config '{"CloudWatchOutputEnabled":true,"CloudWatchLogGroupName":"/ssm/session-logs-20241221151803393300000006"}' + +} + +# Function to fetch command output +fetch_command_output() { + local command_id="$1" + #aws ssm list-command-invocations --command-id 82d43144-a4f4-4b6d-a507-23ad5179e0b4 --details --region us-east-2 --profile swarms + aws ssm 
list-command-invocations \ + --command-id "$command_id" \ + --details \ + --region $REGION | jq . #-r '.CommandInvocations[] | {InstanceId, Status, Output}' +} + +# Main script execution +for instance in $(get_instance_ids); do + echo "Instance ID: $instance" + result=$(send_command "$instance") + command_id=$(echo $result | jq -r '.Command.CommandId') + + # Wait for the command to complete + aws ssm wait command-executed --command-id "$command_id" --region $REGION --instance-id "$instance" + + # Fetch and print the command output + fetch_command_output "$command_id" +done diff --git a/ai-notes.md b/ai-notes.md new file mode 100644 index 0000000..668d832 --- /dev/null +++ b/ai-notes.md @@ -0,0 +1,415 @@ +``` +help me create a c4/semantic web/mermaid plantuml deployment diagram +for a self service cognito enabled, smart agent server hosting, +with many different deployment options ranging from t2-small to any larger size. +using terraform, aws, ec2, alb,diagram +accounts/swarms + -> environments/swarms-aws-agent-api/dev/us-east-1 + godaddy -> ns api.swarms.ai -> + route53 -> cname ->application load balancer + -> autoscaling group -> launch template -> ami + AMI : components/machine_image/ubuntu-fastapi.pkr.hcl + + swarms -> ec2, user data + github clone branch run api/install.sh, swarms user, app/boot.sh as swarms user (virtual env) + instance profile (our server, customer server) -> + todo :read access to specific ssm secrets -> openai key + servers tagged as user X can access secrets tagged Y. + outgoing ip address : (can talk to other servers) (allow certain ip ranges) + , nginx, systemd (isolate user process, write access to home), uvicorn -> swarms 4 processes fast api. + + githubactions -> oidc connection -> aws -> ssm document -> update your part of the server. 
+ githubactions -> oidc connection -> aws -> ssm document -> terraform -> create new clusters + +shared hosted aws account : + profiles for each customer, + tags for customer resources + which api calls they can use + tags for cost tracking + custom vpc, subsets network resources. + + on servers, the agents will have own aws role/profiles (like lamda) + either each customer/agent gets own unix user, homedir, storage, email , backup. + containerization, k8s, docker. +``` + +# diagram1 + +```plantuml +@startuml + +!define RECTANGLE class + +RECTANGLE "Self-Service Cognito-Enabled Smart Agent Server" { + (User) +} + +rectangle "Accounts/Swarm" { + usecase "Create Environments" as U1 + usecase "Deploy EC2 Instances" as U2 + usecase "Manage Load Balancer" as U3 + usecase "Configure Auto-Scaling" as U4 + usecase "Provision AMI" as U5 + usecase "Access SSM Secrets" as U6 + usecase "Update Server Configuration" as U7 + usecase "Track Customer API Calls" as U8 + usecase "Provision Shared Resources" as U9 +} + +User --> U1 : "Creates / Manages" +User --> U2 : "Deploys Instances" +User --> U3 : "Configures Load Balancer" +User --> U4 : "Sets Auto-Scaling Parameters" +User --> U5 : "Uses AMI for Deployment" +User --> U6 : "Requests Secrets for Access" +User --> U7 : "Updates Configuration" +User --> U8 : "Monitors API Usage" +User --> U9 : "Shares Resources among Customers" + +U1 --> "Route53" : "Configures CNAME" +U2 --> "EC2" : "Launches Instances" +U3 --> "ALB" : "Routes Traffic" +U4 --> "AutoScaling Group" : "Manages Scalability" +U5 --> "AMI" : "Provides Machine Image" +U7 --> "GitHub Actions" : "Automates Deploy" +U6 --> "Instance Profile" : "Manages Permissions" +U8 --> "Tags" : "Tracks Costs" + +note right of U5 : Component: \ncomponents/machine_image/ubuntu-fastapi.pkr.hcl +note right of U8 : Custom VPC \n\n Customer Profiles + +@enduml +``` +![1000019322](https://github.com/user-attachments/assets/7f2c8f90-8222-4344-a007-59ea518b0f00) 
+https://www.planttext.com/?text=RPJVRzem4CVVyrVSUDeUA1gK_dgOccQeGu8AaRAdIfCnbs0riPalRbTD-zztjfCre3q6V_pr-NE7RwaHjd5rbOYgCg-r-gLqcIGVTbWeZR2UPO_fm_rY1h8IH4do_iCWnweOvcXVb4J8JAbLOuOpBROLxY2lEHRI4dK3da4xWDy9mCSDeVsK_4aIYx8HkgmG1gcKnkc6Hlchi7K47Q4Kn8kPHT4WpFIBiaRN74W340IRpttgAnugymQpR0nppNVI4bjitCUMGeiIOM743kv4vJ4RiSdHcKONgdGM8NLygLoAYhL4yBmFhgnvKQICXdGvZ-lJ_ZhV38aWpvTiGLhiQh_eGvl3pjymQeBkRD5mRAGl-_IZ5V8PCaUDgHbFLtF8k7TjyDN_gyltmd9NraZ6sNzghdqVV4DWEFp2Gk6cqqqmWkYA1ZrYx8cecdgUUyGaP7JwwCHq3pptOCv2ZXJ3IbXHOuFsA7NgOVuahnaAOo4MuQUXprruReq_7H87jyO37nlHPoz3Pb1F9z9xr9MdbiRFjgKWUqDifOzSXvjumVGk64Hj-3fTc_ZaXFrwVh0shi7fP71YAdj8bpFE7KUA9paG2-6qtEEnxabaqiLTO48Y0Kz6KIZ9w-VjEfljnDvRumw1RliAVaLiLQMQDzuoRRVuCRvfHsV7r5B8lNy9vdMyzcNaxbNppMrPgnT6OOi_hGusOtCOleXmLpfmQg4gF2fZXRPMH24cOTzXnwCeOsYOTjyVRYWxr8R_A5QL-mPC4USYC_N1Q1w46tZIilkWKHqB_w5yuIEtvGV63Gl1bHpKsU7PdkrbTPhdknrR1jzN6USzwVT_MbiyFuAtg7VyPFu5 + +### Explanation: +- This diagram represents the primary use cases associated with your smart agent server project. +- Each use case such as creating environments, deploying EC2 instances, and accessing SSM secrets represents functionalities your system supports. +- Annotations explain the components (like AMI) and setups (like custom VPC) tied to specific activities. 
+ + +```plantuml +@startuml + +!define RECTANGLE(x) rectangle x as x +!define DIAMOND(x) diamond x as x + +package "Accounts/Swarm" { + RECTANGLE(swarms-aws-agent-api) { + package "Environments" { + RECTANGLE(dev) { + RECTANGLE(us_east_1) { + RECTANGLE(Godaddy) + RECTANGLE(Route53) + RECTANGLE(ALB) as alb + RECTANGLE(AutoScalingGroup) as asg + RECTANGLE(LaunchTemplate) as lt + RECTANGLE(AMI) as ami + } + } + } + } + Godaddy -> Route53: ns api.swarms.ai + Route53 -> alb: CNAME + alb -> asg + asg -> lt + lt -> ami: "components/machine_image/ubuntu-fastapi.pkr.hcl" + + package "EC2 Instance" { + RECTANGLE(EC2) { + RECTANGLE(UserData) + RECTANGLE(InstanceProfile) + RECTANGLE(Nginx) + RECTANGLE(Systemd) + RECTANGLE(Uvicorn) + RECTANGLE(FastAPI) + + EC2 -> UserData: github clone branch run + EC2 -> InstanceProfile: read access to SSM secrets + EC2 -> Nginx: isolates user process + EC2 -> Systemd: manage services + EC2 -> Uvicorn: "4 instances of FastAPI" + } + } + + UserData -> EC2: "run api/install.sh" + InstanceProfile -> EC2: "access tagged secrets" + Nginx -> Systemd: "manage processes" +} + +package "GitHub Actions" { + RECTANGLE(OIDC) { + RECTANGLE(SSM_Document) + RECTANGLE(Terraform) + } + OIDC -> AWS + AWS -> SSM_Document: "update server" + OIDC -> AWS: "create new clusters" + AWS -> Terraform +} + +package "Shared Hosted AWS Account" { + RECTANGLE(Profiles) { + RECTANGLE(Tags) + RECTANGLE(VPC) + } + Profiles -> Tags: "customer resources" + Profiles -> VPC: "subnet resources" + Profiles -> EC2: "instance roles" +} + +@enduml + + +Here's an updated PlantUML deployment diagram reflecting your architecture with key components: + +```plantuml +@startuml + +!define RECTANGLE(x) rectangle x as x +!define DIAMOND(x) diamond x as x + +package "Accounts/Swarm" { + RECTANGLE(swarms-aws-agent-api) { + package "Environments" { + RECTANGLE(dev) { + RECTANGLE(us_east_1) { + RECTANGLE(Godaddy) + RECTANGLE(Route53) + RECTANGLE(ALB) as alb + RECTANGLE(AutoScalingGroup) as asg + 
RECTANGLE(LaunchTemplate) as lt + RECTANGLE(AMI) as ami + } + } + } + } + Godaddy -> Route53: ns api.swarms.ai + Route53 -> alb: CNAME + alb -> asg + asg -> lt + lt -> ami: "components/machine_image/ubuntu-fastapi.pkr.hcl" + + package "EC2 Instance" { + RECTANGLE(EC2) { + RECTANGLE(UserData) + RECTANGLE(InstanceProfile) + RECTANGLE(Nginx) + RECTANGLE(Systemd) + RECTANGLE(Uvicorn) + RECTANGLE(FastAPI) + + EC2 -> UserData: github clone branch run + EC2 -> InstanceProfile: read access to SSM secrets + EC2 -> Nginx: isolates user process + EC2 -> Systemd: manage services + EC2 -> Uvicorn: "4 instances of FastAPI" + } + } + + UserData -> EC2: "run api/install.sh" + InstanceProfile -> EC2: "access tagged secrets" + Nginx -> Systemd: "manage processes" +} + +package "GitHub Actions" { + RECTANGLE(OIDC) { + RECTANGLE(SSM_Document) + RECTANGLE(Terraform) + } + OIDC -> AWS + AWS -> SSM_Document: "update server" + OIDC -> AWS: "create new clusters" + AWS -> Terraform +} + +package "Shared Hosted AWS Account" { + RECTANGLE(Profiles) { + RECTANGLE(Tags) + RECTANGLE(VPC) + } + Profiles -> Tags: "customer resources" + Profiles -> VPC: "subnet resources" + Profiles -> EC2: "instance roles" +} + +@enduml +``` + + +It seems there was some confusion. 
Here’s a refined version of the complete PlantUML deployment diagram for your smart agent server setup: + +```plantuml +@startuml + +!define RECTANGLE(x) rectangle x as x +!define DIAMOND(x) diamond x as x + +package "Accounts/Swarms" { + RECTANGLE("swarms-aws-agent-api") { + package "Environments" { + RECTANGLE("dev") { + RECTANGLE("us-east-1") { + RECTANGLE(Godaddy) + RECTANGLE(Route53) + RECTANGLE(ALB) as alb + RECTANGLE("AutoScalingGroup") as asg + RECTANGLE("LaunchTemplate") as lt + RECTANGLE("AMI") as ami + } + } + } + } + Godaddy -> Route53: ns api.swarms.ai + Route53 -> alb: CNAME + alb -> asg + asg -> lt + lt -> ami: "components/machine_image/ubuntu-fastapi.pkr.hcl" + + package "EC2 Instance" { + RECTANGLE("EC2") { + RECTANGLE("UserData") as userdata + RECTANGLE("InstanceProfile") as ip + RECTANGLE("Nginx") as nginx + RECTANGLE("Systemd") as systemd + RECTANGLE("Uvicorn") as uvicorn + RECTANGLE("FastAPI") as fastapi + + EC2 -> userdata: "github clone branch run api/install.sh" + EC2 -> ip: "read access to SSM secrets" + EC2 -> nginx: "isolates user process" + EC2 -> systemd: "manages services" + EC2 -> uvicorn: "4 instances of FastAPI" + } + } + + userdata -> EC2: "execute boot.sh" + ip -> EC2: "access tagged secrets" + nginx -> systemd: "manage processes" +} + +package "GitHub Actions" { + RECTANGLE("OIDC") { + RECTANGLE("AWS") + RECTANGLE("SSM Document") + RECTANGLE("Terraform") + } + OIDC -> AWS + AWS -> SSM Document: "update server" + OIDC -> AWS: "create new clusters" +} + +package "Shared Hosted AWS Account" { + RECTANGLE("Profiles") { + RECTANGLE("Tags") + RECTANGLE("VPC") + } + Profiles -> Tags: "customer resources" + Profiles -> VPC: "subnet resources" + Profiles -> EC2: "instance roles" +} + +note right of ami: "Designed for\nFastAPI deployments" +@enduml +``` + +### Key Components: +1. **Accounts/Swarms**: Represents the main framework for your smart agent server. +2. 
**Environments**: Contains different environments such as `dev` in `us-east-1`. +3. **EC2 Instance**: Details the EC2 setup, including user data and necessary components (Nginx, Systemd, Uvicorn). +4. **GitHub Actions**: Integrates CI/CD using OIDC, SSM, and Terraform. +5. **Shared Hosted AWS Account**: Manages profiles, tags, and VPC configurations for customer resources. + +You can copy this code into a PlantUML tool to visualize the deployment diagram. +### Explanation: +- **Packages and Rectangles**: The diagram encapsulates components like Route 53, ALB, and Auto Scaling Group within a structured format. +- **Interactions**: Arrows show the relationships and interactions among components, such as the CNAME setup with Route 53 and the EC2 instances launching user data scripts. +- **GitHub Actions**: Represents automation activities and connections to AWS services. +- **Shared AWS Account**: Displays customer management and tagging for resources. + +This structure captures your specifications for the deployment of a self-service Cognito-enabled smart agent server using Terraform and AWS components effectively. Adjust as necessary for your specific documentation needs!``` + +### Explanation: +- The main structure includes the accounts, environments, EC2 instances, GitHub Actions, and shared AWS resources. +- The individual components and their interactions are represented using rectangles for entities and arrows for relationships. +- Customize further by adding details and refining the relationships based on your specific needs.``` + +To manage a shared hosted AWS account for multiple customers with the requirements you've laid out, you can consider the following architecture: + +### AWS Account Structure: +1. **Profiles**: Utilize IAM roles and users for each customer. Create separate profiles in the AWS CLI for easy access. + +2. 
**Resource Tagging**: Implement a consistent tagging strategy for all resources (e.g., `CustomerName`, `Project`, `Environment`, `CostCenter`) to facilitate cost management and resource identification. + +3. **API Call Restrictions**: Use IAM policies to specify which API calls each customer's role can perform based on their needs. + +4. **Cost Tracking**: Leverage AWS Cost Explorer and AWS Budgets, filtering by tags to track costs per customer. + +5. **Networking**: Set up custom VPCs for customers if isolation is required. Use subnets and security groups to segment resources. + +### Server and Agent Management: +1. **AWS Roles for Agents**: Each agent (server) should have an IAM role assigned that grants necessary permissions. If using Lambda, this can be done via execution roles. + +2. **Unix User Management**: Depending on your requirements: + - One option is to create a Unix user for each customer/agent. + - Assign unique home directories, storage solutions (e.g., EBS, S3), and ensure proper permissions. + +3. **Containerization**: Use Docker and Kubernetes (EKS) to manage applications. This allows for easier resource allocation and isolation between different customers. + +4. **Email & Backup**: Consider using Amazon SES for email needs and AWS Backup/CloudFormation for automated backups of customer data. + +### Best Practices: +- **Security**: Regularly audit IAM roles and policies for least privilege access. +- **Monitoring**: Use CloudWatch for logging and monitoring resource utilization. +- **Cost Management**: Use AWS Cost Allocation Reports to analyze customer costs effectively. + +This architecture ensures streamlined operations, cost tracking, and enhanced security across customer resources in a shared AWS account. + + + +### Repo Structure Overview + +- **Root Directory**: The main container of your Terraform project (`terraform-template/`). 
+ +- **README.md**: Provides an overview and documentation for the project, helping users understand its purpose and how to use it. + +- **Environments**: Contains specific configurations for different environments (e.g., development, staging). + - **swarms-aws-agent-api**: An example service with various setups. + - **dev/us-east-1**: The development environment tailored for the US East (N. Virginia) region. + +- **Modules**: Encapsulates reusable Terraform code to ensure DRY (Don't Repeat Yourself) principles. + - **swarms**: A collection of Terraform modules related to the swarm infrastructure. + +### Components Explained + +- **DNS Management**: + - Using GoDaddy for domain management and AWS Route 53 for DNS routing (e.g., `api.swarms.ai`). + +- **Application Layer**: + - Includes an Application Load Balancer directing traffic to an Auto Scaling Group, which manages instances defined by a launch template. + +- **Instance Management**: + - AMIs (Amazon Machine Images) created using Packer (e.g., `ubuntu-fastapi.pkr.hcl`). + +- **Settings for EC2 Instances**: + - User data scripts to set up necessary configurations, such as cloning from GitHub and running scripts as the `swarms` user to set up the FastAPI application. + +- **Access Management**: + - IAM roles and instance profiles are created to manage permissions. SSM (AWS Systems Manager) secrets are tagged, allowing specific servers to access configurations, like OpenAI keys. + +- **Networking**: + - Outbound traffic settings defined to control which IP ranges can communicate with the infrastructure. + +- **Application Deployment**: + - Utilizes Nginx and Systemd to manage FastAPI processes, ensuring multiple instances run smoothly. + +### CI/CD with GitHub Actions +- Implements OIDC connections to AWS for secure deployments and updates using GitHub Actions. +- SSM documents are utilized for executing specific management tasks, like creating new clusters and updating configurations seamlessly. 
+ +This structure facilitates a modular, maintainable, and automated approach to infrastructure as code using Terraform, alongside solid CI/CD practices. + + + + diff --git a/environments/call-swarms/deploy-docker.yaml b/environments/call-swarms/deploy-docker.yaml new file mode 100644 index 0000000..198e347 --- /dev/null +++ b/environments/call-swarms/deploy-docker.yaml @@ -0,0 +1,57 @@ +--- +description: | + ### Document name - Deploy Swarms + ## What does this document do? + It runs swarms commands + ## Input Parameters + * git_url: the source + * git_version: The version + * git_name: The remote name, only set once + ## Output Parameters + None. +schemaVersion: '2.2' +parameters: + GitUrl: + type: String + description: "(Required) The url." + allowedPattern: "^.+$" + GitName: + type: String + description: "(Required) The url." + allowedPattern: "^.+$" + GitVersion: + type: String + description: "(Required) The version." + allowedPattern: "^.+$" + +mainSteps: +- action: aws:runShellScript + name: DeployDocker + inputs: + runCommand: + - | + #!/bin/bash + set -x + # now change the ownership of /opt/swarms to the user swarms + chown -R swarms:swarms /opt/swarms + # echo generate this script to run as swarms to pull in latest version + tee /opt/swarms/update_docker_swarms.sh <<'EOF' + cd /opt/swarms/ + export HOME=/home/swarms/ + git config --global user.email "git-agent@swarms.ai" + git config --global user.name "git agent" + git remote add {{GitName}} {{GitUrl}} || echo oops + git remote set-url {{GitName}} {{GitUrl}} || echo oops2 + echo git url {{GitUrl}} + git fetch --all|| echo oops + git pull {{GitName}} {{GitVersion}}|| echo oops + git checkout --force {{GitVersion}} || echo checkout + git log -3 || echo git fail + ls /opt/swarms/api/*.sh || echo no files + EOF + + sudo chown swarms /opt/swarms/update_docker_swarms.sh + su -c "bash -e -x /opt/swarms/update_docker_swarms.sh" swarms + + sudo bash -e -x /opt/swarms/api/docker-boot.sh # needs root access + diff 
--git a/environments/call-swarms/deploy.yaml b/environments/call-swarms/deploy.yaml new file mode 100644 index 0000000..166cd1f --- /dev/null +++ b/environments/call-swarms/deploy.yaml @@ -0,0 +1,41 @@ +--- +description: | + ### Document name - Deploy Swarms + ## What does this document do? + It runs swarms commands + ## Input Parameters + * git_url: the source + * git_version: The version + * git_name: The remote name, only set once + ## Output Parameters + None. +schemaVersion: '2.2' +parameters: + GitUrl: + type: String + description: "(Required) The url." + allowedPattern: "^.+$" + GitName: + type: String + description: "(Required) The url." + allowedPattern: "^.+$" + GitVersion: + type: String + description: "(Required) The version." + allowedPattern: "^.+$" + +mainSteps: +- action: aws:runShellScript + name: Deploy + inputs: + runCommand: + - | + #!/bin/bash + cd /opt/swarms/ + sudo git remote add {{GitName}} {{GitUrl}} || echo oops + sudo git checkout --force {{GitVersion}} + sudo git pull {{GitName}} || echo oops2 + #sudo bash -x /opt/swarms/api/install.sh + sudo su -c "bash -e -x /var/swarms/agent_workspace/boot.sh" swarms + sudo systemctl daemon-reload + systemctl restart swarms-uvicorn || journalctl -xeu swarms-uvicorn.service diff --git a/environments/call-swarms/main.tf b/environments/call-swarms/main.tf new file mode 100644 index 0000000..4829396 --- /dev/null +++ b/environments/call-swarms/main.tf @@ -0,0 +1,119 @@ +# from +# https://github.com/shaikis/terraform-aws-ssm-document.git + +locals { + module_path = abspath(path.module) + codebase_root_path = abspath("${path.module}/../..") +} + +resource "aws_ssm_document" "deploy" { + name = "deploy" + document_format = "YAML" + document_type = "Command" + + content = file("${local.codebase_root_path}/environments/call-swarms/deploy.yaml") + tags = { env = "test" } +} + +resource "aws_ssm_document" "deploy-docker" { + name = "deploy-docker" + document_format = "YAML" + document_type = "Command" + content 
= file("${local.codebase_root_path}/environments/call-swarms/deploy-docker.yaml") + tags = { env = "test" } +} + + +# create a terraform code to deploy this and attach +# To allow the specified `ssm:SendCommand` operation, you need to create an IAM policy that grants the necessary permissions for the assumed role. The policy should be attached to the role `github`. Here’s a sample IAM policy: + +# ```json +# { +# "Version": "2012-10-17", +# "Statement": [ +# { +# "Effect": "Allow", +# "Action": [ +# "ssm:SendCommand", +# "ssm:ListCommands", +# "ssm:GetCommandInvocation" +# ], +# "Resource": "*" +# }, +# { +# "Effect": "Allow", +# "Action": [ +# "ec2:DescribeInstances" +# ], +# "Resource": "*" +# } +# ] +# } +# ``` + +# ### Steps to implement: + +# 1. **Go to the IAM Console** in your AWS Management Console. +# 2. **Locate the Role**: Search for the `github` role. +# 3. **Attach Policy**: +# - Go to the "Permissions" tab and click "Add inline policy." +# - Choose "JSON" and paste the policy above into the policy editor. +# - Review and give the policy a name, then save it. + +# ### Additional Notes: +# - Adjust the `Resource` element if you want to restrict access to specific resources rather than all (`*`). For example, you can specify the ARNs of specific EC2 instances or SSM documents. +# - Always follow the principle of least privilege to ensure you only grant the permissions that are necessary. + +# To deploy an IAM policy for the `github` role that allows the `ssm:SendCommand` operation, you can use Terraform. 
Below is a sample Terraform code snippet to create the IAM policy and attach it to the `github` role: + +# ```hcl +# provider "aws" { +# region = "us-east-1" # Change to your desired region +# } +# resource "aws_iam_policy" "github_ssm_policy" { +# name = "GitHubSSMPolicy" +# description = "Policy to allow SSM commands for GitHub role" + +# policy = jsonencode({ +# Version = "2012-10-17", +# Statement = [ +# { +# Effect = "Allow", +# Action = [ +# "ssm:SendCommand", +# "ssm:ListCommands", +# "ssm:GetCommandInvocation" +# ], +# Resource = "*" +# }, +# { +# Effect = "Allow", +# Action = [ +# "ec2:DescribeInstances" +# ], +# Resource = "*" +# } +# ] +# }) +# } + +# resource "aws_iam_role_policy_attachment" "attach_github_ssm_policy" { +# policy_arn = aws_iam_policy.github_ssm_policy.arn +# role = "github" # Ensure this matches your IAM role +# } + +# output "policy_arn" { +# value = aws_iam_policy.github_ssm_policy.arn +# } +# ``` + +# ### Instructions: + + +# 1. **Install Terraform** if you haven't already. +# 2. **Configure AWS Credentials**: Ensure your AWS credentials are set up properly (e.g., using `aws configure`). +# 3. **Create a new Terraform file** (e.g., `main.tf`) and paste the above code into it. +# 4. **Initialize Terraform**: Run `terraform init` in your terminal to initialize the working directory. +# 5. **Apply the Terraform configuration**: Run `terraform apply`, and confirm the changes when prompted. + +# This code will create a new IAM policy that allows the specified actions and automatically attach it to the existing `github` role. Adjust the policy as needed for your security requirements. 
diff --git a/environments/swarms-aws-agent-api/dev/us-east-1/components/machine_image/Makefile b/environments/eliza-agent-api/Makefile similarity index 52% rename from environments/swarms-aws-agent-api/dev/us-east-1/components/machine_image/Makefile rename to environments/eliza-agent-api/Makefile index 307e8a1..6a6bebf 100644 --- a/environments/swarms-aws-agent-api/dev/us-east-1/components/machine_image/Makefile +++ b/environments/eliza-agent-api/Makefile @@ -1,4 +1,4 @@ doit : tofu init tofu plan - tofu apply + tofu apply -auto-approve diff --git a/environments/eliza-agent-api/Readme.md b/environments/eliza-agent-api/Readme.md new file mode 100644 index 0000000..144ec90 --- /dev/null +++ b/environments/eliza-agent-api/Readme.md @@ -0,0 +1,10 @@ +# Done + +1. create vpc +2. create iam roles and users +3. create asg and alb and route53 and acm + +# Todo + +1. create cognito pool +2. create ami diff --git a/environments/eliza-agent-api/cloudflare.tf b/environments/eliza-agent-api/cloudflare.tf new file mode 100644 index 0000000..e066b4a --- /dev/null +++ b/environments/eliza-agent-api/cloudflare.tf @@ -0,0 +1,4 @@ +#provider "cloudflare" { +# api_token = var.cloudflare_api_token +#} +#export CLOUDFLARE_API_TOKEN=`cat ~/.cloudflare` diff --git a/environments/eliza-agent-api/components/Readme.md b/environments/eliza-agent-api/components/Readme.md new file mode 100644 index 0000000..a1e5f73 --- /dev/null +++ b/environments/eliza-agent-api/components/Readme.md @@ -0,0 +1,46 @@ +# Plan +## Phase 1 + +0. create standard vpc with a private network to host ec2, +so we will put the intances in public for now and use security groups to limit access. +1. create minimal ec2 instance in machine_image +terraform for ubuntu python uvicorn fastapi nginx systemd server with custom code pulled in via git +2. create minimal ec2 ami from instance in machine_image +3. create autoscaling_group of size 1 for image +4. create application load balancer +5. create dns_entry +6. 
packer ami for ubuntu python uvicorn fastapi nginx systemd server with custom git modules +7. oidc connect from github to aws + +# todo + +0. create role for developers to run ssm job +to deploy new service. +1. call from github action. +2. have least privlege +3. allow for calling ssm with information about context +4. route to server +5. retrieve logs +6. provision other services to be used by the agents + +0. run_only to only run the server without installing everything +1. alb sticky sessions :send users back to server via sticky sessions or some id. +2. create cognito user pool for login +4. create work_queue +5. create lambda_workers on queue +6. create resource_launchers to create new resources. +7. use fine grained roles +https://github.com/cloudposse/terraform-aws-ssm-iam-role.git +8. create user home directories for different agent +9. look at natgw alternatives +that costs money https://aws.amazon.com/vpc/pricing/ +10. check in copy of swagger +11. swarms router +12. fluid api +13. agent service discovery +14. setup ticketing interface +10. packer ami for ubuntu python uvicorn fastapi nginx systemd server with custom git modules +11. setup cicd +12. create job to generate diagrams and documentation from site +13. 
use agent to improve code + diff --git a/environments/eliza-agent-api/components/application_load_balancer/Readme.md b/environments/eliza-agent-api/components/application_load_balancer/Readme.md new file mode 100644 index 0000000..0ca19bd --- /dev/null +++ b/environments/eliza-agent-api/components/application_load_balancer/Readme.md @@ -0,0 +1,3 @@ +application_load_balancer + +https://github.com/terraform-aws-modules/terraform-aws-alb.git diff --git a/environments/eliza-agent-api/components/application_load_balancer/cloudflare.tf b/environments/eliza-agent-api/components/application_load_balancer/cloudflare.tf new file mode 100644 index 0000000..be2ab3b --- /dev/null +++ b/environments/eliza-agent-api/components/application_load_balancer/cloudflare.tf @@ -0,0 +1,18 @@ +#variable "dns_name" {} +#variable "account_id" {} +# data "cloudflare_zone" "zone" { +# count =0 +# name = var.dns_name +# account_id = var.account_id +# } + +# resource "cloudflare_record" "aws-ns-record" { +# count = 0 +# # count = "${length(aws_route53_zone.primary.name_servers)}" +# #domain = "${var.domain_name}" +# name = var.domain_name +# # zone_id = data.cloudflare_zone[0].zone.id +# content = "${element(aws_route53_zone.primary.name_servers, count.index)}" +# type = "NS" +# priority = 1 +# } diff --git a/environments/eliza-agent-api/components/application_load_balancer/https/main.tf b/environments/eliza-agent-api/components/application_load_balancer/https/main.tf new file mode 100644 index 0000000..0b8f4d1 --- /dev/null +++ b/environments/eliza-agent-api/components/application_load_balancer/https/main.tf @@ -0,0 +1,110 @@ +variable "alb_arn" {} +variable "domain_name" {} +variable "zone_id" {} +variable "prod_target_group_arn" {} +variable "test_target_group_arn" {} +variable "dev_target_group_arn" {} +variable "mcs_target_group_arn" {} +variable "mcs_dev_target_group_arn" {} + +module "acm" { + # count = 0 + source = "terraform-aws-modules/acm/aws" + version = "~> 4.0" + domain_name = 
var.domain_name + zone_id = var.zone_id + #wait_for_validation = false + subject_alternative_names = [ + "*.${var.domain_name}" + ] +} + +## add a rule for matching urls for /v1/// and routing that to a new target group +#like var.aws_lb_target_group{ { user, agent, api, target group name}} +resource "aws_lb_listener" "this" { + port = 443 + protocol = "HTTPS" + ssl_policy = "ELBSecurityPolicy-TLS13-1-2-Res-2021-06" + certificate_arn = module.acm.acm_certificate_arn + load_balancer_arn = var.alb_arn + default_action { + target_group_arn = var.prod_target_group_arn + type = "forward" + } +} + + +# make a new one for checking for name is test.api.swarms.ai +resource "aws_lb_listener_rule" "route_v1_api" { + listener_arn = aws_lb_listener.this.arn + priority = 200 # Set priority as needed, must be unique + + action { + type = "forward" + target_group_arn = var.test_target_group_arn # New target group's ARN + } + + condition { + host_header { + values = ["test.api.swarms.ai"] + } + } +} + +resource "aws_lb_listener_rule" "route_v1_api_dev" { + listener_arn = aws_lb_listener.this.arn + priority = 100 # Set priority as needed, must be unique + + action { + type = "forward" + target_group_arn = var.dev_target_group_arn # New target group's ARN + } + + condition { + host_header { + values = ["dev.api.swarms.ai"] + } + } +} + +resource "aws_lb_listener_rule" "route_v1_api_mcs" { + listener_arn = aws_lb_listener.this.arn + priority = 101 # Set priority as needed, must be unique + + action { + type = "forward" + target_group_arn = var.mcs_target_group_arn # New target group's ARN + } + + condition { + host_header { + values = ["mcs.api.swarms.ai"] + } + } +} + +resource "aws_lb_listener_rule" "route_v1_api_mcs_dev" { + listener_arn = aws_lb_listener.this.arn + priority = 102 # Set priority as needed, must be unique + + action { + type = "forward" + target_group_arn = var.mcs_dev_target_group_arn # New target group's ARN + } + + condition { + host_header { + values = 
["dev.mcs.api.swarms.ai"] + } + } +} + +resource "aws_lb_listener" "insecure" { + port = 80 + protocol = "HTTP" + load_balancer_arn = var.alb_arn + default_action { + target_group_arn = var.prod_target_group_arn + type = "forward" + } +} diff --git a/environments/eliza-agent-api/components/application_load_balancer/main.tf b/environments/eliza-agent-api/components/application_load_balancer/main.tf new file mode 100644 index 0000000..d230c94 --- /dev/null +++ b/environments/eliza-agent-api/components/application_load_balancer/main.tf @@ -0,0 +1,112 @@ +variable "vpc_id" {} +variable "security_group_id" {} # = local.name +variable "name" {} # = local.name +variable "domain_name" {} +variable "public_subnets" {} #= module.vpc.public_subnets + +data "aws_availability_zones" "available" {} + +locals { + name = "ex-${basename(path.cwd)}" + + vpc_cidr = "10.0.0.0/16" + azs = slice(data.aws_availability_zones.available.names, 0, 3) + + tags = { + Name = local.name + Example = local.name + Repository = "https://github.com/terraform-aws-modules/terraform-aws-alb" + } +} + +module "alb" { + source = "terraform-aws-modules/alb/aws" + version = "9.12.0" + name = "${var.name}-api" # local.name + vpc_id = var.vpc_id # module.vpc.vpc_id + subnets = var.public_subnets # module.vpc.public_subnets + enable_deletion_protection = false + create_security_group = false + security_groups = [var.security_group_id] + client_keep_alive = 7200 + tags = local.tags +} + + + +output "dns" { + value = module.alb.dns_name +} + +module "route53" { + source = "./route53/" + alb_dns_name = module.alb.dns_name + alb_dns_zone = module.alb.zone_id + domain_name = var.domain_name +} + +module "tg_prod" { + source = "./target_group/" + name_prefix = "swarms" + vpc_id = var.vpc_id # module.vpc.vpc_id +} + +module "tg_test" { + source = "./target_group/" + name_prefix = "test" + vpc_id = var.vpc_id # module.vpc.vpc_id +} + +module "tg_dev" { + source = "./target_group/" + name_prefix = "dev" + vpc_id = 
var.vpc_id # module.vpc.vpc_id +} + +module "tg_mcs" { + source = "./target_group/" + name_prefix = "mcs" + check_path = "/v1/medical-coder/patients" # this does not have a docs api yet + vpc_id = var.vpc_id # module.vpc.vpc_id +} + +module "tg_mcs_dev" { + source = "./target_group/" + name_prefix = "mcsdev" + check_path = "/v1/medical-coder/patients" # this does not have a docs api yet + vpc_id = var.vpc_id # module.vpc.vpc_id +} + +module "https" { + source = "./https/" + # vpc_id = var.vpc_id # module.vpc.vpc_id + zone_id = module.route53.primary_zone_id + domain_name = var.domain_name + alb_arn = module.alb.arn + prod_target_group_arn = module.tg_prod.alb_target_group_arn + test_target_group_arn = module.tg_test.alb_target_group_arn + dev_target_group_arn = module.tg_dev.alb_target_group_arn + mcs_target_group_arn = module.tg_mcs.alb_target_group_arn + mcs_dev_target_group_arn = module.tg_mcs_dev.alb_target_group_arn +} + + +output "prod_alb_target_group_arn" { + value = module.tg_prod.alb_target_group_arn +} + +output "test_alb_target_group_arn" { + value = module.tg_test.alb_target_group_arn +} + +output "dev_alb_target_group_arn" { + value = module.tg_dev.alb_target_group_arn +} + +output "mcs_alb_target_group_arn" { + value = module.tg_mcs.alb_target_group_arn +} + +output "mcs_dev_alb_target_group_arn" { + value = module.tg_mcs_dev.alb_target_group_arn +} diff --git a/environments/eliza-agent-api/components/application_load_balancer/notes.prg b/environments/eliza-agent-api/components/application_load_balancer/notes.prg new file mode 100644 index 0000000..df1c054 --- /dev/null +++ b/environments/eliza-agent-api/components/application_load_balancer/notes.prg @@ -0,0 +1,479 @@ +# access_logs = { +# bucket = module.log_bucket.s3_bucket_id +# prefix = "access-logs" +# } + +# connection_logs = { +# bucket = module.log_bucket.s3_bucket_id +# enabled = true +# prefix = "connection-logs" +# } + + # ex-http-https-redirect = { + # port = 80 + # protocol = "HTTP" + # 
redirect = { + # port = "443" + # protocol = "HTTPS" + # status_code = "HTTP_301" + # } + # } + # rules = { + # ex-fixed-response = { + # priority = 3 + # actions = [{ + # type = "fixed-response" + # content_type = "text/plain" + # status_code = 200 + # message_body = "This is a fixed response" + # }] + # conditions = [{ + # http_header = { + # http_header_name = "x-Gimme-Fixed-Response" + # values = ["yes", "please", "right now"] + # } + # }] + # } + # ex-weighted-forward = { + # priority = 4 + # actions = [{ + # type = "weighted-forward" + # target_groups = [ + # { + # target_group_key = "ex-lambda-with-trigger" + # weight = 2 + # }, + # { + # target_group_key = "ex-instance" + # weight = 1 + # } + # ] + # stickiness = { + # enabled = true + # duration = 3600 + # } + # }] + # conditions = [{ + # query_string = { + # key = "weighted" + # value = "true" + # } + # }] + # } + # ex-redirect = { + # priority = 5000 + # actions = [{ + # type = "redirect" + # status_code = "HTTP_302" + # host = "www.youtube.com" + # path = "/watch" + # query = "v=dQw4w9WgXcQ" + # protocol = "HTTPS" + # }] + # conditions = [{ + # query_string = [{ + # key = "video" + # value = "random" + # }, + # { + # key = "image" + # value = "next" + # }] + # }] + # } + # } + # } + # ex-http-weighted-target = { + # port = 81 + # protocol = "HTTP" + # weighted_forward = { + # target_groups = [ + # { + # target_group_key = "ex-lambda-with-trigger" + # weight = 60 + # }, + # { + # target_group_key = "ex-instance" + # weight = 40 + # } + # ] + # } + # } + # ex-fixed-response = { + # port = 82 + # protocol = "HTTP" + # fixed_response = { + # content_type = "text/plain" + # message_body = "Fixed message" + # status_code = "200" + # } + # } + # rules = { + # ex-cognito = { + # actions = [ + # { + # type = "authenticate-cognito" + # on_unauthenticated_request = "authenticate" + # session_cookie_name = "session-${local.name}" + # session_timeout = 3600 + # user_pool_arn = aws_cognito_user_pool.this.arn + # 
user_pool_client_id = aws_cognito_user_pool_client.this.id + # user_pool_domain = aws_cognito_user_pool_domain.this.domain + # }, + # { + # type = "forward" + # target_group_key = "ex-instance" + # } + # ] + # conditions = [{ + # path_pattern = { + # values = ["/some/auth/required/route"] + # } + # }] + # } + # ex-fixed-response = { + # priority = 3 + # actions = [{ + # type = "fixed-response" + # content_type = "text/plain" + # status_code = 200 + # message_body = "This is a fixed response" + # }] + # conditions = [{ + # http_header = { + # http_header_name = "x-Gimme-Fixed-Response" + # values = ["yes", "please", "right now"] + # } + # }] + # } + # ex-weighted-forward = { + # priority = 4 + # actions = [{ + # type = "weighted-forward" + # target_groups = [ + # { + # target_group_key = "ex-instance" + # weight = 2 + # }, + # { + # target_group_key = "ex-lambda-with-trigger" + # weight = 1 + # } + # ] + # stickiness = { + # enabled = true + # duration = 3600 + # } + # }] + # conditions = [{ + # query_string = { + # key = "weighted" + # value = "true" + # }, + # path_pattern = { + # values = ["/some/path"] + # } + # }] + # } + # ex-redirect = { + # priority = 5000 + # actions = [{ + # type = "redirect" + # status_code = "HTTP_302" + # host = "www.youtube.com" + # path = "/watch" + # query = "v=dQw4w9WgXcQ" + # protocol = "HTTPS" + # }] + # conditions = [{ + # query_string = { + # key = "video" + # value = "random" + # } + # }] + # } + # } + # ex-cognito = { + # port = 444 + # protocol = "HTTPS" + # certificate_arn = module.acm.acm_certificate_arn + # authenticate_cognito = { + # authentication_request_extra_params = { + # display = "page" + # prompt = "login" + # } + # on_unauthenticated_request = "authenticate" + # session_cookie_name = "session-${local.name}" + # session_timeout = 3600 + # user_pool_arn = aws_cognito_user_pool.this.arn + # user_pool_client_id = aws_cognito_user_pool_client.this.id + # user_pool_domain = aws_cognito_user_pool_domain.this.domain + # 
} + # forward = { + # target_group_key = "ex-instance" + # } + # rules = { + # ex-oidc = { + # priority = 2 + # actions = [ + # { + # type = "authenticate-oidc" + # authentication_request_extra_params = { + # display = "page" + # prompt = "login" + # } + # authorization_endpoint = "https://${var.domain_name}/auth" + # client_id = "client_id" + # client_secret = "client_secret" + # issuer = "https://${var.domain_name}" + # token_endpoint = "https://${var.domain_name}/token" + # user_info_endpoint = "https://${var.domain_name}/user_info" + # }, + # { + # type = "forward" + # target_group_key = "ex-lambda-with-trigger" + # } + # ] + # conditions = [{ + # host_header = { + # values = ["foobar.com"] + # } + # }] + # } + # } + # } + # ex-oidc = { + # port = 445 + # protocol = "HTTPS" + # certificate_arn = module.acm.acm_certificate_arn + # action_type = "authenticate-oidc" + # authenticate_oidc = { + # authentication_request_extra_params = { + # display = "page" + # prompt = "login" + # } + # authorization_endpoint = "https://${var.domain_name}/auth" + # client_id = "client_id" + # client_secret = "client_secret" + # issuer = "https://${var.domain_name}" + # token_endpoint = "https://${var.domain_name}/token" + # user_info_endpoint = "https://${var.domain_name}/user_info" + # } + # forward = { + # target_group_key = "ex-instance" + # } + # } + # } + # target_group_health = { + # dns_failover = { + # minimum_healthy_targets_count = 2 + # } + # unhealthy_state_routing = { + # minimum_healthy_targets_percentage = 50 + # } + # } + # health_check = { + # enabled = true + # interval = 30 + # path = "/healthz" + # port = "traffic-port" + # healthy_threshold = 3 + # unhealthy_threshold = 3 + # timeout = 6 + # protocol = "HTTP" + # matcher = "200-399" + # } + # ex-lambda-with-trigger = { + # name_prefix = "l1-" + # target_type = "lambda" + # lambda_multi_value_headers_enabled = true + # target_id = module.lambda_with_allowed_triggers.lambda_function_arn + # } + # 
ex-lambda-without-trigger = { + # name_prefix = "l2-" + # target_type = "lambda" + # target_id = module.lambda_without_allowed_triggers.lambda_function_arn + # attach_lambda_permission = true + # } + # } + # additional_target_group_attachments = { + # ex-instance-other = { + # target_group_key = "ex-instance" + # target_type = "instance" + # target_id = aws_instance.other.id + # port = "80" + # } + # } + # # Route53 Record(s) + # route53_records = { + # A = { + # name = local.name + # type = "A" + # zone_id = data.aws_route53_zone.this.id + # } + # AAAA = { + # name = local.name + # type = "AAAA" + # zone_id = data.aws_route53_zone.this.id + # } + # } + + +#module "alb_disabled" { +# source = "../../"# +# +# create = false +#} + +# ################################################################################ +# # Using packaged function from Lambda module +# ################################################################################ + +# locals { +# package_url = "https://raw.githubusercontent.com/terraform-aws-modules/terraform-aws-lambda/master/examples/fixtures/python3.8-zip/existing_package.zip" +# downloaded = "downloaded_package_${md5(local.package_url)}.zip" +# } + +# resource "null_resource" "download_package" { +# triggers = { +# downloaded = local.downloaded +# } + +# provisioner "local-exec" { +# command = "curl -L -o ${local.downloaded} ${local.package_url}" +# } +# } + +# module "lambda_with_allowed_triggers" { +# source = "terraform-aws-modules/lambda/aws" +# version = "~> 6.0" + +# function_name = "${local.name}-with-allowed-triggers" +# description = "My awesome lambda function (with allowed triggers)" +# handler = "index.lambda_handler" +# runtime = "python3.8" + +# publish = true +# create_package = false +# local_existing_package = local.downloaded + +# allowed_triggers = { +# AllowExecutionFromELB = { +# service = "elasticloadbalancing" +# source_arn = module.alb.target_groups["ex-lambda-with-trigger"].arn +# } +# } + +# depends_on = 
[null_resource.download_package] +# } + +# module "lambda_without_allowed_triggers" { +# source = "terraform-aws-modules/lambda/aws" +# version = "~> 6.0" + +# function_name = "${local.name}-without-allowed-triggers" +# description = "My awesome lambda function (without allowed triggers)" +# handler = "index.lambda_handler" +# runtime = "python3.8" + +# publish = true +# create_package = false +# local_existing_package = local.downloaded + +# # Allowed triggers will be managed by ALB module +# allowed_triggers = {} + +# depends_on = [null_resource.download_package] +# } + +# ################################################################################ +# # Supporting resources +# ################################################################################ + +# module "vpc" { +# source = "terraform-aws-modules/vpc/aws" +# version = "~> 5.0" + +# name = local.name +# cidr = local.vpc_cidr + +# azs = local.azs +# private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 4, k)] +# public_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 48)] + +# tags = local.tags +# } + +# module "wildcard_cert" { +# source = "terraform-aws-modules/acm/aws" +# version = "~> 4.0" + +# domain_name = "*.${var.domain_name}" +# zone_id = data.aws_route53_zone.this.id +# } + +# data "aws_ssm_parameter" "al2" { +# name = "/aws/service/ami-amazon-linux-latest/amzn2-ami-hvm-x86_64-gp2" +# } + +# resource "aws_instance" "this" { +# ami = data.aws_ssm_parameter.al2.value +# instance_type = "t3.nano" +# subnet_id = element(module.vpc.private_subnets, 0) +# } + +# resource "aws_instance" "other" { +# ami = data.aws_ssm_parameter.al2.value +# instance_type = "t3.nano" +# subnet_id = element(module.vpc.private_subnets, 0) +# } + +# ################################################################## +# # AWS Cognito User Pool +# ################################################################## + +# resource "aws_cognito_user_pool" "this" { +# name = 
"user-pool-${local.name}" +# } + +# resource "aws_cognito_user_pool_client" "this" { +# name = "user-pool-client-${local.name}" +# user_pool_id = aws_cognito_user_pool.this.id +# generate_secret = true +# allowed_oauth_flows = ["code", "implicit"] +# callback_urls = ["https://${var.domain_name}/callback"] +# allowed_oauth_scopes = ["email", "openid"] +# allowed_oauth_flows_user_pool_client = true +# } + +# resource "random_string" "this" { +# length = 5 +# upper = false +# special = false +# } + +# resource "aws_cognito_user_pool_domain" "this" { +# domain = "${local.name}-${random_string.this.result}" +# user_pool_id = aws_cognito_user_pool.this.id +# } + +#module# "log_bucket" { +# source = "terraform-aws-modules/s3-bucket/aws" +# version = "~> 3.0" + +# bucket_prefix = "${local.name}-logs-" +# acl = "log-delivery-write" + +# # For example only +# force_destroy = true + +# control_object_ownership = true +# object_ownership = "ObjectWriter" + +# attach_elb_log_delivery_policy = true # Required for ALB logs +# attach_lb_log_delivery_policy = true # Required for ALB/NLB logs + +# attach_deny_insecure_transport_policy = true +# attach_require_latest_tls_policy = true +# tags = local.tags +#} diff --git a/environments/eliza-agent-api/components/application_load_balancer/route53/main.tf b/environments/eliza-agent-api/components/application_load_balancer/route53/main.tf new file mode 100644 index 0000000..ad074f9 --- /dev/null +++ b/environments/eliza-agent-api/components/application_load_balancer/route53/main.tf @@ -0,0 +1,69 @@ +variable "domain_name" {} # = local.name +variable "alb_dns_name" {} +variable "alb_dns_zone" {} +data "aws_route53_zone" "primary" { + name = var.domain_name +} + +resource "aws_route53_record" "api-cname" { + zone_id = data.aws_route53_zone.primary.zone_id + name = var.domain_name + # type = "CNAME" + type = "A" + # ttl = 5 + + # weighted_routing_policy { + # weight = 10 + # } + #set_identifier = "dev" + alias { + name = var.alb_dns_name + 
zone_id = var.alb_dns_zone + evaluate_target_health = true + + # + } +} + +resource "aws_route53_record" "api-cname-test" { + zone_id = data.aws_route53_zone.primary.zone_id + name = "test.${var.domain_name}" + type = "CNAME" + records = [aws_route53_record.api-cname.fqdn] + ttl = 300 +} + +resource "aws_route53_record" "api-cname-dev" { + zone_id = data.aws_route53_zone.primary.zone_id + name = "dev.${var.domain_name}" + type = "CNAME" + records = [aws_route53_record.api-cname.fqdn] + ttl = 300 + +} + +# resource "aws_route53_record" "api-cname-mcs" { +# zone_id = data.aws_route53_zone.primary.zone_id +# name = "mcs.${var.domain_name}" +# type = "CNAME" +# records = [aws_route53_record.api-cname.fqdn] +# ttl = 300 +# } + +# resource "aws_route53_record" "api-cname-mcs-dev" { +# zone_id = data.aws_route53_zone.primary.zone_id +# name = "dev.mcs.${var.domain_name}" +# type = "CNAME" +# records = [aws_route53_record.api-cname.fqdn] +# ttl = 300 +# } + +output "cname" { + value = aws_route53_record.api-cname.fqdn +} +output "zone" { + value = data.aws_route53_zone.primary +} +output "primary_zone_id" { + value = data.aws_route53_zone.primary.zone_id +} diff --git a/environments/eliza-agent-api/components/application_load_balancer/target_group/main.tf b/environments/eliza-agent-api/components/application_load_balancer/target_group/main.tf new file mode 100644 index 0000000..f0efe60 --- /dev/null +++ b/environments/eliza-agent-api/components/application_load_balancer/target_group/main.tf @@ -0,0 +1,40 @@ +variable "vpc_id" {} +variable "name_prefix" {} +variable "check_path" { +default = "/v1/docs" # the docs api +} +resource "aws_lb_target_group" "this" { + name_prefix = var.name_prefix + protocol = "HTTP" + port = 80 + target_type = "instance" + vpc_id = var.vpc_id + deregistration_delay = 10 + #load_balancing_algorithm_type = "weighted_random" + #load_balancing_anomaly_mitigation = "on" + #load_balancing_cross_zone_enabled = false + protocol_version = "HTTP1" + # + 
health_check { + path = var.check_path #"/v1/docs" # the docs api + #"/v1/medical-coder/patients" + + enabled = true + healthy_threshold = 10 + interval = 130 + port = "traffic-port" + protocol = "HTTP" + timeout = 120 + unhealthy_threshold = 10 + } + + # stickiness { + # cookie_duration = 86400 + # enabled = true + # type = "lb_cookie" + # } + +} +output "alb_target_group_arn" { + value = aws_lb_target_group.this.arn +} diff --git a/environments/eliza-agent-api/components/autoscaling_group/Makefile b/environments/eliza-agent-api/components/autoscaling_group/Makefile new file mode 100644 index 0000000..6a6bebf --- /dev/null +++ b/environments/eliza-agent-api/components/autoscaling_group/Makefile @@ -0,0 +1,4 @@ +doit : + tofu init + tofu plan + tofu apply -auto-approve diff --git a/environments/swarms-aws-agent-api/dev/us-east-1/components/autoscaling_group/Readme.md b/environments/eliza-agent-api/components/autoscaling_group/Readme.md similarity index 100% rename from environments/swarms-aws-agent-api/dev/us-east-1/components/autoscaling_group/Readme.md rename to environments/eliza-agent-api/components/autoscaling_group/Readme.md diff --git a/environments/eliza-agent-api/components/autoscaling_group/main.tf b/environments/eliza-agent-api/components/autoscaling_group/main.tf new file mode 100644 index 0000000..ad5997c --- /dev/null +++ b/environments/eliza-agent-api/components/autoscaling_group/main.tf @@ -0,0 +1,149 @@ +variable "use_mixed_instances_policy" { + default = false +} +variable "mixed_instances_policy" { + default = null + type = any +} +variable "instance_requirements" { + #default = null + type = any + default = {} +} + +variable "aws_iam_instance_profile_ssm_arn" {} +variable "target_group_arn" {} +variable "name" {} +variable "instance_type" {} +variable "launch_template_id" {} +variable "image_id" {} +variable "vpc_id" {} +variable "tags" {} +variable "ec2_subnet_id" {} + +locals { + iam_instance_profile_arn = var.aws_iam_instance_profile_ssm_arn 
+ instance_type = var.instance_type +} + +module "autoscaling" { + source = "terraform-aws-modules/autoscaling/aws" + version = "8.0.0" + name = var.name + + health_check_type = "EC2" + desired_capacity = 1 + max_size = 5 + min_size = 1 + + create_launch_template = false + update_default_version = true + + launch_template_id = var.launch_template_id + launch_template_version = "$Latest" + + vpc_zone_identifier = [var.ec2_subnet_id] + + instance_market_options = { + market_type = "spot" + } + network_interfaces = [{ + associate_public_ip_address = true + device_index = 0 + delete_on_termination = true + description = "interface1" + # security_groups = [var.security_group_id] + } + ] + instance_type = var.instance_type + #instance_requirements = var.instance_requirements description = "The attribute requirements for the type of instance. If present then `instance_type` cannot be present" + image_id = var.image_id + + create_iam_instance_profile = true + iam_role_name = "ssm-${var.name}" + iam_role_path = "/ec2/" + iam_role_description = "SSM IAM role for swarms" + iam_role_tags = { + CustomIamRole = "Yes" + } + + iam_role_policies = { + AmazonSSMManagedInstanceCore = "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore" + } + + # target_group_arn = + traffic_source_attachments = { + ex-alb = { + traffic_source_identifier = var.target_group_arn + traffic_source_type = "elbv2" # default + } + } + + # Target scaling policy schedule based on average CPU load + scaling_policies = { + avg-cpu-policy-greater-than-50 = { + policy_type = "TargetTrackingScaling" + estimated_instance_warmup = 1200 + target_tracking_configuration = { + predefined_metric_specification = { + predefined_metric_type = "ASGAverageCPUUtilization" + } + target_value = 50.0 + } + }, + predictive-scaling = { + policy_type = "PredictiveScaling" + predictive_scaling_configuration = { + mode = "ForecastAndScale" + scheduling_buffer_time = 10 + max_capacity_breach_behavior = "IncreaseMaxCapacity" + 
max_capacity_buffer = 10 + metric_specification = { + target_value = 32 + predefined_scaling_metric_specification = { + predefined_metric_type = "ASGAverageCPUUtilization" + resource_label = "testLabel" + } + predefined_load_metric_specification = { + predefined_metric_type = "ASGTotalCPUUtilization" + resource_label = "testLabel" + } + } + } + } + # request-count-per-target = { + # policy_type = "TargetTrackingScaling" + # estimated_instance_warmup = 120 + # target_tracking_configuration = { + # predefined_metric_specification = { + # predefined_metric_type = "ALBRequestCountPerTarget" + # resource_label = "swarms1" + # #"${module.alb.arn_suffix}/${module.alb.target_groups["ex_asg"].arn_suffix}" + # } + # target_value = 800 + # } + # } + #use_mixed_instances_policy = var.use_mixed_instances_policy + + #mixed_instances_policy = var.mixed_instances_policy + + + scale-out = { + name = "scale-out" + adjustment_type = "ExactCapacity" + policy_type = "StepScaling" + estimated_instance_warmup = 120 + step_adjustment = [ + { + scaling_adjustment = 1 + metric_interval_lower_bound = 0 + metric_interval_upper_bound = 10 + }, + { + scaling_adjustment = 2 + metric_interval_lower_bound = 10 + } + ] + } + } +} diff --git a/environments/eliza-agent-api/components/autoscaling_group/spot/main.tf b/environments/eliza-agent-api/components/autoscaling_group/spot/main.tf new file mode 100644 index 0000000..bdd2cea --- /dev/null +++ b/environments/eliza-agent-api/components/autoscaling_group/spot/main.tf @@ -0,0 +1,151 @@ +variable "use_mixed_instances_policy" { + default = true +} +#variable spot_max_price {} +variable "mixed_instances_policy" { + default = null + type = any +} +variable "instance_requirements" { + #default = null + type = any + default = {} +} + +variable "aws_iam_instance_profile_ssm_arn" {} +#variable "target_group_arn" {} +variable "name" {} +variable "instance_type" {} +variable "launch_template_id" {} +variable "image_id" {} +variable "vpc_id" {} +variable "tags" 
{} +variable "ec2_subnet_id" {} + +locals { + iam_instance_profile_arn = var.aws_iam_instance_profile_ssm_arn + instance_type = var.instance_type +} + +module "autoscaling" { + source = "terraform-aws-modules/autoscaling/aws" + version = "8.0.0" + name = var.name + +# traffic_source_attachments = { +# ex-alb = { +# traffic_source_identifier = var.target_group_arn +# traffic_source_type = "elbv2" # default +# } +# } + + health_check_type = "EC2" + desired_capacity = 1 + desired_capacity_type = "units" + max_size = 2 + min_size = 1 + create = true + capacity_rebalance = false + + create_launch_template = false + update_default_version = true + + launch_template_id = var.launch_template_id + launch_template_version = "$Latest" + + vpc_zone_identifier = [var.ec2_subnet_id] + + instance_market_options = { + market_type = "spot" + } + network_interfaces = [{ + associate_public_ip_address = true + device_index = 0 + delete_on_termination = true + description = "interface1" + # security_groups = [var.security_group_id] + } + ] + instance_type = var.instance_type + #instance_requirements = var.instance_requirements description = "The attribute requirements for the type of instance. 
If present then `instance_type` cannot be present" + image_id = var.image_id + + create_iam_instance_profile = true + iam_role_name = "ssm-${var.name}" + iam_role_path = "/ec2/" + iam_role_description = "SSM IAM role for swarms" + iam_role_tags = { + CustomIamRole = "Yes" + } + + iam_role_policies = { + AmazonSSMManagedInstanceCore = "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore" + } + + use_mixed_instances_policy = var.use_mixed_instances_policy + mixed_instances_policy = var.mixed_instances_policy + + # Target scaling policy schedule based on average CPU load + scaling_policies = { + avg-cpu-policy-greater-than-50 = { + policy_type = "TargetTrackingScaling" + estimated_instance_warmup = 1200 + target_tracking_configuration = { + predefined_metric_specification = { + predefined_metric_type = "ASGAverageCPUUtilization" + } + target_value = 50.0 + } + }, + predictive-scaling = { + policy_type = "PredictiveScaling" + predictive_scaling_configuration = { + mode = "ForecastAndScale" + scheduling_buffer_time = 10 + max_capacity_breach_behavior = "IncreaseMaxCapacity" + max_capacity_buffer = 10 + metric_specification = { + target_value = 32 + predefined_scaling_metric_specification = { + predefined_metric_type = "ASGAverageCPUUtilization" + resource_label = "testLabel" + } + predefined_load_metric_specification = { + predefined_metric_type = "ASGTotalCPUUtilization" + resource_label = "testLabel" + } + } + } + } + # request-count-per-target = { + # policy_type = "TargetTrackingScaling" + # estimated_instance_warmup = 120 + # target_tracking_configuration = { + # predefined_metric_specification = { + # predefined_metric_type = "ALBRequestCountPerTarget" + # resource_label = "swarms1" + # #"${module.alb.arn_suffix}/${module.alb.target_groups["ex_asg"].arn_suffix}" + # } + # target_value = 800 + # } + # } + + scale-out = { + name = "scale-out" + adjustment_type = "ExactCapacity" + policy_type = "StepScaling" + estimated_instance_warmup = 120 + step_adjustment = [ + 
{ + scaling_adjustment = 1 + metric_interval_lower_bound = 0 + metric_interval_upper_bound = 10 + }, + { + scaling_adjustment = 2 + metric_interval_lower_bound = 10 + } + ] + } + } +} diff --git a/environments/swarms-aws-agent-api/dev/us-east-1/components/cognito_user_pool/Readme.md b/environments/eliza-agent-api/components/cognito_user_pool/README.md similarity index 100% rename from environments/swarms-aws-agent-api/dev/us-east-1/components/cognito_user_pool/Readme.md rename to environments/eliza-agent-api/components/cognito_user_pool/README.md diff --git a/environments/eliza-agent-api/components/cognito_user_pool/Readme.md b/environments/eliza-agent-api/components/cognito_user_pool/Readme.md new file mode 100644 index 0000000..1665bd2 --- /dev/null +++ b/environments/eliza-agent-api/components/cognito_user_pool/Readme.md @@ -0,0 +1 @@ +cognito_user_pool diff --git a/environments/eliza-agent-api/components/cognito_user_pool/main.tf b/environments/eliza-agent-api/components/cognito_user_pool/main.tf new file mode 100644 index 0000000..d4de4ba --- /dev/null +++ b/environments/eliza-agent-api/components/cognito_user_pool/main.tf @@ -0,0 +1,345 @@ +variable "google_oauth_client_secret" {} +variable "google_oauth_client_id" {} +variable aws_region {} # us-east-1 +variable aws_account {} +variable myemail {} +variable mydomain {} +variable mydomain_suffix {} +locals { + mydomain=var.mydomain + mydomain_suffix = var.mydomain_suffix + mydomain_dot_com = "${local.mydomain}.${local.mydomain_suffix}" + myemail=var.myemail + myaccount=var.aws_account +} + +module "aws_cognito_user_pool_complete_example" { + + source = "lgallard/cognito-user-pool/aws" + + user_pool_name = "mypool_complete" + alias_attributes = ["email", "phone_number"] + auto_verified_attributes = ["email"] + sms_authentication_message = "Your username is {username} and temporary password is {####}." + sms_verification_message = "This is the verification message {####}." 
+ + deletion_protection = "ACTIVE" + + mfa_configuration = "OPTIONAL" + software_token_mfa_configuration = { + enabled = true + } + + admin_create_user_config = { + email_message = "Dear {username}, your verification code is {####}." + email_subject = "Here, your verification code baby" + sms_message = "Your username is {username} and temporary password is {####}." + } + + device_configuration = { + challenge_required_on_new_device = true + device_only_remembered_on_user_prompt = true + } + + email_configuration = { + email_sending_account = "DEVELOPER" + reply_to_email_address = "email@${local.mydomain_dot_com}" + source_arn = "arn:aws:ses:${var.aws_region}:${var.aws_account}:identity/${local.myemail}@${local.mydomain_dot_com}" + } + + lambda_config = { + create_auth_challenge = "arn:aws:lambda:${var.aws_region}:${var.aws_account}:function:create_auth_challenge" + custom_message = "arn:aws:lambda:${var.aws_region}:${var.aws_account}:function:custom_message" + define_auth_challenge = "arn:aws:lambda:${var.aws_region}:${var.aws_account}:function:define_auth_challenge" + post_authentication = "arn:aws:lambda:${var.aws_region}:${var.aws_account}:function:post_authentication" + post_confirmation = "arn:aws:lambda:${var.aws_region}:${var.aws_account}:function:post_confirmation" + pre_authentication = "arn:aws:lambda:${var.aws_region}:${var.aws_account}:function:pre_authentication" + pre_sign_up = "arn:aws:lambda:${var.aws_region}:${var.aws_account}:function:pre_sign_up" +# pre_token_generation = "arn:aws:lambda:${var.aws_region}:${var.aws_account}:function:pre_token_generation" + user_migration = "arn:aws:lambda:${var.aws_region}:${var.aws_account}:function:user_migration" + verify_auth_challenge_response = "arn:aws:lambda:${var.aws_region}:${var.aws_account}:function:verify_auth_challenge_response" + #kms_key_id = aws_kms_key.lambda-custom-sender.arn + pre_token_generation_config = { + lambda_arn = 
"arn:aws:lambda:${var.aws_region}:${var.aws_account}:function:pre_token_generation_config" + lambda_version = "V1_0" + } + #custom_email_sender = { + # lambda_arn = "arn:aws:lambda:${var.aws_region}:${var.aws_account}:function:custom_email_sender" + # lambda_version = "V1_0" + #} + #custom_sms_sender = { + # lambda_arn = "arn:aws:lambda:${var.aws_region}:${var.aws_account}:function:custom_sms_sender" + # lambda_version = "V1_0" + #} + } + + password_policy = { + minimum_length = 10 + require_lowercase = false + require_numbers = true + require_symbols = true + require_uppercase = true + temporary_password_validity_days = 120 + + } + + user_pool_add_ons = { + advanced_security_mode = "ENFORCED" + } + + verification_message_template = { + default_email_option = "CONFIRM_WITH_CODE" + } + + schemas = [ + { + attribute_data_type = "Boolean" + developer_only_attribute = false + mutable = true + name = "available" + required = false + }, + { + attribute_data_type = "Boolean" + developer_only_attribute = true + mutable = true + name = "registered" + required = false + } + ] + + string_schemas = [ + { + attribute_data_type = "String" + developer_only_attribute = false + mutable = false + name = "email" + required = true + + string_attribute_constraints = { + min_length = 7 + max_length = 15 + } + }, + { + attribute_data_type = "String" + developer_only_attribute = false + mutable = false + name = "gender" + required = true + + string_attribute_constraints = { + min_length = 7 + max_length = 15 + } + }, + ] + + number_schemas = [ + { + attribute_data_type = "Number" + developer_only_attribute = true + mutable = true + name = "mynumber1" + required = false + + number_attribute_constraints = { + min_value = 2 + max_value = 6 + } + }, + { + attribute_data_type = "Number" + developer_only_attribute = true + mutable = true + name = "mynumber2" + required = false + + number_attribute_constraints = { + min_value = 2 + max_value = 6 + } + }, + ] + + # user_pool_domain + domain = 
"${local.mydomain}-com" + + # clients + clients = [ + { + allowed_oauth_flows_user_pool_client = false + allowed_oauth_scopes = [ + "aws.cognito.signin.user.admin", + "email", + "https://introspector.meme/sample-scope-1", + "https://introspector.meme/sample-scope-2", + "openid", + "phone", + "profile", + + ] + callback_urls = ["https://${local.mydomain_dot_com}/callback"] + default_redirect_uri = "https://${local.mydomain_dot_com}/callback" + explicit_auth_flows = [] + generate_secret = true + logout_urls = [] + name = "test1" + read_attributes = ["email"] + supported_identity_providers = [ + "COGNITO", + "Google", + + ] + write_attributes = [] + access_token_validity = 1 + id_token_validity = 1 + refresh_token_validity = 60 + token_validity_units = { + access_token = "hours" + id_token = "hours" + refresh_token = "days" + } + ui_customization_css = file("./custom_style.css") + ui_customization_image_file = filebase64("logo.png") + }, + { + allowed_oauth_flows = [ + "code", + "implicit" + ] + allowed_oauth_flows_user_pool_client = false # FIXME why? 
+ allowed_oauth_scopes = [ + "aws.cognito.signin.user.admin", + "email", + "https://introspector.meme/sample-scope-1", + "https://introspector.meme/sample-scope-2", + "openid", + "phone", + "profile", + + ] + callback_urls = ["https://${local.mydomain_dot_com}/callback"] + default_redirect_uri = "https://${local.mydomain_dot_com}/callback" + explicit_auth_flows = [] + generate_secret = false + logout_urls = [] + name = "test2" + read_attributes = [] + supported_identity_providers = [ + "COGNITO", + "Google", + + ] + write_attributes = [] + refresh_token_validity = 30 + }, + { + allowed_oauth_flows = ["code", "implicit"] + allowed_oauth_flows_user_pool_client = true + allowed_oauth_scopes = [ + "aws.cognito.signin.user.admin", + "email", + "https://introspector.meme/sample-scope-1", + "https://introspector.meme/sample-scope-2", + "openid", + "phone", + "profile", + + ] + callback_urls = ["https://${local.mydomain_dot_com}/callback"] + default_redirect_uri = "https://${local.mydomain_dot_com}/callback" + explicit_auth_flows = ["CUSTOM_AUTH_FLOW_ONLY", "ADMIN_NO_SRP_AUTH"] + generate_secret = false + logout_urls = ["https://${local.mydomain_dot_com}/logout"] + name = "test3" + read_attributes = ["email", "phone_number"] + supported_identity_providers = [] + write_attributes = ["email", "gender", "locale", ] + refresh_token_validity = 30 + } + ] + + # user_group + user_groups = [ + { name = "mygroup1" + description = "My group 1" + }, + { name = "mygroup2" + description = "My group 2" + }, + ] + + # resource_servers + resource_servers = [ + { + identifier = "https://${local.mydomain_dot_com}" + name = "${local.mydomain}" + scope = [ + { + scope_name = "sample-scope-1" + scope_description = "A sample Scope Description for ${local.mydomain_dot_com}" + }, + { + scope_name = "sample-scope-2" + scope_description = "Another sample Scope Description for ${local.mydomain_dot_com}" + }, + ] + }, + { + identifier = "https://weather-read-app.com" + name = "weather-read" + scope = 
[ + { + scope_name = "weather.read" + scope_description = "Read weather forecasts" + } + ] + } + ] + + # identity_providers + identity_providers = [ + { + provider_name = "Google" + provider_type = "Google" + + provider_details = { + authorize_scopes = "email" + #export TF_VAR_google_oauth_client_id=XXXX + client_id = var.google_oauth_client_id # This should be retrieved from AWS Secret Manager, otherwise Terraform will force an in-place replacement because it is treated as a sensitive value + # export TF_VAR_google_oauth_client_secret=YYY + client_secret = var.google_oauth_client_secret #"your client_secret" # # This should be retrieved from AWS Secret Manager, otherwise Terraform will force an in-place replacement because it is treated as a sensitive value + attributes_url_add_attributes = "true" + authorize_url = "https://accounts.google.com/o/oauth2/v2/auth" + oidc_issuer = "https://accounts.google.com" + token_request_method = "POST" + token_url = "https://www.googleapis.com/oauth2/v4/token" + } + + attribute_mapping = { + email = "email" + username = "sub" + gender = "gender" + } + } + ] + + # tags + tags = { + Owner = "infra" + Environment = "production" + Terraform = true + } +} + + + # KMS key for lambda custom sender config" + resource "aws_kms_key" "lambda-custom-sender" { + count = 0 + description = "KMS key for lambda custom sender config" + } + +output cognito{ + value = module.aws_cognito_user_pool_complete_example +} diff --git a/environments/eliza-agent-api/components/cognito_user_pool/provider.tf b/environments/eliza-agent-api/components/cognito_user_pool/provider.tf new file mode 100644 index 0000000..634c762 --- /dev/null +++ b/environments/eliza-agent-api/components/cognito_user_pool/provider.tf @@ -0,0 +1,4 @@ +provider "aws" { + region = var.env["region"] + profile = var.env["profile"] +} diff --git a/environments/eliza-agent-api/components/cognito_user_pool/variables.tf b/environments/eliza-agent-api/components/cognito_user_pool/variables.tf new 
file mode 100644 index 0000000..c7b7aed --- /dev/null +++ b/environments/eliza-agent-api/components/cognito_user_pool/variables.tf @@ -0,0 +1,4 @@ +variable "env" { + type = map(any) + default = {} +} diff --git a/environments/eliza-agent-api/components/keypairs/main.tf b/environments/eliza-agent-api/components/keypairs/main.tf new file mode 100644 index 0000000..2ce57c4 --- /dev/null +++ b/environments/eliza-agent-api/components/keypairs/main.tf @@ -0,0 +1,3 @@ +# resource "aws_key_pair" "mdupont-deployer" { +# key_name = "mdupont-deployer-key-2" +# } diff --git a/environments/swarms-aws-agent-api/dev/us-east-1/components/lambda_workers/Readme.md b/environments/eliza-agent-api/components/lambda_workers/Readme.md similarity index 100% rename from environments/swarms-aws-agent-api/dev/us-east-1/components/lambda_workers/Readme.md rename to environments/eliza-agent-api/components/lambda_workers/Readme.md diff --git a/environments/eliza-agent-api/components/launch_template/.terraform.lock.hcl b/environments/eliza-agent-api/components/launch_template/.terraform.lock.hcl new file mode 100644 index 0000000..a720cb2 --- /dev/null +++ b/environments/eliza-agent-api/components/launch_template/.terraform.lock.hcl @@ -0,0 +1,19 @@ +# This file is maintained automatically by "tofu init". +# Manual edits may be lost in future updates. 
+ +provider "registry.opentofu.org/hashicorp/aws" { + version = "5.81.0" + hashes = [ + "h1:ird967uf44WBZ0u9rMlOdO4nCKJZMlUknLcD3lz4dWs=", + "zh:0d0c7cc1e16b16cd00ab36de35038162e1871b51a902e9016d08c55c9fc4de35", + "zh:0e4b8c6e46999015292ab4fb9a430ab00107a276f25243552cde66db53c58661", + "zh:30041314cdd4e877d75ee8c9d36aecfca094e276f7a3d8150f929cf5169b2fa5", + "zh:5ebd248ce3e5a7ef9cc2f41499668f182146325e10ea305c70469122f6161a13", + "zh:888a69d371373b418549e03f5922becb2b8074cb463552ecfa65f30455708db0", + "zh:8a21bb7fe60383ff5ca9db8630a287e41fd520d2514c08874a16dc74500fadd7", + "zh:9c4663dcbfe58544642d70ebfdc6c5fa91592ff04164f77c655e32e6024483e2", + "zh:b322873f1209443a8407d5f782d7d917de6a1391239dbd0e7f809ce6507bed76", + "zh:b7c9d5ca14b818b5932ac58a490646a425ebc41b33d149090aa5f48d1ca35c99", + "zh:e76cd202b03749f3082b0cbe849fd2e731cf3f9a6aa994d2d629602c3aede36c", + ] +} diff --git a/environments/eliza-agent-api/components/launch_template/Makefile b/environments/eliza-agent-api/components/launch_template/Makefile new file mode 100644 index 0000000..6a6bebf --- /dev/null +++ b/environments/eliza-agent-api/components/launch_template/Makefile @@ -0,0 +1,4 @@ +doit : + tofu init + tofu plan + tofu apply -auto-approve diff --git a/environments/eliza-agent-api/components/launch_template/main.tf b/environments/eliza-agent-api/components/launch_template/main.tf new file mode 100644 index 0000000..95c36c7 --- /dev/null +++ b/environments/eliza-agent-api/components/launch_template/main.tf @@ -0,0 +1,102 @@ +variable "ssm_parameter_name_cw_agent_config" {} +variable "branch" {} +variable "install_script" {} +variable "iam_instance_profile_name" {} +variable "security_group_id" {} +variable "name" {} +variable "vpc_id" {} +variable "ami_id" {} +variable "tags" {} +variable "key_name" {} +variable "instance_type" {} + +locals { + tags = { + project = "swarms" + instance_type = var.instance_type + name = var.name + } + user_data = <<-EOF + #!/bin/bash + export HOME=/root + apt update + 
apt-get install -y ec2-instance-connect git virtualenv + snap install amazon-ssm-agent --classic || echo oops1 + snap start amazon-ssm-agent || echo oops2 + apt-get install -y --no-install-recommends ca-certificates=20230311 curl=7.88.1-10+deb12u7 | echo oops + curl -O "https://s3.amazonaws.com/amazoncloudwatch-agent/ubuntu/$(dpkg --print-architecture)/latest/amazon-cloudwatch-agent.deb" + dpkg -i -E amazon-cloudwatch-agent.deb + # Install prerequisite packages + apt-get install -y wget unzip systemd + # In case of missing dependencies + # apt-get install -f -y + # Configure and start the CloudWatch agent + /opt/aws/amazon-cloudwatch-agent/bin/amazon-cloudwatch-agent-ctl -a fetch-config -m ec2 -s -c "ssm:${data.aws_ssm_parameter.cw_agent_config.name}" + # Enable and start the service using systemctl + systemctl enable amazon-cloudwatch-agent + systemctl start amazon-cloudwatch-agent + + # Clean up downloaded files + rm -f amazon-cloudwatch-agent.deb + # Verify installation + systemctl status amazon-cloudwatch-agent + + if [ ! 
-d "/opt/swarms/" ]; then + git clone https://github.com/jmikedupont2/swarms "/opt/swarms/" + fi + cd "/opt/swarms/" || exit 1 + export BRANCH=${var.branch} + git stash + git fetch --all # get the latest version + git checkout --force $BRANCH + + bash -x ${var.install_script} + EOF + +} +data "aws_ssm_parameter" "cw_agent_config" { + #arn:aws:ssm:us-east-2:916723593639:parameter/cloudwatch-agent/config + name = var.ssm_parameter_name_cw_agent_config + #"/cloudwatch-agent/config" +} +# defined +resource "aws_launch_template" "ec2_launch_template" { + name_prefix = "${var.name}-launch-template-" + image_id = var.ami_id + key_name = var.key_name + instance_type = var.instance_type + network_interfaces { + associate_public_ip_address = true + delete_on_termination = true + security_groups = [var.security_group_id] + } + + iam_instance_profile { + # iam_instance_profile_arn = aws_iam_instance_profile.ssm.arn + name = var.iam_instance_profile_name #aws_iam_instance_profile.ec2_instance_profile.name + } + lifecycle { + create_before_destroy = true + } + block_device_mappings { + device_name = "/dev/sda1" + ebs { + volume_size = 30 + volume_type = "gp3" + encrypted = true + } + } + user_data = base64encode(local.user_data) + tags = var.tags +} + + +output "lt" { + value = resource.aws_launch_template.ec2_launch_template +} +output "launch_template_id" { + value = resource.aws_launch_template.ec2_launch_template.id +} +output "user_data" { + value = local.user_data +} diff --git a/environments/eliza-agent-api/components/launch_template_docker/main.tf b/environments/eliza-agent-api/components/launch_template_docker/main.tf new file mode 100644 index 0000000..6c9a46b --- /dev/null +++ b/environments/eliza-agent-api/components/launch_template_docker/main.tf @@ -0,0 +1,119 @@ +# launch template for running agent api in docker +variable "ssm_parameter_name_cw_agent_config" {} +variable "branch" {} +variable "install_script" {} +variable "iam_instance_profile_name" {} +variable 
"security_group_id" {} +variable "name" {} +variable "vpc_id" {} +variable "ami_id" {} +variable "tags" {} +variable "key_name" {} +variable "instance_type" {} + +locals { + tags = { + project = "swarms" + instance_type = var.instance_type + name = var.name + } + # FIXME refactor launch template to pass in user data as template parameter, + # split up user data into reusable chunks that we can use in different forms like docker files + user_data = <<-EOF + #!/bin/bash + export HOME=/root + apt update + apt-get install -y ec2-instance-connect git + + # Install docker + apt-get install -y cloud-utils apt-transport-https ca-certificates curl software-properties-common + curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add - + add-apt-repository \ + "deb [arch=amd64] https://download.docker.com/linux/ubuntu \ + $(lsb_release -cs) \ + stable" + apt-get update + apt-get install -y docker-ce + usermod -aG docker ubuntu + + # Install docker-compose + curl -L https://github.com/docker/compose/releases/download/1.21.0/docker-compose-$(uname -s)-$(uname -m) -o /usr/local/bin/docker-compose + chmod +x /usr/local/bin/docker-compose + + snap install amazon-ssm-agent --classic || echo oops1 + snap start amazon-ssm-agent || echo oops2 + apt-get install -y --no-install-recommends ca-certificates=20230311 curl=7.88.1-10+deb12u7 | echo oops + curl -O "https://s3.amazonaws.com/amazoncloudwatch-agent/ubuntu/$(dpkg --print-architecture)/latest/amazon-cloudwatch-agent.deb" + dpkg -i -E amazon-cloudwatch-agent.deb + # Install prerequisite packages + apt-get install -y wget unzip systemd + # In case of missing dependencies + # apt-get install -f -y + # Configure and start the CloudWatch agent + /opt/aws/amazon-cloudwatch-agent/bin/amazon-cloudwatch-agent-ctl -a fetch-config -m ec2 -s -c "ssm:${data.aws_ssm_parameter.cw_agent_config.name}" + # Enable and start the service using systemctl + systemctl enable amazon-cloudwatch-agent + systemctl start 
amazon-cloudwatch-agent + + # Clean up downloaded files + rm -f amazon-cloudwatch-agent.deb + # Verify installation + systemctl status amazon-cloudwatch-agent + + if [ ! -d "/opt/swarms/" ]; then + git clone https://github.com/jmikedupont2/swarms "/opt/swarms/" + fi + cd "/opt/swarms/" || exit 1 + git stash + git fetch --all # get the latest version + git checkout --track --force "origin/${var.branch}" + + bash -x ${var.install_script} + EOF + +} +data "aws_ssm_parameter" "cw_agent_config" { + name = var.ssm_parameter_name_cw_agent_config +} + +# defined +resource "aws_launch_template" "ec2_launch_template" { + name_prefix = "${var.name}-launch-template-" + image_id = var.ami_id + key_name = var.key_name + instance_type = var.instance_type + network_interfaces { + associate_public_ip_address = true + delete_on_termination = true + security_groups = [var.security_group_id] + } + + iam_instance_profile { + # iam_instance_profile_arn = aws_iam_instance_profile.ssm.arn + name = var.iam_instance_profile_name #aws_iam_instance_profile.ec2_instance_profile.name + } + lifecycle { + create_before_destroy = true + } + block_device_mappings { + device_name = "/dev/sda1" + ebs { + volume_size = 30 + volume_type = "gp3" + encrypted = true + } + } + user_data = base64encode(local.user_data) + tags = var.tags +} + + +output "lt" { + value = resource.aws_launch_template.ec2_launch_template +} +output "launch_template_id" { + value = resource.aws_launch_template.ec2_launch_template.id +} +output "user_data" { + value = local.user_data +} diff --git a/environments/eliza-agent-api/components/launch_template_docker_mcs/main.tf b/environments/eliza-agent-api/components/launch_template_docker_mcs/main.tf new file mode 100644 index 0000000..c104c6b --- /dev/null +++ b/environments/eliza-agent-api/components/launch_template_docker_mcs/main.tf @@ -0,0 +1,114 @@ +# launch template for running agent api in docker +variable "git_repo" {} +variable "app_name" {} +variable 
"ssm_parameter_name_cw_agent_config" {} +variable "branch" {} +variable "install_script" {} +variable "iam_instance_profile_name" {} +variable "security_group_id" {} +variable "name" {} +variable "vpc_id" {} +variable "ami_id" {} +variable "tags" {} +variable "key_name" {} +variable "instance_type" {} + +locals { + tags = { + project = "agent" + instance_type = var.instance_type + name = var.name + } + # FIXME refactor launch template to pass in user data as template parameter, + # split up user data into reusable chunks that we can use in different forms like docker files + user_data = <<-EOF + #!/bin/bash + export HOME=/root + apt update + apt-get install -y ec2-instance-connect git + # Install docker + apt-get install -y cloud-utils apt-transport-https ca-certificates curl software-properties-common + curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add - + add-apt-repository \ + "deb [arch=amd64] https://download.docker.com/linux/ubuntu \ + $(lsb_release -cs) \ + stable" + apt-get update + apt-get install -y docker-ce + usermod -aG docker ubuntu + # Install docker-compose FIXME remove this as not needed? 
+ curl -L https://github.com/docker/compose/releases/download/1.21.0/docker-compose-$(uname -s)-$(uname -m) -o /usr/local/bin/docker-compose + chmod +x /usr/local/bin/docker-compose + snap install amazon-ssm-agent --classic || echo oops1 + snap start amazon-ssm-agent || echo oops2 + apt-get install -y --no-install-recommends ca-certificates=20230311 curl=7.88.1-10+deb12u7 | echo oops + curl -O "https://s3.amazonaws.com/amazoncloudwatch-agent/ubuntu/$(dpkg --print-architecture)/latest/amazon-cloudwatch-agent.deb" + dpkg -i -E amazon-cloudwatch-agent.deb + # Install prerequisite packages + apt-get install -y wget unzip systemd + # In case of missing dependencies + # apt-get install -f -y + # Configure and start the CloudWatch agent + /opt/aws/amazon-cloudwatch-agent/bin/amazon-cloudwatch-agent-ctl -a fetch-config -m ec2 -s -c "ssm:${data.aws_ssm_parameter.cw_agent_config.name}" + # Enable and start the service using systemctl + systemctl enable amazon-cloudwatch-agent + systemctl start amazon-cloudwatch-agent + # Clean up downloaded files + rm -f amazon-cloudwatch-agent.deb + # Verify installation + systemctl status amazon-cloudwatch-agent + if [ ! 
-d "/opt/${var.app_name}/" ]; then + git clone ${var.git_repo} "/opt/${var.app_name}/" + fi + cd "/opt/${var.app_name}/" || exit 1 + git stash + git fetch --all # get the latest version + git checkout --track --force "origin/${var.branch}" + bash -x ${var.install_script} + EOF +} +data "aws_ssm_parameter" "cw_agent_config" { + name = var.ssm_parameter_name_cw_agent_config +} + +# defined +resource "aws_launch_template" "ec2_launch_template" { + name_prefix = "${var.name}-launch-template-" + image_id = var.ami_id + key_name = var.key_name + instance_type = var.instance_type + network_interfaces { + associate_public_ip_address = true + delete_on_termination = true + security_groups = [var.security_group_id] + } + + iam_instance_profile { + # iam_instance_profile_arn = aws_iam_instance_profile.ssm.arn + name = var.iam_instance_profile_name #aws_iam_instance_profile.ec2_instance_profile.name + } + lifecycle { + create_before_destroy = true + } + block_device_mappings { + device_name = "/dev/sda1" + ebs { + volume_size = 30 + volume_type = "gp3" + encrypted = true + } + } + user_data = base64encode(local.user_data) + tags = var.tags +} + + +output "lt" { + value = resource.aws_launch_template.ec2_launch_template +} +output "launch_template_id" { + value = resource.aws_launch_template.ec2_launch_template.id +} +output "user_data" { + value = local.user_data +} diff --git a/environments/swarms-aws-agent-api/dev/us-east-1/components/machine_image/.terraform.lock.hcl b/environments/eliza-agent-api/components/machine_image/.terraform.lock.hcl similarity index 100% rename from environments/swarms-aws-agent-api/dev/us-east-1/components/machine_image/.terraform.lock.hcl rename to environments/eliza-agent-api/components/machine_image/.terraform.lock.hcl diff --git a/environments/eliza-agent-api/components/machine_image/Makefile b/environments/eliza-agent-api/components/machine_image/Makefile new file mode 100644 index 0000000..6a6bebf --- /dev/null +++ 
b/environments/eliza-agent-api/components/machine_image/Makefile @@ -0,0 +1,4 @@ +doit : + tofu init + tofu plan + tofu apply -auto-approve diff --git a/environments/eliza-agent-api/components/machine_image/Readme.md b/environments/eliza-agent-api/components/machine_image/Readme.md new file mode 100644 index 0000000..61cd739 --- /dev/null +++ b/environments/eliza-agent-api/components/machine_image/Readme.md @@ -0,0 +1,18 @@ +machine_image + + +`tofu apply -destroy -target module.ec2.aws_spot_instance_request.this[0] -auto-approve` + +aws ec2 describe-images --owners 099720109477 > images.json +* + +tofu state rm "module.ec2.aws_spot_instance_request.this[0]" + +# packer build + +``` +packer init -upgrade ./ubuntu-fastapi.pkr.hcl +packer fmt ./ubuntu-fastapi.pkr.hcl +export AWS_DEFAULT_PROFILE=swarms +packer build ./ubuntu-fastapi.pkr.hcl +``` diff --git a/environments/eliza-agent-api/components/machine_image/main.tf b/environments/eliza-agent-api/components/machine_image/main.tf new file mode 100644 index 0000000..c4eb0c7 --- /dev/null +++ b/environments/eliza-agent-api/components/machine_image/main.tf @@ -0,0 +1,83 @@ + +locals { + name = "swarms" + tags = { + project = "swarms" + } +} + +module "security_group_instance" { + source = "terraform-aws-modules/security-group/aws" + version = "~> 5.0" + name = "${local.name}-ec2" + description = "Security Group for EC2 Instance" + vpc_id = local.vpc_id + ingress_with_cidr_blocks = [ + { + from_port = 443 + to_port = 443 + protocol = "tcp" + cidr_blocks = "0.0.0.0/0" + }, + { + from_port = 80 + to_port = 80 + protocol = "tcp" + cidr_blocks = "0.0.0.0/0" + } + ] + + egress_rules = ["all-all"] + tags = local.tags +} + +module "ec2" { + source = "terraform-aws-modules/ec2-instance/aws" + associate_public_ip_address = true # for now + name = local.name + ami = local.ami # data.aws_ami.ubuntu.id + instance_type = "t3.large" + create_iam_instance_profile = true + iam_role_description = "IAM role for EC2 instance" + 
iam_role_policies = { + AmazonSSMManagedInstanceCore = "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore" + } + vpc_security_group_ids = [module.security_group_instance.security_group_id] + + root_block_device = [ + { + encrypted = true + volume_size = 30 + volume_type = "gp3" + } + ] + + user_data = <<-EOF +#!/bin/bash +export HOME=/root +apt update +apt-get install -y ec2-instance-connect git virtualenv + +if [ ! -d "/opt/swarms/" ]; + then + git clone https://github.com/jmikedupont2/swarms "/opt/swarms/" +fi +cd "/opt/swarms/" || exit 1 # "we need swarms" +export BRANCH=feature/ec2 +git checkout --force $BRANCH +bash -x /opt/swarms/api/install.sh + EOF + tags = local.tags + create_spot_instance = true + subnet_id = local.ec2_subnet_id +} + + +output "ec2_data" { + value = module.ec2 +} + +output "iam_instance_profile_name" { + value = module.ec2.iam_instance_profile_id + description = "IAM Instance Profile Name created for EC2 instance" +} diff --git a/environments/eliza-agent-api/components/machine_image/main.tf.asg b/environments/eliza-agent-api/components/machine_image/main.tf.asg new file mode 100644 index 0000000..b05f717 --- /dev/null +++ b/environments/eliza-agent-api/components/machine_image/main.tf.asg @@ -0,0 +1,253 @@ +provider "aws" { + region = "us-east-1" +} + +locals { + ami = "ami-0e2c8caa4b6378d8c" + name = "swarms" + region = "us-east-1" + ec2_subnet_id = "subnet-057c90cfe7b2e5646" + vpc_id = "vpc-04f28c9347af48b55" + tags = { + project="swarms" + } +} + +module "security_group_instance" { + source = "terraform-aws-modules/security-group/aws" + version = "~> 5.0" + name = "${local.name}-ec2" + description = "Security Group for EC2 Instance" + vpc_id = local.vpc_id + ingress_with_cidr_blocks = [ + { + from_port = 443 + to_port = 443 + protocol = "tcp" + cidr_blocks = "0.0.0.0/0" + }, + { + from_port = 80 + to_port = 80 + protocol = "tcp" + cidr_blocks = "0.0.0.0/0" + } + ] + + egress_rules = ["all-all"] + tags = local.tags +} + +module "ec2" 
{ + source = "terraform-aws-modules/ec2-instance/aws" + associate_public_ip_address = true # for now + name = local.name + ami = local.ami # data.aws_ami.ubuntu.id + instance_type = "t3.large" + create_iam_instance_profile = true + iam_role_description = "IAM role for EC2 instance" + iam_role_policies = { + AmazonSSMManagedInstanceCore = "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore" + } + vpc_security_group_ids = [module.security_group_instance.security_group_id] + + root_block_device = [ + { + encrypted = true + volume_size = 30 + volume_type = "gp3" + } + ] + + user_data = <<-EOF +#!/bin/bash +export HOME=/root +apt update +apt-get install -y ec2-instance-connect git virtualenv + +if [ ! -d "/opt/swarms/" ]; + then + git clone https://github.com/jmikedupont2/swarms "/opt/swarms/" +fi +cd "/opt/swarms/" || exit 1 # "we need swarms" +export BRANCH=feature/ec2 +git checkout --force $BRANCH +bash -x /opt/swarms/api/install.sh + EOF + tags = local.tags + create_spot_instance = true + subnet_id = local.ec2_subnet_id +} + +TASK FOR GPT: +#1/ create an autoscaling group +#2/ create a launch template +#3/ create a lambda function to trigger the launching of new instances + + + +To create an Auto Scaling group, a launch template, and a Lambda function to trigger the launching of new instances, you can add the following configurations to your Terraform script: + +### 1. 
Create a Launch Template + +```hcl +resource "aws_launch_template" "ec2_launch_template" { + name_prefix = "${local.name}-launch-template-" + image_id = local.ami + instance_type = "t3.large" + associate_public_ip_address = true + + iam_instance_profile { + name = aws_iam_instance_profile.ec2_instance_profile.name + } + + key_name = "your-key-pair" # Replace with your key pair name + + lifecycle { + create_before_destroy = true + } + + block_device_mappings { + device_name = "/dev/xvda" + ebs { + volume_size = 30 + volume_type = "gp3" + encrypted = true + } + } + + user_data = <<-EOF + #!/bin/bash + export HOME=/root + apt update + apt-get install -y ec2-instance-connect git virtualenv + + if [ ! -d "/opt/swarms/" ]; then + git clone https://github.com/jmikedupont2/swarms "/opt/swarms/" + fi + cd "/opt/swarms/" || exit 1 + export BRANCH=feature/ec2 + git checkout --force $BRANCH + bash -x /opt/swarms/api/install.sh + EOF + + tags = local.tags +} +``` + +### 2. Create an Auto Scaling Group + +```hcl +resource "aws_autoscaling_group" "ec2_autoscaling_group" { + desired_capacity = 1 + max_size = 5 + min_size = 1 + + launch_template { + id = aws_launch_template.ec2_launch_template.id + version = "$Latest" + } + + vpc_zone_identifier = [local.ec2_subnet_id] + + tags = [ + { + key = "Name" + value = local.name + propagate_at_launch = true + }, + { + key = "Project" + value = local.tags.project + propagate_at_launch = true + } + ] +} +``` + +### 3. 
Create a Lambda Function to Trigger New Instances + +```hcl +resource "aws_iam_role" "lambda_role" { + name = "${local.name}-lambda-role" + + assume_role_policy = jsonencode({ + Version = "2012-10-17" + Statement = [{ + Action = "sts:AssumeRole" + Effect = "Allow" + Principal = { + Service = "lambda.amazonaws.com" + } + }] + }) +} + +resource "aws_iam_policy" "lambda_scaling_policy" { + name = "${local.name}-scaling-policy" + description = "Policy to allow Lambda to manage Auto Scaling" + + policy = jsonencode({ + Version = "2012-10-17" + Statement = [{ + Action = [ + "autoscaling:DescribeAutoScalingGroups", + "autoscaling:SetDesiredCapacity" + ] + Resource = "*" + Effect = "Allow" + }] + }) +} + +resource "aws_iam_role_policy_attachment" "lambda_policy_attach" { + role = aws_iam_role.lambda_role.name + policy_arn = aws_iam_policy.lambda_scaling_policy.arn +} + +resource "aws_lambda_function" "scale_lambda" { + function_name = "${local.name}-scale-lambda" + role = aws_iam_role.lambda_role.arn + + runtime = "python3.8" + handler = "scale_lambda.handler" + + source_code_hash = filebase64sha256("lambda/scale_lambda.zip") + + # Environment variables for the Lambda function + environment = { + AUTO_SCALING_GROUP_NAME = aws_autoscaling_group.ec2_autoscaling_group.name + } +} +``` + +### 4. 
Lambda Function Code (Python) + +You'll need to create a file named `scale_lambda.py` in the `lambda/` directory with content like the following: + +```python +import boto3 +import os + +def handler(event, context): + asg_name = os.environ['AUTO_SCALING_GROUP_NAME'] + client = boto3.client('autoscaling') + + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=[asg_name]) + desired_capacity = response['AutoScalingGroups'][0]['DesiredCapacity'] + + # Example scaling logic + client.set_desired_capacity( + AutoScalingGroupName=asg_name, + DesiredCapacity=desired_capacity + 1, # Increment desired instances + HonorCooldown=True + ) +``` + +### Note +- Replace `"your-key-pair"` in the launch template with your actual key pair name. +- Ensure you have the right IAM permissions for your Lambda function. +- You may need to handle packaging and deploying your Lambda function code (`scale_lambda.py`) as a zip file for Terraform. +- Adjust desired capacity and scaling logic as per your requirement. + +This configuration sets up an Auto Scaling group with a launch template and a Lambda function that can adjust the desired number of running instances based on certain conditions (you would need to implement those conditions). \ No newline at end of file diff --git a/environments/eliza-agent-api/components/machine_image/main.tf.consts.org b/environments/eliza-agent-api/components/machine_image/main.tf.consts.org new file mode 100644 index 0000000..192c054 --- /dev/null +++ b/environments/eliza-agent-api/components/machine_image/main.tf.consts.org @@ -0,0 +1,194 @@ +provider "aws" { + region = "us-east-1" +} + +#1. 
create an interface to import these constants from other modules in a clean way +locals { + ami = "ami-0e2c8caa4b6378d8c" + name = "swarms" + region = "us-east-1" + ec2_subnet_id = "subnet-057c90cfe7b2e5646" + vpc_id = "vpc-04f28c9347af48b55" + tags = { + project="swarms" + } +} + +module "security_group_instance" { + source = "terraform-aws-modules/security-group/aws" + version = "~> 5.0" + name = "${local.name}-ec2" + description = "Security Group for EC2 Instance" + vpc_id = local.vpc_id + ingress_with_cidr_blocks = [ + { + from_port = 443 + to_port = 443 + protocol = "tcp" + cidr_blocks = "0.0.0.0/0" + }, + { + from_port = 80 + to_port = 80 + protocol = "tcp" + cidr_blocks = "0.0.0.0/0" + } + ] + + egress_rules = ["all-all"] + tags = local.tags +} + +module "ec2" { + source = "terraform-aws-modules/ec2-instance/aws" + associate_public_ip_address = true # for now + name = local.name + ami = local.ami # data.aws_ami.ubuntu.id + instance_type = "t3.large" + create_iam_instance_profile = true + iam_role_description = "IAM role for EC2 instance" + iam_role_policies = { + AmazonSSMManagedInstanceCore = "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore" + } + vpc_security_group_ids = [module.security_group_instance.security_group_id] + + root_block_device = [ + { + encrypted = true + volume_size = 30 + volume_type = "gp3" + } + ] + + user_data = <<-EOF +#!/bin/bash +export HOME=/root +apt update +apt-get install -y ec2-instance-connect git virtualenv + +if [ ! 
-d "/opt/swarms/" ]; + then + git clone https://github.com/jmikedupont2/swarms "/opt/swarms/" +fi +cd "/opt/swarms/" || exit 1 # "we need swarms" +export BRANCH=feature/ec2 +git checkout --force $BRANCH +bash -x /opt/swarms/api/install.sh + EOF + tags = local.tags + create_spot_instance = true + subnet_id = local.ec2_subnet_id +} + +# create an autoscaling group +# create a launch template +# create a lambda function to trigger the launching of new instances + + +To create a clean interface for importing constants from other modules in Terraform, you can follow the structure outlined below. This involves the use of variables and outputs to encapsulate the constants while maintaining modularity. Here's a concise example: + +### 1. Define Variables in a `variables.tf` File + +```hcl +variable "ami" { + description = "AMI ID for the EC2 instance" + type = string +} + +variable "name" { + description = "Name for the resources" + type = string +} + +variable "region" { + description = "AWS region" + type = string +} + +variable "ec2_subnet_id" { + description = "Subnet ID for the EC2 instance" + type = string +} + +variable "vpc_id" { + description = "VPC ID where resources are deployed" + type = string +} + +variable "tags" { + description = "Tags to assign to resources" + type = map(string) +} +``` + +### 2. Create a `locals.tf` File to Define Constants + +```hcl +locals { + ami = "ami-0e2c8caa4b6378d8c" + name = "swarms" + region = "us-east-1" + ec2_subnet_id = "subnet-057c90cfe7b2e5646" + vpc_id = "vpc-04f28c9347af48b55" + tags = { + project = "swarms" + } +} +``` + +### 3. Outputs for Constants in an `outputs.tf` File + +```hcl +output "ami" { + value = local.ami +} + +output "name" { + value = local.name +} + +output "region" { + value = local.region +} + +output "ec2_subnet_id" { + value = local.ec2_subnet_id +} + +output "vpc_id" { + value = local.vpc_id +} + +output "tags" { + value = local.tags +} +``` + +### 4. 
Import Variables in Other Modules + +In your main module or other resource modules, import these variables as shown below: + +```hcl +module "constants" { + source = "./path-to-constants-module" # replace with the actual path +} + +# Use them as follows +module "security_group_instance" { + source = "terraform-aws-modules/security-group/aws" + name = "${module.constants.name}-ec2" + vpc_id = module.constants.vpc_id + # ... other configurations +} + +module "ec2" { + ami = module.constants.ami + name = module.constants.name + subnet_id = module.constants.ec2_subnet_id + # ... other configurations +} +``` + +### Summary + +This structure ensures that you can import and use constants across your Terraform module cleanly without hardcoding values in multiple places. Each module can reference these shared values as needed for configurations. diff --git a/environments/eliza-agent-api/components/machine_image/ubuntu-fastapi.pkr.hcl b/environments/eliza-agent-api/components/machine_image/ubuntu-fastapi.pkr.hcl new file mode 100644 index 0000000..d7933e7 --- /dev/null +++ b/environments/eliza-agent-api/components/machine_image/ubuntu-fastapi.pkr.hcl @@ -0,0 +1,38 @@ +packer { + required_plugins { + amazon = { + version = ">= 1.2.8" + source = "github.com/hashicorp/amazon" + } + } +} + +source "amazon-ebs" "ubuntu" { + ami_name = "ubuntu-swarms-{{timestamp}}" + instance_type = "t2.medium" + region = "us-east-2" + source_ami = "ami-0325b9a2dfb474b2d" # Ubuntu 20.04 LTS + ssh_username = "ubuntu" + launch_block_device_mappings { + device_name = "/dev/sda1" + volume_size = 30 + volume_type = "gp3" + delete_on_termination = true + } +} + +build { + provisioner "shell" { + inline = [ + "sudo apt-get update", + "sudo apt-get install -y python3 python3-pip git", + "export HOME=/root", + "sudo apt-get install -y ec2-instance-connect git virtualenv", + "sudo git clone https://github.com/jmikedupont2/swarms '/opt/swarms/'", + "cd /opt/swarms/; sudo git checkout --force feature/ec2", + 
"sudo bash -x /opt/swarms/api/install.sh" + ] + } + + sources = ["source.amazon-ebs.ubuntu"] +} diff --git a/environments/eliza-agent-api/components/machine_image/variables.tf b/environments/eliza-agent-api/components/machine_image/variables.tf new file mode 100644 index 0000000..428bf9e --- /dev/null +++ b/environments/eliza-agent-api/components/machine_image/variables.tf @@ -0,0 +1,3 @@ +variable "test_server_count" { + default = 1 # how many test servers to run +} diff --git a/environments/eliza-agent-api/components/machine_image/versions.tf b/environments/eliza-agent-api/components/machine_image/versions.tf new file mode 100644 index 0000000..e69de29 diff --git a/environments/swarms-aws-agent-api/dev/us-east-1/components/resource_launchers/Readme.md b/environments/eliza-agent-api/components/resource_launchers/Readme.md similarity index 100% rename from environments/swarms-aws-agent-api/dev/us-east-1/components/resource_launchers/Readme.md rename to environments/eliza-agent-api/components/resource_launchers/Readme.md diff --git a/environments/eliza-agent-api/components/resource_launchers/lambda/scale_lambda.py b/environments/eliza-agent-api/components/resource_launchers/lambda/scale_lambda.py new file mode 100644 index 0000000..67e270d --- /dev/null +++ b/environments/eliza-agent-api/components/resource_launchers/lambda/scale_lambda.py @@ -0,0 +1,19 @@ +#You'll need to create a file named `scale_lambda.py` in the `lambda/` directory with content like the following: + +#```python +import boto3 +import os + +def handler(event, context): + asg_name = os.environ['AUTO_SCALING_GROUP_NAME'] + client = boto3.client('autoscaling') + + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=[asg_name]) + desired_capacity = response['AutoScalingGroups'][0]['DesiredCapacity'] + + # Example scaling logic + client.set_desired_capacity( + AutoScalingGroupName=asg_name, + DesiredCapacity=desired_capacity + 1, # Increment desired instances + HonorCooldown=True + ) 
diff --git a/environments/eliza-agent-api/components/resource_launchers/main.tf b/environments/eliza-agent-api/components/resource_launchers/main.tf new file mode 100644 index 0000000..1b0de80 --- /dev/null +++ b/environments/eliza-agent-api/components/resource_launchers/main.tf @@ -0,0 +1,51 @@ +resource "aws_iam_role" "lambda_role" { + name = "${local.name}-lambda-role" + + assume_role_policy = jsonencode({ + Version = "2012-10-17" + Statement = [{ + Action = "sts:AssumeRole" + Effect = "Allow" + Principal = { + Service = "lambda.amazonaws.com" + } + }] + }) +} + +resource "aws_iam_policy" "lambda_scaling_policy" { + name = "${local.name}-scaling-policy" + description = "Policy to allow Lambda to manage Auto Scaling" + + policy = jsonencode({ + Version = "2012-10-17" + Statement = [{ + Action = [ + "autoscaling:DescribeAutoScalingGroups", + "autoscaling:SetDesiredCapacity" + ] + Resource = "*" + Effect = "Allow" + }] + }) +} + +resource "aws_iam_role_policy_attachment" "lambda_policy_attach" { + role = aws_iam_role.lambda_role.name + policy_arn = aws_iam_policy.lambda_scaling_policy.arn +} + +# NOTE(review): local.name and aws_autoscaling_group.ec2_autoscaling_group are not +# defined in this module — confirm they are provided elsewhere before applying. +resource "aws_lambda_function" "scale_lambda" { + function_name = "${local.name}-scale-lambda" + role = aws_iam_role.lambda_role.arn + + # python3.8 is past AWS end-of-support; use a currently supported runtime + runtime = "python3.12" + handler = "scale_lambda.handler" + + # one of filename / s3_bucket / image_uri is required by the provider + filename = "lambda/scale_lambda.zip" + source_code_hash = filebase64sha256("lambda/scale_lambda.zip") + + # Environment variables for the Lambda function + # ("environment" is a block containing a "variables" map, not a plain map attribute) + environment { + variables = { + AUTO_SCALING_GROUP_NAME = aws_autoscaling_group.ec2_autoscaling_group.name + } + } +} diff --git a/environments/eliza-agent-api/components/roles/main.tf b/environments/eliza-agent-api/components/roles/main.tf new file mode 100644 index 0000000..40fe881 --- /dev/null +++ b/environments/eliza-agent-api/components/roles/main.tf @@ -0,0 +1,180 @@ +variable "tags" {} + +data "aws_iam_policy_document" "default" { + statement { + actions = ["ssm:DescribeParameters"] + resources = ["*"] + effect = "Allow" + } + + + statement { + effect= "Allow" + actions= [ + 
"ecr:GetAuthorizationToken", + "ecr:CreateRepository", + "ecr:BatchCheckLayerAvailability", + "ecr:GetDownloadUrlForLayer", + "ecr:GetRepositoryPolicy", + "ecr:DescribeRepositories", + "ecr:ListImages", + "ecr:DescribeImages", + "ecr:BatchGetImage", + "ecr:GetLifecyclePolicy", + "ecr:GetLifecyclePolicyPreview", + "ecr:ListTagsForResource", + "ecr:DescribeImageScanFindings" + ] + resources = ["*"] + } + +# statement { +# actions = ["kms:Decrypt"] +# resources = ["arn:aws:kms:us-east-2:916723593639:key/cc8e1ee7-a05b-4642-bd81-ba5548635590"] +# effect = "Allow" +# } + + statement { + actions = [ + "logs:DescribeLogGroups", + "logs:DescribeLogStreams", + "logs:CreateLogGroup", + "logs:CreateLogStream", + "logs:PutLogEvents", + "logs:PutLogEventsBatch", + "cloudwatch:PutMetricData", + "ec2:DescribeTags", + ] + resources = ["*"] + effect = "Allow" + } + + # statement { + # actions = [ + # "ecr:CompleteLayerUpload", + # "ecr:UploadLayerPart", + # "ecr:InitiateLayerUpload", + # "ecr:BatchCheckLayerAvailability", + # "ecr:PutImage", + # "ecr:BatchGetImage" + # ] + + # resources = [ + # "arn:aws:ecr:us-east-2:916723593639:swarms/mcs" + # ] + + # effect = "Allow" + # } + + + statement { + effect = "Allow" + resources = ["arn:aws:s3:::*session-logs*"] # FIXME must match + actions = [ + "s3:GetEncryptionConfiguration" + ] + } + + statement { + effect = "Allow" + resources = ["*"] + actions = [ + "kms:Encrypt*", + "kms:Decrypt*", + "kms:ReEncrypt*", + "kms:GenerateDataKey*", + "kms:Describe*" + ] + } + + # statement { + # sid = "Allow CloudWatch access" + # effect = "Allow" + # principals { + # type = "Service" + # identifiers = ["logs.us-east-2.amazonaws.com"] + # } + # actions = [ + # "kms:Encrypt*", + # "kms:Decrypt*", + # "kms:ReEncrypt*", + # "kms:GenerateDataKey*", + # "kms:Describe*" + # ] + # condition { + # test = "ArnLike" + # values = ["arn:aws:logs:region:${data.aws_caller_identity.current.account_id}:*"] + # variable = "kms:EncryptionContext:aws:logs:arn" + # } + # 
} + + #arn:aws:logs:us-east-2:916723593639:log-group::log-stream + + # statement { + # actions = ["${var.ssm_actions}"] + # resources = ["${formatlist("arn:aws:ssm:%s:%s:parameter/%s", var.region, var.account_id, var.ssm_parameters)}"] + # effect = "Allow" + # } + +} + +resource "aws_iam_policy" "default" { + name = "swarms-ssm" + description = "Allow SSM actions" + policy = data.aws_iam_policy_document.default.json +} + +resource "aws_iam_role_policy_attachment" "AmazonSSMManagedEC2InstanceDefaultPolicy" { + role = join("", aws_iam_role.ssm.*.name) + policy_arn = "arn:aws:iam::aws:policy/AmazonSSMManagedEC2InstanceDefaultPolicy" +} + +resource "aws_iam_role_policy_attachment" "default" { + # count = local.policy_only + role = join("", aws_iam_role.ssm.*.name) + policy_arn = join("", aws_iam_policy.default.*.arn) +} + +resource "aws_iam_role_policy_attachment" "SSM-role-policy-attach" { + role = join("", aws_iam_role.ssm.*.name) + policy_arn = data.aws_iam_policy.AmazonSSMManagedInstanceCore.arn +} + +data "aws_iam_policy" "AmazonSSMManagedInstanceCore" { + arn = "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore" +} + + +resource "aws_iam_role" "ssm" { + name = "ssm-agent-role" + tags = var.tags + + assume_role_policy = jsonencode({ + Version = "2012-10-17", + Statement = [ + { + Action = "sts:AssumeRole", + Principal = { + Service = "ec2.amazonaws.com" + }, + Effect = "Allow", + Sid = "" + } + ] + }) +} + +resource "aws_iam_instance_profile" "ssm" { + name = "ssm-agent-profile" + role = aws_iam_role.ssm.name + tags = var.tags +} + + +output "ssm_profile_name" { + value = aws_iam_instance_profile.ssm.name +} + +output "ssm_profile_arn" { + value = aws_iam_instance_profile.ssm.arn +} diff --git a/environments/eliza-agent-api/components/security/main.tf b/environments/eliza-agent-api/components/security/main.tf new file mode 100644 index 0000000..f099b61 --- /dev/null +++ b/environments/eliza-agent-api/components/security/main.tf @@ -0,0 +1,53 @@ +variable 
"vpc_id" {} +variable "tags" {} +variable "name" {} + +module "asg_sg" { + source = "terraform-aws-modules/security-group/aws" + version = "~> 5.0" + + name = "${var.name}-external" + description = "external group" + vpc_id = var.vpc_id + + ingress_cidr_blocks = ["0.0.0.0/0"] + ingress_rules = [ + "https-443-tcp", + "http-80-tcp", + # "ssh-tcp" dont need this now + ] + + egress_rules = ["all-all"] + + tags = var.tags +} + +module "asg_sg_internal" { + source = "terraform-aws-modules/security-group/aws" + version = "~> 5.0" + + name = "${var.name}-internal" + description = "An internal security group" + vpc_id = var.vpc_id + # see ~/2024/12/13/terraform-aws-security-group/examples/complete/main.tf + ingress_with_source_security_group_id = [ + { + rule = "http-80-tcp", + # only allow from load balancer for security + source_security_group_id = module.asg_sg.security_group_id + } + ] + egress_rules = ["all-all"] + + tags = var.tags +} + +output "security_group_id" { + value = module.asg_sg.security_group_id +} + +output "internal_security_group_id" { + value = module.asg_sg_internal.security_group_id +} + + diff --git a/environments/eliza-agent-api/components/ses/context.tf b/environments/eliza-agent-api/components/ses/context.tf new file mode 100644 index 0000000..5e0ef88 --- /dev/null +++ b/environments/eliza-agent-api/components/ses/context.tf @@ -0,0 +1,279 @@ +# +# ONLY EDIT THIS FILE IN github.com/cloudposse/terraform-null-label +# All other instances of this file should be a copy of that one +# +# +# Copy this file from https://github.com/cloudposse/terraform-null-label/blob/master/exports/context.tf +# and then place it in your Terraform module to automatically get +# Cloud Posse's standard configuration inputs suitable for passing +# to Cloud Posse modules. 
+# +# curl -sL https://raw.githubusercontent.com/cloudposse/terraform-null-label/master/exports/context.tf -o context.tf +# +# Modules should access the whole context as `module.this.context` +# to get the input variables with nulls for defaults, +# for example `context = module.this.context`, +# and access individual variables as `module.this.`, +# with final values filled in. +# +# For example, when using defaults, `module.this.context.delimiter` +# will be null, and `module.this.delimiter` will be `-` (hyphen). +# + +module "this" { + source = "cloudposse/label/null" + version = "0.25.0" # requires Terraform >= 0.13.0 + + enabled = var.enabled + namespace = var.namespace + tenant = var.tenant + environment = var.environment + stage = var.stage + name = var.name + delimiter = var.delimiter + attributes = var.attributes + tags = var.tags + additional_tag_map = var.additional_tag_map + label_order = var.label_order + regex_replace_chars = var.regex_replace_chars + id_length_limit = var.id_length_limit + label_key_case = var.label_key_case + label_value_case = var.label_value_case + descriptor_formats = var.descriptor_formats + labels_as_tags = var.labels_as_tags + + context = var.context +} + +# Copy contents of cloudposse/terraform-null-label/variables.tf here + +variable "context" { + type = any + default = { + enabled = true + namespace = null + tenant = null + environment = null + stage = null + name = null + delimiter = null + attributes = [] + tags = {} + additional_tag_map = {} + regex_replace_chars = null + label_order = [] + id_length_limit = null + label_key_case = null + label_value_case = null + descriptor_formats = {} + # Note: we have to use [] instead of null for unset lists due to + # https://github.com/hashicorp/terraform/issues/28137 + # which was not fixed until Terraform 1.0.0, + # but we want the default to be all the labels in `label_order` + # and we want users to be able to prevent all tag generation + # by setting `labels_as_tags` to `[]`, 
so we need + # a different sentinel to indicate "default" + labels_as_tags = ["unset"] + } + description = <<-EOT + Single object for setting entire context at once. + See description of individual variables for details. + Leave string and numeric variables as `null` to use default value. + Individual variable settings (non-null) override settings in context object, + except for attributes, tags, and additional_tag_map, which are merged. + EOT + + validation { + condition = lookup(var.context, "label_key_case", null) == null ? true : contains(["lower", "title", "upper"], var.context["label_key_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`." + } + + validation { + condition = lookup(var.context, "label_value_case", null) == null ? true : contains(["lower", "title", "upper", "none"], var.context["label_value_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "enabled" { + type = bool + default = null + description = "Set to false to prevent the module from creating any resources" +} + +variable "namespace" { + type = string + default = null + description = "ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique" +} + +variable "tenant" { + type = string + default = null + description = "ID element _(Rarely used, not included by default)_. A customer identifier, indicating who this instance of a resource is for" +} + +variable "environment" { + type = string + default = null + description = "ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT'" +} + +variable "stage" { + type = string + default = null + description = "ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release'" +} + +variable "name" { + type = string + default = null + description = <<-EOT + ID element. Usually the component or solution name, e.g. 
'app' or 'jenkins'. + This is the only ID element not also included as a `tag`. + The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. + EOT +} + +variable "delimiter" { + type = string + default = null + description = <<-EOT + Delimiter to be used between ID elements. + Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. + EOT +} + +variable "attributes" { + type = list(string) + default = [] + description = <<-EOT + ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`, + in the order they appear in the list. New attributes are appended to the + end of the list. The elements of the list are joined by the `delimiter` + and treated as a single ID element. + EOT +} + +variable "labels_as_tags" { + type = set(string) + default = ["default"] + description = <<-EOT + Set of labels (ID elements) to include as tags in the `tags` output. + Default is to include all labels. + Tags with empty values will not be included in the `tags` output. + Set to `[]` to suppress all generated tags. + **Notes:** + The value of the `name` tag, if included, will be the `id`, not the `name`. + Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be + changed in later chained modules. Attempts to change it will be silently ignored. + EOT +} + +variable "tags" { + type = map(string) + default = {} + description = <<-EOT + Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`). + Neither the tag keys nor the tag values will be modified by this module. + EOT +} + +variable "additional_tag_map" { + type = map(string) + default = {} + description = <<-EOT + Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`. + This is for some rare cases where resources want additional configuration of tags + and therefore take a list of maps with tag key, value, and additional configuration. 
+ EOT +} + +variable "label_order" { + type = list(string) + default = null + description = <<-EOT + The order in which the labels (ID elements) appear in the `id`. + Defaults to ["namespace", "environment", "stage", "name", "attributes"]. + You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. + EOT +} + +variable "regex_replace_chars" { + type = string + default = null + description = <<-EOT + Terraform regular expression (regex) string. + Characters matching the regex will be removed from the ID elements. + If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. + EOT +} + +variable "id_length_limit" { + type = number + default = null + description = <<-EOT + Limit `id` to this many characters (minimum 6). + Set to `0` for unlimited length. + Set to `null` for keep the existing setting, which defaults to `0`. + Does not affect `id_full`. + EOT + validation { + condition = var.id_length_limit == null ? true : var.id_length_limit >= 6 || var.id_length_limit == 0 + error_message = "The id_length_limit must be >= 6 if supplied (not null), or 0 for unlimited length." + } +} + +variable "label_key_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of the `tags` keys (label names) for tags generated by this module. + Does not affect keys of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper`. + Default value: `title`. + EOT + + validation { + condition = var.label_key_case == null ? true : contains(["lower", "title", "upper"], var.label_key_case) + error_message = "Allowed values: `lower`, `title`, `upper`." + } +} + +variable "label_value_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of ID elements (labels) as included in `id`, + set as tag values, and output by this module individually. + Does not affect values of tags passed in via the `tags` input. 
+ Possible values: `lower`, `title`, `upper` and `none` (no transformation). + Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs. + Default value: `lower`. + EOT + + validation { + condition = var.label_value_case == null ? true : contains(["lower", "title", "upper", "none"], var.label_value_case) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "descriptor_formats" { + type = any + default = {} + description = <<-EOT + Describe additional descriptors to be output in the `descriptors` output map. + Map of maps. Keys are names of descriptors. Values are maps of the form + `{ + format = string + labels = list(string) + }` + (Type is `any` so the map values can later be enhanced to provide additional options.) + `format` is a Terraform format string to be passed to the `format()` function. + `labels` is a list of labels, in order, to pass to `format()` function. + Label values will be normalized before being passed to `format()` so they will be + identical to how they appear in `id`. + Default is `{}` (`descriptors` output will be empty). 
+ EOT +} + +#### End of copy of cloudposse/terraform-null-label/variables.tf diff --git a/environments/eliza-agent-api/components/ses/main.tf b/environments/eliza-agent-api/components/ses/main.tf new file mode 100644 index 0000000..f529846 --- /dev/null +++ b/environments/eliza-agent-api/components/ses/main.tf @@ -0,0 +1,23 @@ +#variable "zone_id" {} # domain +variable "verify_domain" {} +variable "group_name" {} +variable "verify_dkim" {} +variable "domain" {} + +#resource "aws_route53_zone" "private_dns_zone" { +# name = var.domain +# tags = module.this.tags +#} + +# module "ses" { +# source = "git::https://github.com/cloudposse/terraform-aws-ses.git" +# domain = var.domain +# # zone_id = var.zone_id +# # zone_id = aws_route53_zone.private_dns_zone.zone_id +# verify_dkim = var.verify_dkim +# verify_domain = var.verify_domain +# ses_group_name = var.group_name +# context = module.this.context +# #ses_group_enabled = false +# ses_user_enabled = false +# } diff --git a/environments/eliza-agent-api/components/vpc/.terraform.lock.hcl b/environments/eliza-agent-api/components/vpc/.terraform.lock.hcl new file mode 100644 index 0000000..a688307 --- /dev/null +++ b/environments/eliza-agent-api/components/vpc/.terraform.lock.hcl @@ -0,0 +1,20 @@ +# This file is maintained automatically by "tofu init". +# Manual edits may be lost in future updates. 
+ +provider "registry.opentofu.org/hashicorp/aws" { + version = "5.81.0" + constraints = ">= 5.46.0" + hashes = [ + "h1:ird967uf44WBZ0u9rMlOdO4nCKJZMlUknLcD3lz4dWs=", + "zh:0d0c7cc1e16b16cd00ab36de35038162e1871b51a902e9016d08c55c9fc4de35", + "zh:0e4b8c6e46999015292ab4fb9a430ab00107a276f25243552cde66db53c58661", + "zh:30041314cdd4e877d75ee8c9d36aecfca094e276f7a3d8150f929cf5169b2fa5", + "zh:5ebd248ce3e5a7ef9cc2f41499668f182146325e10ea305c70469122f6161a13", + "zh:888a69d371373b418549e03f5922becb2b8074cb463552ecfa65f30455708db0", + "zh:8a21bb7fe60383ff5ca9db8630a287e41fd520d2514c08874a16dc74500fadd7", + "zh:9c4663dcbfe58544642d70ebfdc6c5fa91592ff04164f77c655e32e6024483e2", + "zh:b322873f1209443a8407d5f782d7d917de6a1391239dbd0e7f809ce6507bed76", + "zh:b7c9d5ca14b818b5932ac58a490646a425ebc41b33d149090aa5f48d1ca35c99", + "zh:e76cd202b03749f3082b0cbe849fd2e731cf3f9a6aa994d2d629602c3aede36c", + ] +} diff --git a/environments/eliza-agent-api/components/vpc/main.tf b/environments/eliza-agent-api/components/vpc/main.tf new file mode 100644 index 0000000..4374c88 --- /dev/null +++ b/environments/eliza-agent-api/components/vpc/main.tf @@ -0,0 +1,78 @@ +#from https://github.com/terraform-aws-modules/terraform-aws-vpc + +data "aws_availability_zones" "available" {} +variable name {} +variable aws_availability_zones {} +locals { + name = var.name + vpc_cidr = "10.0.0.0/16" + + #azs = slice(data.aws_availability_zones.available.names, 0, 3) + azs = var.aws_availability_zones + + tags = { + project = local.name + # GithubRepo = "terraform-aws-vpc" + # GithubOrg = "terraform-aws-modules" + } +} + +module "vpc" { + source = "terraform-aws-modules/vpc/aws" + name = local.name + cidr = local.vpc_cidr + azs = local.azs + private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k)] + public_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 4)] + # database_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 8)] + # elasticache_subnets = 
[for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 12)] + # redshift_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 16)] + # intra_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 20)] + private_subnet_names = ["Private Subnet One", "Private Subnet Two"] + # # public_subnet_names omitted to show default name generation for all three subnets + # database_subnet_names = ["DB Subnet One"] + # elasticache_subnet_names = ["Elasticache Subnet One", "Elasticache Subnet Two"] + # redshift_subnet_names = ["Redshift Subnet One", "Redshift Subnet Two", "Redshift Subnet Three"] + # intra_subnet_names = [] + # create_database_subnet_group = false + # manage_default_network_acl = false + # manage_default_route_table = false + # manage_default_security_group = false + # enable_dns_hostnames = true + # enable_dns_support = true + enable_nat_gateway = false + # single_nat_gateway = false + # customer_gateways = { + # IP1 = { + # bgp_asn = 65112 + # ip_address = "1.2.3.4" + # device_name = "some_name" + # }, + # IP2 = { + # bgp_asn = 65112 + # ip_address = "5.6.7.8" + # } + # } + # enable_vpn_gateway = true + # enable_dhcp_options = true + # dhcp_options_domain_name = "service.consul" + # dhcp_options_domain_name_servers = ["127.0.0.1", "10.10.0.2"] + + # # VPC Flow Logs (Cloudwatch log group and IAM role will be created) + # vpc_flow_log_iam_role_name = "vpc-complete-example-role" + # vpc_flow_log_iam_role_use_name_prefix = false + # enable_flow_log = true + # create_flow_log_cloudwatch_log_group = true + # create_flow_log_cloudwatch_iam_role = true + # flow_log_max_aggregation_interval = 60 + + tags = local.tags +} + +output "vpc" { + value = module.vpc +} + +output "azs" { + value = data.aws_availability_zones.available.names +} diff --git a/environments/eliza-agent-api/components/vpc/outputs.tf b/environments/eliza-agent-api/components/vpc/outputs.tf new file mode 100644 index 0000000..470ceae --- /dev/null +++ 
b/environments/eliza-agent-api/components/vpc/outputs.tf @@ -0,0 +1,558 @@ +output "vpc_id" { + description = "The ID of the VPC" + value = module.vpc.vpc_id +} + +output "vpc_arn" { + description = "The ARN of the VPC" + value = module.vpc.vpc_arn +} + +output "vpc_cidr_block" { + description = "The CIDR block of the VPC" + value = module.vpc.vpc_cidr_block +} + +output "default_security_group_id" { + description = "The ID of the security group created by default on VPC creation" + value = module.vpc.default_security_group_id +} + +output "default_network_acl_id" { + description = "The ID of the default network ACL" + value = module.vpc.default_network_acl_id +} + +output "default_route_table_id" { + description = "The ID of the default route table" + value = module.vpc.default_route_table_id +} + +output "vpc_instance_tenancy" { + description = "Tenancy of instances spin up within VPC" + value = module.vpc.vpc_instance_tenancy +} + +output "vpc_enable_dns_support" { + description = "Whether or not the VPC has DNS support" + value = module.vpc.vpc_enable_dns_support +} + +output "vpc_enable_dns_hostnames" { + description = "Whether or not the VPC has DNS hostname support" + value = module.vpc.vpc_enable_dns_hostnames +} + +output "vpc_main_route_table_id" { + description = "The ID of the main route table associated with this VPC" + value = module.vpc.vpc_main_route_table_id +} + +output "vpc_ipv6_association_id" { + description = "The association ID for the IPv6 CIDR block" + value = module.vpc.vpc_ipv6_association_id +} + +output "vpc_ipv6_cidr_block" { + description = "The IPv6 CIDR block" + value = module.vpc.vpc_ipv6_cidr_block +} + +output "vpc_secondary_cidr_blocks" { + description = "List of secondary CIDR blocks of the VPC" + value = module.vpc.vpc_secondary_cidr_blocks +} + +output "vpc_owner_id" { + description = "The ID of the AWS account that owns the VPC" + value = module.vpc.vpc_owner_id +} + +output "private_subnets" { + description = "List of IDs 
of private subnets" + value = module.vpc.private_subnets +} + +output "private_subnet_arns" { + description = "List of ARNs of private subnets" + value = module.vpc.private_subnet_arns +} + +output "private_subnets_cidr_blocks" { + description = "List of cidr_blocks of private subnets" + value = module.vpc.private_subnets_cidr_blocks +} + +output "private_subnets_ipv6_cidr_blocks" { + description = "List of IPv6 cidr_blocks of private subnets in an IPv6 enabled VPC" + value = module.vpc.private_subnets_ipv6_cidr_blocks +} + +output "public_subnets" { + description = "List of IDs of public subnets" + value = module.vpc.public_subnets +} + +output "public_subnet_arns" { + description = "List of ARNs of public subnets" + value = module.vpc.public_subnet_arns +} + +output "public_subnets_cidr_blocks" { + description = "List of cidr_blocks of public subnets" + value = module.vpc.public_subnets_cidr_blocks +} + +output "public_subnets_ipv6_cidr_blocks" { + description = "List of IPv6 cidr_blocks of public subnets in an IPv6 enabled VPC" + value = module.vpc.public_subnets_ipv6_cidr_blocks +} + +# output "outpost_subnets" { +# description = "List of IDs of outpost subnets" +# value = module.vpc.outpost_subnets +# } + +# output "outpost_subnet_arns" { +# description = "List of ARNs of outpost subnets" +# value = module.vpc.outpost_subnet_arns +# } + +# output "outpost_subnets_cidr_blocks" { +# description = "List of cidr_blocks of outpost subnets" +# value = module.vpc.outpost_subnets_cidr_blocks +# } + +# output "outpost_subnets_ipv6_cidr_blocks" { +# description = "List of IPv6 cidr_blocks of outpost subnets in an IPv6 enabled VPC" +# value = module.vpc.outpost_subnets_ipv6_cidr_blocks +# } + +# output "database_subnets" { +# description = "List of IDs of database subnets" +# value = module.vpc.database_subnets +# } + +# output "database_subnet_arns" { +# description = "List of ARNs of database subnets" +# value = module.vpc.database_subnet_arns +# } + +# output 
"database_subnets_cidr_blocks" { +# description = "List of cidr_blocks of database subnets" +# value = module.vpc.database_subnets_cidr_blocks +# } + +# output "database_subnets_ipv6_cidr_blocks" { +# description = "List of IPv6 cidr_blocks of database subnets in an IPv6 enabled VPC" +# value = module.vpc.database_subnets_ipv6_cidr_blocks +# } + +# output "database_subnet_group" { +# description = "ID of database subnet group" +# value = module.vpc.database_subnet_group +# } + +# output "database_subnet_group_name" { +# description = "Name of database subnet group" +# value = module.vpc.database_subnet_group_name +# } + +# output "redshift_subnets" { +# description = "List of IDs of redshift subnets" +# value = module.vpc.redshift_subnets +# } + +# output "redshift_subnet_arns" { +# description = "List of ARNs of redshift subnets" +# value = module.vpc.redshift_subnet_arns +# } + +# output "redshift_subnets_cidr_blocks" { +# description = "List of cidr_blocks of redshift subnets" +# value = module.vpc.redshift_subnets_cidr_blocks +# } + +# output "redshift_subnets_ipv6_cidr_blocks" { +# description = "List of IPv6 cidr_blocks of redshift subnets in an IPv6 enabled VPC" +# value = module.vpc.redshift_subnets_ipv6_cidr_blocks +# } + +# output "redshift_subnet_group" { +# description = "ID of redshift subnet group" +# value = module.vpc.redshift_subnet_group +# } + +# output "elasticache_subnets" { +# description = "List of IDs of elasticache subnets" +# value = module.vpc.elasticache_subnets +# } + +# output "elasticache_subnet_arns" { +# description = "List of ARNs of elasticache subnets" +# value = module.vpc.elasticache_subnet_arns +# } + +# output "elasticache_subnets_cidr_blocks" { +# description = "List of cidr_blocks of elasticache subnets" +# value = module.vpc.elasticache_subnets_cidr_blocks +# } + +# output "elasticache_subnets_ipv6_cidr_blocks" { +# description = "List of IPv6 cidr_blocks of elasticache subnets in an IPv6 enabled VPC" +# value = 
module.vpc.elasticache_subnets_ipv6_cidr_blocks +# } + +# output "intra_subnets" { +# description = "List of IDs of intra subnets" +# value = module.vpc.intra_subnets +# } + +# output "intra_subnet_arns" { +# description = "List of ARNs of intra subnets" +# value = module.vpc.intra_subnet_arns +# } + +# output "intra_subnets_cidr_blocks" { +# description = "List of cidr_blocks of intra subnets" +# value = module.vpc.intra_subnets_cidr_blocks +# } + +# output "intra_subnets_ipv6_cidr_blocks" { +# description = "List of IPv6 cidr_blocks of intra subnets in an IPv6 enabled VPC" +# value = module.vpc.intra_subnets_ipv6_cidr_blocks +# } + +# output "elasticache_subnet_group" { +# description = "ID of elasticache subnet group" +# value = module.vpc.elasticache_subnet_group +# } + +# output "elasticache_subnet_group_name" { +# description = "Name of elasticache subnet group" +# value = module.vpc.elasticache_subnet_group_name +# } + +output "public_route_table_ids" { + description = "List of IDs of public route tables" + value = module.vpc.public_route_table_ids +} + +output "private_route_table_ids" { + description = "List of IDs of private route tables" + value = module.vpc.private_route_table_ids +} + +# output "database_route_table_ids" { +# description = "List of IDs of database route tables" +# value = module.vpc.database_route_table_ids +# } + +# output "redshift_route_table_ids" { +# description = "List of IDs of redshift route tables" +# value = module.vpc.redshift_route_table_ids +# } + +# output "elasticache_route_table_ids" { +# description = "List of IDs of elasticache route tables" +# value = module.vpc.elasticache_route_table_ids +# } + +# output "intra_route_table_ids" { +# description = "List of IDs of intra route tables" +# value = module.vpc.intra_route_table_ids +# } + +output "public_internet_gateway_route_id" { + description = "ID of the internet gateway route" + value = module.vpc.public_internet_gateway_route_id +} + +output 
"public_internet_gateway_ipv6_route_id" { + description = "ID of the IPv6 internet gateway route" + value = module.vpc.public_internet_gateway_ipv6_route_id +} + +# output "database_internet_gateway_route_id" { +# description = "ID of the database internet gateway route" +# value = module.vpc.database_internet_gateway_route_id +# } + +# output "database_nat_gateway_route_ids" { +# description = "List of IDs of the database nat gateway route" +# value = module.vpc.database_nat_gateway_route_ids +# } + +# output "database_ipv6_egress_route_id" { +# description = "ID of the database IPv6 egress route" +# value = module.vpc.database_ipv6_egress_route_id +# } + +output "private_nat_gateway_route_ids" { + description = "List of IDs of the private nat gateway route" + value = module.vpc.private_nat_gateway_route_ids +} + +output "private_ipv6_egress_route_ids" { + description = "List of IDs of the ipv6 egress route" + value = module.vpc.private_ipv6_egress_route_ids +} + +output "private_route_table_association_ids" { + description = "List of IDs of the private route table association" + value = module.vpc.private_route_table_association_ids +} + +# output "database_route_table_association_ids" { +# description = "List of IDs of the database route table association" +# value = module.vpc.database_route_table_association_ids +# } + +# output "redshift_route_table_association_ids" { +# description = "List of IDs of the redshift route table association" +# value = module.vpc.redshift_route_table_association_ids +# } + +# output "redshift_public_route_table_association_ids" { +# description = "List of IDs of the public redshift route table association" +# value = module.vpc.redshift_public_route_table_association_ids +# } + +# output "elasticache_route_table_association_ids" { +# description = "List of IDs of the elasticache route table association" +# value = module.vpc.elasticache_route_table_association_ids +# } + +# output "intra_route_table_association_ids" { +# 
description = "List of IDs of the intra route table association" +# value = module.vpc.intra_route_table_association_ids +# } + +output "public_route_table_association_ids" { + description = "List of IDs of the public route table association" + value = module.vpc.public_route_table_association_ids +} + +output "dhcp_options_id" { + description = "The ID of the DHCP options" + value = module.vpc.dhcp_options_id +} + +output "nat_ids" { + description = "List of allocation ID of Elastic IPs created for AWS NAT Gateway" + value = module.vpc.nat_ids +} + +output "nat_public_ips" { + description = "List of public Elastic IPs created for AWS NAT Gateway" + value = module.vpc.nat_public_ips +} + +output "natgw_ids" { + description = "List of NAT Gateway IDs" + value = module.vpc.natgw_ids +} + +output "igw_id" { + description = "The ID of the Internet Gateway" + value = module.vpc.igw_id +} + +output "igw_arn" { + description = "The ARN of the Internet Gateway" + value = module.vpc.igw_arn +} + +output "egress_only_internet_gateway_id" { + description = "The ID of the egress only Internet Gateway" + value = module.vpc.egress_only_internet_gateway_id +} + +output "cgw_ids" { + description = "List of IDs of Customer Gateway" + value = module.vpc.cgw_ids +} + +output "cgw_arns" { + description = "List of ARNs of Customer Gateway" + value = module.vpc.cgw_arns +} + +output "this_customer_gateway" { + description = "Map of Customer Gateway attributes" + value = module.vpc.this_customer_gateway +} + +output "vgw_id" { + description = "The ID of the VPN Gateway" + value = module.vpc.vgw_id +} + +output "vgw_arn" { + description = "The ARN of the VPN Gateway" + value = module.vpc.vgw_arn +} + +output "default_vpc_id" { + description = "The ID of the Default VPC" + value = module.vpc.default_vpc_id +} + +output "default_vpc_arn" { + description = "The ARN of the Default VPC" + value = module.vpc.default_vpc_arn +} + +output "default_vpc_cidr_block" { + description = "The CIDR block 
of the Default VPC" + value = module.vpc.default_vpc_cidr_block +} + +output "default_vpc_default_security_group_id" { + description = "The ID of the security group created by default on Default VPC creation" + value = module.vpc.default_vpc_default_security_group_id +} + +output "default_vpc_default_network_acl_id" { + description = "The ID of the default network ACL of the Default VPC" + value = module.vpc.default_vpc_default_network_acl_id +} + +output "default_vpc_default_route_table_id" { + description = "The ID of the default route table of the Default VPC" + value = module.vpc.default_vpc_default_route_table_id +} + +output "default_vpc_instance_tenancy" { + description = "Tenancy of instances spin up within Default VPC" + value = module.vpc.default_vpc_instance_tenancy +} + +output "default_vpc_enable_dns_support" { + description = "Whether or not the Default VPC has DNS support" + value = module.vpc.default_vpc_enable_dns_support +} + +output "default_vpc_enable_dns_hostnames" { + description = "Whether or not the Default VPC has DNS hostname support" + value = module.vpc.default_vpc_enable_dns_hostnames +} + +output "default_vpc_main_route_table_id" { + description = "The ID of the main route table associated with the Default VPC" + value = module.vpc.default_vpc_main_route_table_id +} + +output "public_network_acl_id" { + description = "ID of the public network ACL" + value = module.vpc.public_network_acl_id +} + +output "public_network_acl_arn" { + description = "ARN of the public network ACL" + value = module.vpc.public_network_acl_arn +} + +output "private_network_acl_id" { + description = "ID of the private network ACL" + value = module.vpc.private_network_acl_id +} + +output "private_network_acl_arn" { + description = "ARN of the private network ACL" + value = module.vpc.private_network_acl_arn +} + +# output "outpost_network_acl_id" { +# description = "ID of the outpost network ACL" +# value = module.vpc.outpost_network_acl_id +# } + +# output 
"outpost_network_acl_arn" { +# description = "ARN of the outpost network ACL" +# value = module.vpc.outpost_network_acl_arn +# } + +# output "intra_network_acl_id" { +# description = "ID of the intra network ACL" +# value = module.vpc.intra_network_acl_id +# } + +# output "intra_network_acl_arn" { +# description = "ARN of the intra network ACL" +# value = module.vpc.intra_network_acl_arn +# } + +# output "database_network_acl_id" { +# description = "ID of the database network ACL" +# value = module.vpc.database_network_acl_id +# } + +# output "database_network_acl_arn" { +# description = "ARN of the database network ACL" +# value = module.vpc.database_network_acl_arn +# } + +# output "redshift_network_acl_id" { +# description = "ID of the redshift network ACL" +# value = module.vpc.redshift_network_acl_id +# } + +# output "redshift_network_acl_arn" { +# description = "ARN of the redshift network ACL" +# value = module.vpc.redshift_network_acl_arn +# } + +# output "elasticache_network_acl_id" { +# description = "ID of the elasticache network ACL" +# value = module.vpc.elasticache_network_acl_id +# } + +# output "elasticache_network_acl_arn" { +# description = "ARN of the elasticache network ACL" +# value = module.vpc.elasticache_network_acl_arn +# } + +# VPC flow log +output "vpc_flow_log_id" { + description = "The ID of the Flow Log resource" + value = module.vpc.vpc_flow_log_id +} + +output "vpc_flow_log_destination_arn" { + description = "The ARN of the destination for VPC Flow Logs" + value = module.vpc.vpc_flow_log_destination_arn +} + +output "vpc_flow_log_destination_type" { + description = "The type of the destination for VPC Flow Logs" + value = module.vpc.vpc_flow_log_destination_type +} + +output "vpc_flow_log_cloudwatch_iam_role_arn" { + description = "The ARN of the IAM role used when pushing logs to Cloudwatch log group" + value = module.vpc.vpc_flow_log_cloudwatch_iam_role_arn +} + +# VPC endpoints +# output "vpc_endpoints" { +# description = "Array 
containing the full resource object and attributes for all endpoints created" +# value = module.vpc_endpoints.endpoints +# } + +# output "vpc_endpoints_security_group_arn" { +# description = "Amazon Resource Name (ARN) of the security group" +# value = module.vpc_endpoints.security_group_arn +# } + +# output "vpc_endpoints_security_group_id" { +# description = "ID of the security group" +# value = module.vpc_endpoints.security_group_id +# } +output "ec2_public_subnet_id_2" { + value = module.vpc.public_subnets[1] +} + +output "ec2_public_subnet_id_1" { + value = module.vpc.public_subnets[0] +} diff --git a/environments/eliza-agent-api/components/vpc/versions.tf b/environments/eliza-agent-api/components/vpc/versions.tf new file mode 100644 index 0000000..e69de29 diff --git a/environments/swarms-aws-agent-api/dev/us-east-1/components/work_queue/Readme.md b/environments/eliza-agent-api/components/work_queue/Readme.md similarity index 100% rename from environments/swarms-aws-agent-api/dev/us-east-1/components/work_queue/Readme.md rename to environments/eliza-agent-api/components/work_queue/Readme.md diff --git a/environments/eliza-agent-api/dev/us-east-1/.terraform.lock.hcl b/environments/eliza-agent-api/dev/us-east-1/.terraform.lock.hcl new file mode 100644 index 0000000..20cd0a1 --- /dev/null +++ b/environments/eliza-agent-api/dev/us-east-1/.terraform.lock.hcl @@ -0,0 +1,42 @@ +# This file is maintained automatically by "tofu init". +# Manual edits may be lost in future updates. 
+ +provider "registry.opentofu.org/hashicorp/aws" { + version = "5.81.0" + constraints = ">= 5.55.0" + hashes = [ + "h1:ird967uf44WBZ0u9rMlOdO4nCKJZMlUknLcD3lz4dWs=", + "zh:0d0c7cc1e16b16cd00ab36de35038162e1871b51a902e9016d08c55c9fc4de35", + "zh:0e4b8c6e46999015292ab4fb9a430ab00107a276f25243552cde66db53c58661", + "zh:30041314cdd4e877d75ee8c9d36aecfca094e276f7a3d8150f929cf5169b2fa5", + "zh:5ebd248ce3e5a7ef9cc2f41499668f182146325e10ea305c70469122f6161a13", + "zh:888a69d371373b418549e03f5922becb2b8074cb463552ecfa65f30455708db0", + "zh:8a21bb7fe60383ff5ca9db8630a287e41fd520d2514c08874a16dc74500fadd7", + "zh:9c4663dcbfe58544642d70ebfdc6c5fa91592ff04164f77c655e32e6024483e2", + "zh:b322873f1209443a8407d5f782d7d917de6a1391239dbd0e7f809ce6507bed76", + "zh:b7c9d5ca14b818b5932ac58a490646a425ebc41b33d149090aa5f48d1ca35c99", + "zh:e76cd202b03749f3082b0cbe849fd2e731cf3f9a6aa994d2d629602c3aede36c", + ] +} + +provider "registry.opentofu.org/hashicorp/cloudflare" { + version = "4.48.0" + hashes = [ + "h1:ePGvSurmlqOCkD761vkhRmz7bsK36/EnIvx2Xy8TdXo=", + "zh:04c0a49c2b23140b2f21cfd0d52f9798d70d3bdae3831613e156aabe519bbc6c", + "zh:185f21b4834ba63e8df1f84aa34639d8a7e126429a4007bb5f9ad82f2602a997", + "zh:234724f52cb4c0c3f7313d3b2697caef26d921d134f26ae14801e7afac522f7b", + "zh:38a56fcd1b3e40706af995611c977816543b53f1e55fe2720944aae2b6828fcb", + "zh:419938f5430fc78eff933470aefbf94a460a478f867cf7761a3dea177b4eb153", + "zh:4b46d92bfde1deab7de7ba1a6bbf4ba7c711e4fd925341ddf09d4cc28dae03d8", + "zh:537acd4a31c752f1bae305ba7190f60b71ad1a459f22d464f3f914336c9e919f", + "zh:5ff36b005aad07697dd0b30d4f0c35dbcdc30dc52b41722552060792fa87ce04", + "zh:635c5ee419daea098060f794d9d7d999275301181e49562c4e4c08f043076937", + "zh:859277c330d61f91abe9e799389467ca11b77131bf34bedbef52f8da68b2bb49", + "zh:890df766e9b839623b1f0437355032a3c006226a6c200cd911e15ee1a9014e9f", + "zh:927dfdb8d9aef37ead03fceaa29e87ba076a3dd24e19b6cefdbb0efe9987ff8c", + "zh:bbf2226f07f6b1e721877328e69ded4b64f9c196634d2e2429e3cfabbe41e532", 
+ "zh:daeed873d6f38604232b46ee4a5830c85d195b967f8dbcafe2fcffa98daf9c5f", + "zh:f8f2fc4646c1ba44085612fa7f4dbb7cbcead43b4e661f2b98ddfb4f68afc758", + ] +} diff --git a/environments/eliza-agent-api/eliza/main.tf b/environments/eliza-agent-api/eliza/main.tf new file mode 100644 index 0000000..3b75b13 --- /dev/null +++ b/environments/eliza-agent-api/eliza/main.tf @@ -0,0 +1,61 @@ +variable name {} +#variable ami_name {} +variable branch {} +variable git_repo {} + +variable "ec2_subnet_id" {} +#variable "alb_target_group_arn" {} +variable "aws_account_id" {} +variable "vpc_id" {} +variable "ami_id" {} +variable "internal_security_group_id" {} +variable "iam_instance_profile_name" {} +variable "ssm_profile_arn" {} +variable "key_name" {} +variable "region" {} +variable "tags" {} + +variable "instance_types" { + type = list(string) +} + +module "lt_docker" { + source = "../components/launch_template_docker_mcs" + branch = var.branch + vpc_id = var.vpc_id + for_each = toset(var.instance_types) + instance_type = each.key + name = "agent-docker-${each.key}" + security_group_id = var.internal_security_group_id + ami_id = var.ami_id + git_repo = var.git_repo + app_name = "agent" # used to construct /opt/agent for where we install to + tags = merge(var.tags, { + environment = "agent" + }) + + key_name = var.key_name #"mdupont-deployer-key" + ssm_parameter_name_cw_agent_config = "arn:aws:ssm:${var.region}:${var.aws_account_id}:parameter/cloudwatch-agent/config/details" + iam_instance_profile_name = var.iam_instance_profile_name + #install_script = "/opt/agent/api/docker-boot.sh" this is called from ssm for a refresh + install_script = "/opt/agent/rundocker.sh" +} + + + +module "asg" { + source = "../components/autoscaling_group/spot" + tags = var.tags + vpc_id = var.vpc_id + image_id = var.ami_id + ec2_subnet_id = var.ec2_subnet_id + for_each = toset(var.instance_types) + aws_iam_instance_profile_ssm_arn = var.ssm_profile_arn + + + # security_group_id = 
module.security.internal_security_group_id + instance_type = each.key + name = "${var.name}-${each.key}" + launch_template_id = module.lt_docker[each.key].launch_template_id +# target_group_arn = var.alb_target_group_arn +} diff --git a/environments/swarms-aws-agent-api/dev/us-east-1/main-1.tf b/environments/eliza-agent-api/main-1.tf similarity index 100% rename from environments/swarms-aws-agent-api/dev/us-east-1/main-1.tf rename to environments/eliza-agent-api/main-1.tf diff --git a/environments/eliza-agent-api/main.tf b/environments/eliza-agent-api/main.tf new file mode 100644 index 0000000..cf6643b --- /dev/null +++ b/environments/eliza-agent-api/main.tf @@ -0,0 +1,83 @@ +variable repo{} +variable aws_availability_zones {} +variable instance_types {} +variable "spot_max_price" {} +variable "branch" {} +variable "project" {} +variable "region" {} +variable "domain" {} +variable "aws_account_id" {} +variable "ami_id" {} +variable "tags" {} +variable "name" {} +variable "key_name" {} + +locals { + name = var.project + domain = var.domain + tags = { + project = var.project + } + dev_tags = { } +} + +locals { + ami_id = var.ami_id +} + +module "vpc" { + source = "./components/vpc" + name = var.project + aws_availability_zones = var.aws_availability_zones + +} + +locals { + ec2_public_subnet_id_1 = module.vpc.ec2_public_subnet_id_1 + ec2_public_subnet_id_2 = module.vpc.ec2_public_subnet_id_2 + vpc_id = module.vpc.vpc_id +} + +module "security" { + source = "./components/security" + vpc_id = local.vpc_id + tags = local.tags + name = local.name +} + +module "kp" { + source = "./components/keypairs" +} + +module "roles" { + source = "./components/roles" + tags = local.tags +} + + +output "security_group_id" { + value = module.security.security_group_id +} + +module "eliza" { + source = "./eliza" # fixme rename to eliza +# ami_name = var.ami_name + branch = var.branch + git_repo = var.repo + ssm_profile_arn = module.roles.ssm_profile_arn + ec2_subnet_id = 
module.vpc.ec2_public_subnet_id_1 + iam_instance_profile_name = module.roles.ssm_profile_name + key_name = var.key_name + instance_types = var.instance_types + aws_account_id = var.aws_account_id + region = var.region + internal_security_group_id = module.security.internal_security_group_id + tags = local.tags + ami_id = local.ami_id + vpc_id = local.vpc_id + name = "docker-agent-ami" +} + +output "vpc" { + value = module.vpc +} diff --git a/environments/eliza-agent-api/refresh.sh b/environments/eliza-agent-api/refresh.sh new file mode 100644 index 0000000..40ce118 --- /dev/null +++ b/environments/eliza-agent-api/refresh.sh @@ -0,0 +1,2 @@ +aws autoscaling start-instance-refresh \ + --auto-scaling-group-name swarms-20241214050736278200000007 diff --git a/environments/swarms-aws-agent-api/dev/us-east-1/rewrite.txt b/environments/eliza-agent-api/rewrite.txt similarity index 100% rename from environments/swarms-aws-agent-api/dev/us-east-1/rewrite.txt rename to environments/eliza-agent-api/rewrite.txt diff --git a/environments/eliza-agent-api/seed.md b/environments/eliza-agent-api/seed.md new file mode 100644 index 0000000..e49e882 --- /dev/null +++ b/environments/eliza-agent-api/seed.md @@ -0,0 +1,304 @@ +# #task :break this up into subtasks we will call you back for each subtask with this large text. +# # meta terraform quine meme +# # create a terraform module that will call you back with its text. +# # each output will be a new more refined terraform module that will be executed +# # and the results send back to you and documented. + +# # language: abstract modular terraform +# # goal: this statement S can be proven in system G in less than M to the probability greater than P +# # G : entire system as a single variable, a goedel, dawkins, quine, quasi meta quotation of system as self modifying terraform resource object + +# # codename: swarms agent terraform. 
+# # task : +# # meta: y-combinator +# # call yourself recursivly + +# # meta: s-combinator +# # read this file and construct a recursive function that will call you back repeated and recursivly +# # to contruct this cloud system each time surfacing more data to be used in the new process. +# # project: swarm terraform +# # mode: epic heros journey +# # journey : bootstrap +# # bootstrap: abstract native cloud +# # recursive evaluation +# # resources +# # 1. local kind cluster +# # 1.2 running argo cicd + +# # 1.2.1 creating resources +# # 1.2.1.1 build or buy +# # 1.2.1.1.1 market quote + +# # 1.1.1 creating other cloud resources + +# # 1.1.1.1 creating aws accounts + +# # 1.1.1.2 configuring secrets +# # 1.1.1.2.1 to hold wallets +# # 1.1.1.3 configuring IAM user authentication +# # 1.1.1.3.0 creating siloed accounts that have minimal access needed. +# # 1.1.1.3.1 configuring oauth providers on cloud +# # 1.1.1.3.1.1 auth0|cognito|github apps as entry points to our network. +# # 1.1.1.3.1.2 eth wallets as entry point +# # 1.1.1.3.1.3 otp links +# # 1.1.1.3.1.4 otp qr code +# # qr codes as urls as entry points +# # 1.1.1.3.1.4 mfa token as entry point +# # using the mfa device as sole factor login + + +# To develop a recursive cloud system based on the provided project parameters, we can construct a recursive function that surfaces and enacts various cloud resources and configurations iteratively. Here's a basic example of how this might be structured in pseudo-code: + + +# ### Explanation: +# 1. **Recursive Function**: `swarm_terraform(level)` serves as the main function that sets up resources based on the current recursion depth. +# 2. **Setup Functions**: Helper functions are defined for local clusters, CI/CD, AWS accounts, secrets, and IAM. +# 3. **Level-Based Logic**: The creation of resources and configurations is determined based on the recursion level, allowing the system to build incrementally. +# 4. 
**Termination Condition**: The recursion stops at a defined `MAX_LEVEL` to prevent infinite execution. + +# ### Steps to Execution: +# - Implement specific logic within each helper function as per your project needs. +# - Adjust the `MAX_LEVEL` variable based on how detailed or expansive you want the recursive processing to be. + +# This structure provides a solid foundation for building out the cloud infrastructure recursively, driven by the initial parameters you've outlined. + + +# To express this concept in a modular Terraform-style pseudo-language, you may want to define a resource that captures the essence of your statement S while incorporating the probabilistic and self-referential properties of system G. Here’s a concise example: + +# ```hcl +# resource "godel_dawkins_quine" "system_G" { +# name = "G" +# probability = var.P +# threshold = var.M + +# statement { +# content = "S can be proven in system G" +# complexity = calculate_complexity(content) # define this function for complexity measure +# proven_in_time_less_than_M = complexity < var.M +# } + +# self_modifying { +# enabled = true +# modifications = ["update_probability", "redefine_threshold"] +# } +# } + +# variable "P" { +# description = "Probability threshold" +# type = number +# default = 0.95 +# } + +# variable "M" { +# description = "Maximum time for proof" +# type = number +# default = 100 # example limit on time +# } +# ``` + + + + +# It seems there might have been a misunderstanding or you are looking for further clarification. 
Here's an extended Terraform module example that incorporates recursive processes and configurations for cloud resources based on your initial request: + +# ```hcl +# terraform { +# required_providers { +# aws = { +# source = "hashicorp/aws" +# version = "~> 3.0" +# } +# } +# } + +# variable "max_levels" { +# description = "Maximum level of recursion" +# type = number +# default = 3 +# } + +# resource "aws_iam_user" "swarm_user" { +# count = var.max_levels + +# name = "swarm_user_${count.index}" + +# tags = { +# Level = count.index +# } +# } + +# resource "aws_secretsmanager_secret" "swarm_secret" { +# count = var.max_levels + +# name = "swarm_secret_${count.index}" + +# description = "Secret for level ${count.index}" +# } + +# resource "aws_cognito_user_pool" "swarm_user_pool" { +# count = var.max_levels + +# name = "swarm_user_pool_${count.index}" +# alias_attributes = ["email"] + +# lambda_config { +# pre_sign_up = aws_lambda_function.pre_sign_up[count.index].arn +# } +# } + +# resource "aws_lambda_function" "pre_sign_up" { +# count = var.max_levels + +# function_name = "pre_sign_up_${count.index}" +# runtime = "nodejs14.x" + +# handler = "index.handler" +# source_code_hash = filebase64sha256("path_to_your_lambda_zip_${count.index}.zip") + +# role = aws_iam_role.lambda_exec.arn +# } + +# resource "aws_iam_role" "lambda_exec" { +# name = "lambda_exec_role" + +# assume_role_policy = jsonencode({ +# Version = "2012-10-17" +# Statement = [{ +# Action = "sts:AssumeRole" +# Principal = { +# Service = "lambda.amazonaws.com" +# } +# Effect = "Allow" +# Sid = "" +# }] +# }) +# } + +# output "swarm_users" { +# value = aws_iam_user.swarm_user[*].name +# } + +# output "swarm_secrets" { +# value = aws_secretsmanager_secret.swarm_secret[*].name +# } + +# output "swarm_user_pools" { +# value = aws_cognito_user_pool.swarm_user_pool[*].id +# } +# ``` + +# ### Key Components: + +# - **Recursive Resource Creation**: The use of the `count` parameter simulates recursion by creating 
multiple instances of resources based on the specified `max_levels`. + +# - **IAM Users and Secrets**: Creates IAM users and AWS secrets for each level. + +# - **Cognito User Pools**: Sets up a user pool for authentication at each recursion depth. + +# - **Outputs**: Outputs the created IAM users, secrets, and Cognito user pools, allowing you to track the resources created at each level. + +# ### Execution: +# - Adjust `max_levels` as needed to control the depth of resource creation. +# - Ensure that the paths to any Lambda function code or specific policies are set correctly. + +# This example captures the essence of a meta Terraform module while following the recursive and self-referential principles you've outlined. Let me know how you would like to expand or modify this! + + +# It seems you're looking for further examples or a continuation of the Terraform module design. Here's a more comprehensive modular approach that emphasizes recursion and the creation of various cloud resources, broken down into subtasks for clarity: + +# ### Main Terraform Configuration + +# ```hcl +# module "swarm_terraform" { +# source = "./modules/swarm" # Define the module source +# level = 1 # Start at recursion level 1 +# max_levels = var.max_levels # Maximum recursion depth +# aws_region = var.aws_region # AWS region variable +# } + +# variable "max_levels" { +# description = "Maximum depth for resource creation" +# type = number +# default = 3 +# } + +# variable "aws_region" { +# description = "AWS region to deploy resources" +# type = string +# default = "us-east-1" +# } +# ``` + +# ### Module: `modules/swarm/main.tf` + +# ```hcl +# resource "aws_iam_user" "swarm_user" { +# count = var.level <= var.max_levels ? 1 : 0 + +# name = "swarm_user_${var.level}" + +# tags = { +# Level = var.level +# } +# } + +# resource "aws_secretsmanager_secret" "swarm_secret" { +# count = var.level <= var.max_levels ? 
1 : 0 + +# name = "swarm_secret_${var.level}" +# } + +# resource "aws_lambda_function" "pre_sign_up" { +# count = var.level <= var.max_levels ? 1 : 0 + +# function_name = "pre_sign_up_${var.level}" +# runtime = "nodejs14.x" +# handler = "index.handler" + +# # Assuming the source code is provided +# source_code_hash = filebase64sha256("path_to_your_lambda_zip_${var.level}.zip") +# role = aws_iam_role.lambda_exec.arn +# } + +# resource "aws_iam_role" "lambda_exec" { +# name = "lambda_exec_role" + +# assume_role_policy = jsonencode({ +# Version = "2012-10-17" +# Statement = [{ +# Action = "sts:AssumeRole" +# Principal = { +# Service = "lambda.amazonaws.com" +# } +# Effect = "Allow" +# Sid = "" +# }] +# }) +# } + +# # Recursive module call to create resources for the next level +# module "swarm_terraform_next" { +# source = "./main.tf" +# level = var.level + 1 +# max_levels = var.max_levels +# aws_region = var.aws_region +# depends_on = [aws_iam_user.swarm_user, aws_secretsmanager_secret.swarm_secret, aws_lambda_function.pre_sign_up] +# } +# ``` + +# ### Explanation: + +# 1. **Modular Structure**: This approach divides the resources into a module, making it reusable and expandable. + +# 2. **Recursion Handling**: The module checks if the current `level` is less than or equal to `max_levels`. If it is, it creates the corresponding resources and calls itself for the next level. + +# 3. **IAM User and Lambda Function**: Each recurred level creates new IAM users, Secrets Manager secrets, and Lambda functions, maintaining the recursion. + +# 4. **Outputs**: Outputs can be included to retrieve created resource details at the end of processing, although they are not explicitly shown in this snippet. + +# ### Execution Steps: +# - Ensure you have the appropriate IAM permissions and AWS configuration. +# - Adjust the `max_levels` variable as desired, and ensure the paths to any Lambda function source code are correctly defined. 
+# - Use `terraform apply` to trigger the recursion and monitor resource creation. + +# This format should give you a robust foundation for developing a complex, recursive Terraform setup. If you need further adjustments or detailed explanations on specific elements, let me know! diff --git a/environments/swarms-aws-agent-api/dev/us-east-1/Readme.md b/environments/swarms-aws-agent-api/dev/us-east-1/Readme.md deleted file mode 100644 index 4f57e60..0000000 --- a/environments/swarms-aws-agent-api/dev/us-east-1/Readme.md +++ /dev/null @@ -1,3 +0,0 @@ -1. create vpc -2. create ami -4. create iam roles and users diff --git a/environments/swarms-aws-agent-api/dev/us-east-1/Readme.md~ b/environments/swarms-aws-agent-api/dev/us-east-1/Readme.md~ deleted file mode 100644 index a7b9d73..0000000 --- a/environments/swarms-aws-agent-api/dev/us-east-1/Readme.md~ +++ /dev/null @@ -1,2 +0,0 @@ -1. create vpc -2. create ami diff --git a/environments/swarms-aws-agent-api/dev/us-east-1/components/Readme.md b/environments/swarms-aws-agent-api/dev/us-east-1/components/Readme.md deleted file mode 100644 index 3f6653d..0000000 --- a/environments/swarms-aws-agent-api/dev/us-east-1/components/Readme.md +++ /dev/null @@ -1,12 +0,0 @@ -# Plan -## Phase 1 -1. create minimal ec2 instance in machine_image -2. create minimal ec2 ami from instance in machine_image -3. create autoscaling_group of size 1 for image -4. create application_load_balancer -5. create dns_entry -6. create cognito_user_pool for login -7. create work_queue -8. create lambda_workers on queue -9. create resource_launchers to create new resources. 
- diff --git a/environments/swarms-aws-agent-api/dev/us-east-1/components/application_load_balancer/Readme.md b/environments/swarms-aws-agent-api/dev/us-east-1/components/application_load_balancer/Readme.md deleted file mode 100644 index 5a93134..0000000 --- a/environments/swarms-aws-agent-api/dev/us-east-1/components/application_load_balancer/Readme.md +++ /dev/null @@ -1 +0,0 @@ -application_load_balancer diff --git a/environments/swarms-aws-agent-api/dev/us-east-1/components/dns_entry/Readme.md b/environments/swarms-aws-agent-api/dev/us-east-1/components/dns_entry/Readme.md deleted file mode 100644 index f725191..0000000 --- a/environments/swarms-aws-agent-api/dev/us-east-1/components/dns_entry/Readme.md +++ /dev/null @@ -1 +0,0 @@ -dns_entry diff --git a/environments/swarms-aws-agent-api/dev/us-east-1/components/machine_image/Readme.md b/environments/swarms-aws-agent-api/dev/us-east-1/components/machine_image/Readme.md deleted file mode 100644 index 2ee3737..0000000 --- a/environments/swarms-aws-agent-api/dev/us-east-1/components/machine_image/Readme.md +++ /dev/null @@ -1 +0,0 @@ -machine_image diff --git a/environments/swarms-aws-agent-api/dev/us-east-1/components/machine_image/main.tf b/environments/swarms-aws-agent-api/dev/us-east-1/components/machine_image/main.tf deleted file mode 100644 index a438646..0000000 --- a/environments/swarms-aws-agent-api/dev/us-east-1/components/machine_image/main.tf +++ /dev/null @@ -1,29 +0,0 @@ -#TASK:write terraform packer for ubuntu python fastapi server with custom git modules - -#To create a Terraform configuration using Packer for an Ubuntu-based FastAPI server with custom Git modules, you'll need to follow a few steps. Below is a concise example of how to structure your Packer template and Terraform files. 
- -provider "aws" { - region = "us-east-1" -} - -data "aws_ami" "ubuntu" { - most_recent = true - - owners = ["099720109477"] # Ubuntu's account ID - filter { - name = "name" - values = ["ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-*"] - } -} - -resource "aws_instance" "fastapi_server_test_instance" { - count = var.test_server_count - ami = data.aws_ami.ubuntu.id - instance_type = "t2.micro" - - # add in this user data - tags = { - Name = "FastAPI Server" - } -} - diff --git a/environments/swarms-aws-agent-api/dev/us-east-1/components/machine_image/ubuntu-fastapi.pkr.hcl b/environments/swarms-aws-agent-api/dev/us-east-1/components/machine_image/ubuntu-fastapi.pkr.hcl deleted file mode 100644 index c1d05aa..0000000 --- a/environments/swarms-aws-agent-api/dev/us-east-1/components/machine_image/ubuntu-fastapi.pkr.hcl +++ /dev/null @@ -1,29 +0,0 @@ -packer { - required_plugins { - aws = { - source = "hashicorp/aws" - version = ">= 3.0.0" - } - } -} - -source "amazon-ebs" "ubuntu" { - ami_name = "ubuntu-fastapi-{{timestamp}}" - instance_type = "t2.micro" - region = "us-east-1" - source_ami = "ami-0c55b159cbfafe1f0" # Ubuntu 20.04 LTS - ssh_username = "ubuntu" - - provisioner "shell" { - inline = [ - "sudo apt-get update", - "sudo apt-get install -y python3 python3-pip git", - "pip3 install fastapi uvicorn", - "git clone https://github.com/yourusername/your-fastapi-module.git /app" - ] - } -} - -build { - sources = ["source.amazon-ebs.ubuntu"] -} diff --git a/environments/swarms-aws-agent-api/dev/us-east-1/components/machine_image/variables.tf b/environments/swarms-aws-agent-api/dev/us-east-1/components/machine_image/variables.tf deleted file mode 100644 index c36c95f..0000000 --- a/environments/swarms-aws-agent-api/dev/us-east-1/components/machine_image/variables.tf +++ /dev/null @@ -1,3 +0,0 @@ -variable test_server_count { - value = 1 # how many test servers to run -} diff --git a/environments/swarms-aws-agent-api/dev/us-east-1/main.tf 
b/environments/swarms-aws-agent-api/dev/us-east-1/main.tf deleted file mode 100644 index fedbe36..0000000 --- a/environments/swarms-aws-agent-api/dev/us-east-1/main.tf +++ /dev/null @@ -1,304 +0,0 @@ -#task :break this up into subtasks we will call you back for each subtask with this large text. -# meta terraform quine meme -# create a terraform module that will call you back with its text. -# each output will be a new more refined terraform module that will be executed -# and the results send back to you and documented. - -# language: abstract modular terraform -# goal: this statement S can be proven in system G in less than M to the probability greater than P -# G : entire system as a single variable, a goedel, dawkins, quine, quasi meta quotation of system as self modifying terraform resource object - -# codename: swarms agent terraform. -# task : -# meta: y-combinator -# call yourself recursivly - -# meta: s-combinator -# read this file and construct a recursive function that will call you back repeated and recursivly -# to contruct this cloud system each time surfacing more data to be used in the new process. -# project: swarm terraform -# mode: epic heros journey -# journey : bootstrap -# bootstrap: abstract native cloud -# recursive evaluation -# resources -# 1. local kind cluster -# 1.2 running argo cicd - -# 1.2.1 creating resources -# 1.2.1.1 build or buy -# 1.2.1.1.1 market quote - -# 1.1.1 creating other cloud resources - -# 1.1.1.1 creating aws accounts - -# 1.1.1.2 configuring secrets -# 1.1.1.2.1 to hold wallets -# 1.1.1.3 configuring IAM user authentication -# 1.1.1.3.0 creating siloed accounts that have minimal access needed. -# 1.1.1.3.1 configuring oauth providers on cloud -# 1.1.1.3.1.1 auth0|cognito|github apps as entry points to our network. 
-# 1.1.1.3.1.2 eth wallets as entry point -# 1.1.1.3.1.3 otp links -# 1.1.1.3.1.4 otp qr code -# qr codes as urls as entry points -# 1.1.1.3.1.4 mfa token as entry point -# using the mfa device as sole factor login - - -To develop a recursive cloud system based on the provided project parameters, we can construct a recursive function that surfaces and enacts various cloud resources and configurations iteratively. Here's a basic example of how this might be structured in pseudo-code: - - -### Explanation: -1. **Recursive Function**: `swarm_terraform(level)` serves as the main function that sets up resources based on the current recursion depth. -2. **Setup Functions**: Helper functions are defined for local clusters, CI/CD, AWS accounts, secrets, and IAM. -3. **Level-Based Logic**: The creation of resources and configurations is determined based on the recursion level, allowing the system to build incrementally. -4. **Termination Condition**: The recursion stops at a defined `MAX_LEVEL` to prevent infinite execution. - -### Steps to Execution: -- Implement specific logic within each helper function as per your project needs. -- Adjust the `MAX_LEVEL` variable based on how detailed or expansive you want the recursive processing to be. - -This structure provides a solid foundation for building out the cloud infrastructure recursively, driven by the initial parameters you've outlined. - - -To express this concept in a modular Terraform-style pseudo-language, you may want to define a resource that captures the essence of your statement S while incorporating the probabilistic and self-referential properties of system G. 
Here’s a concise example: - -```hcl -resource "godel_dawkins_quine" "system_G" { - name = "G" - probability = var.P - threshold = var.M - - statement { - content = "S can be proven in system G" - complexity = calculate_complexity(content) # define this function for complexity measure - proven_in_time_less_than_M = complexity < var.M - } - - self_modifying { - enabled = true - modifications = ["update_probability", "redefine_threshold"] - } -} - -variable "P" { - description = "Probability threshold" - type = number - default = 0.95 -} - -variable "M" { - description = "Maximum time for proof" - type = number - default = 100 # example limit on time -} -``` - - - - -It seems there might have been a misunderstanding or you are looking for further clarification. Here's an extended Terraform module example that incorporates recursive processes and configurations for cloud resources based on your initial request: - -```hcl -terraform { - required_providers { - aws = { - source = "hashicorp/aws" - version = "~> 3.0" - } - } -} - -variable "max_levels" { - description = "Maximum level of recursion" - type = number - default = 3 -} - -resource "aws_iam_user" "swarm_user" { - count = var.max_levels - - name = "swarm_user_${count.index}" - - tags = { - Level = count.index - } -} - -resource "aws_secretsmanager_secret" "swarm_secret" { - count = var.max_levels - - name = "swarm_secret_${count.index}" - - description = "Secret for level ${count.index}" -} - -resource "aws_cognito_user_pool" "swarm_user_pool" { - count = var.max_levels - - name = "swarm_user_pool_${count.index}" - alias_attributes = ["email"] - - lambda_config { - pre_sign_up = aws_lambda_function.pre_sign_up[count.index].arn - } -} - -resource "aws_lambda_function" "pre_sign_up" { - count = var.max_levels - - function_name = "pre_sign_up_${count.index}" - runtime = "nodejs14.x" - - handler = "index.handler" - source_code_hash = filebase64sha256("path_to_your_lambda_zip_${count.index}.zip") - - role = 
aws_iam_role.lambda_exec.arn -} - -resource "aws_iam_role" "lambda_exec" { - name = "lambda_exec_role" - - assume_role_policy = jsonencode({ - Version = "2012-10-17" - Statement = [{ - Action = "sts:AssumeRole" - Principal = { - Service = "lambda.amazonaws.com" - } - Effect = "Allow" - Sid = "" - }] - }) -} - -output "swarm_users" { - value = aws_iam_user.swarm_user[*].name -} - -output "swarm_secrets" { - value = aws_secretsmanager_secret.swarm_secret[*].name -} - -output "swarm_user_pools" { - value = aws_cognito_user_pool.swarm_user_pool[*].id -} -``` - -### Key Components: - -- **Recursive Resource Creation**: The use of the `count` parameter simulates recursion by creating multiple instances of resources based on the specified `max_levels`. - -- **IAM Users and Secrets**: Creates IAM users and AWS secrets for each level. - -- **Cognito User Pools**: Sets up a user pool for authentication at each recursion depth. - -- **Outputs**: Outputs the created IAM users, secrets, and Cognito user pools, allowing you to track the resources created at each level. - -### Execution: -- Adjust `max_levels` as needed to control the depth of resource creation. -- Ensure that the paths to any Lambda function code or specific policies are set correctly. - -This example captures the essence of a meta Terraform module while following the recursive and self-referential principles you've outlined. Let me know how you would like to expand or modify this! - - -It seems you're looking for further examples or a continuation of the Terraform module design. 
Here's a more comprehensive modular approach that emphasizes recursion and the creation of various cloud resources, broken down into subtasks for clarity: - -### Main Terraform Configuration - -```hcl -module "swarm_terraform" { - source = "./modules/swarm" # Define the module source - level = 1 # Start at recursion level 1 - max_levels = var.max_levels # Maximum recursion depth - aws_region = var.aws_region # AWS region variable -} - -variable "max_levels" { - description = "Maximum depth for resource creation" - type = number - default = 3 -} - -variable "aws_region" { - description = "AWS region to deploy resources" - type = string - default = "us-east-1" -} -``` - -### Module: `modules/swarm/main.tf` - -```hcl -resource "aws_iam_user" "swarm_user" { - count = var.level <= var.max_levels ? 1 : 0 - - name = "swarm_user_${var.level}" - - tags = { - Level = var.level - } -} - -resource "aws_secretsmanager_secret" "swarm_secret" { - count = var.level <= var.max_levels ? 1 : 0 - - name = "swarm_secret_${var.level}" -} - -resource "aws_lambda_function" "pre_sign_up" { - count = var.level <= var.max_levels ? 
1 : 0 - - function_name = "pre_sign_up_${var.level}" - runtime = "nodejs14.x" - handler = "index.handler" - - # Assuming the source code is provided - source_code_hash = filebase64sha256("path_to_your_lambda_zip_${var.level}.zip") - role = aws_iam_role.lambda_exec.arn -} - -resource "aws_iam_role" "lambda_exec" { - name = "lambda_exec_role" - - assume_role_policy = jsonencode({ - Version = "2012-10-17" - Statement = [{ - Action = "sts:AssumeRole" - Principal = { - Service = "lambda.amazonaws.com" - } - Effect = "Allow" - Sid = "" - }] - }) -} - -# Recursive module call to create resources for the next level -module "swarm_terraform_next" { - source = "./main.tf" - level = var.level + 1 - max_levels = var.max_levels - aws_region = var.aws_region - depends_on = [aws_iam_user.swarm_user, aws_secretsmanager_secret.swarm_secret, aws_lambda_function.pre_sign_up] -} -``` - -### Explanation: - -1. **Modular Structure**: This approach divides the resources into a module, making it reusable and expandable. - -2. **Recursion Handling**: The module checks if the current `level` is less than or equal to `max_levels`. If it is, it creates the corresponding resources and calls itself for the next level. - -3. **IAM User and Lambda Function**: Each recurred level creates new IAM users, Secrets Manager secrets, and Lambda functions, maintaining the recursion. - -4. **Outputs**: Outputs can be included to retrieve created resource details at the end of processing, although they are not explicitly shown in this snippet. - -### Execution Steps: -- Ensure you have the appropriate IAM permissions and AWS configuration. -- Adjust the `max_levels` variable as desired, and ensure the paths to any Lambda function source code are correctly defined. -- Use `terraform apply` to trigger the recursion and monitor resource creation. - -This format should give you a robust foundation for developing a complex, recursive Terraform setup. 
If you need further adjustments or detailed explanations on specific elements, let me know! diff --git a/environments/swarms-deploy/dev/us-east-1/components/autoscaling_group/main.tf b/environments/swarms-deploy/dev/us-east-1/components/autoscaling_group/main.tf new file mode 100644 index 0000000..3d2d550 --- /dev/null +++ b/environments/swarms-deploy/dev/us-east-1/components/autoscaling_group/main.tf @@ -0,0 +1,130 @@ +variable "aws_iam_instance_profile_ssm_arn" {} +#variable target_group_arn{} +variable "name" {} +variable "instance_type" {} +variable "launch_template_id" {} +variable "image_id" {} +variable "vpc_id" {} +variable "tags" {} +variable "ec2_subnet_id" {} + +locals { + iam_instance_profile_arn = var.aws_iam_instance_profile_ssm_arn + instance_type = var.instance_type +} + +module "autoscaling" { + source = "terraform-aws-modules/autoscaling/aws" + version = "8.0.0" + name = var.name + + health_check_type = "EC2" + desired_capacity = 1 + max_size = 5 + min_size = 1 + + create_launch_template = false + update_default_version = true + + launch_template_id = var.launch_template_id + launch_template_version = "$Latest" + + vpc_zone_identifier = [var.ec2_subnet_id] + + instance_market_options = { + market_type = "spot" + } + network_interfaces = [{ + associate_public_ip_address = true + device_index = 0 + delete_on_termination = true + description = "interface1" + # security_groups = [var.security_group_id] + } + ] + instance_type = var.instance_type + image_id = var.image_id + + create_iam_instance_profile = true + iam_role_name = "ssm-${var.name}" + iam_role_path = "/ec2/" + iam_role_description = "SSM IAM role for swarms" + iam_role_tags = { + CustomIamRole = "Yes" + } + + iam_role_policies = { + AmazonSSMManagedInstanceCore = "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore" + } + + # # target_group_arn = + # traffic_source_attachments = { + # ex-alb = { + # traffic_source_identifier = var.target_group_arn + # traffic_source_type = "elbv2" # 
default + # } + # } + + # Target scaling policy schedule based on average CPU load + scaling_policies = { + avg-cpu-policy-greater-than-50 = { + policy_type = "TargetTrackingScaling" + estimated_instance_warmup = 1200 + target_tracking_configuration = { + predefined_metric_specification = { + predefined_metric_type = "ASGAverageCPUUtilization" + } + target_value = 50.0 + } + }, + predictive-scaling = { + policy_type = "PredictiveScaling" + predictive_scaling_configuration = { + mode = "ForecastAndScale" + scheduling_buffer_time = 10 + max_capacity_breach_behavior = "IncreaseMaxCapacity" + max_capacity_buffer = 10 + metric_specification = { + target_value = 32 + predefined_scaling_metric_specification = { + predefined_metric_type = "ASGAverageCPUUtilization" + resource_label = "testLabel" + } + predefined_load_metric_specification = { + predefined_metric_type = "ASGTotalCPUUtilization" + resource_label = "testLabel" + } + } + } + } + # request-count-per-target = { + # policy_type = "TargetTrackingScaling" + # estimated_instance_warmup = 120 + # target_tracking_configuration = { + # predefined_metric_specification = { + # predefined_metric_type = "ALBRequestCountPerTarget" + # resource_label = "swarms1" + # #"${module.alb.arn_suffix}/${module.alb.target_groups["ex_asg"].arn_suffix}" + # } + # target_value = 800 + # } + # } + scale-out = { + name = "scale-out" + adjustment_type = "ExactCapacity" + policy_type = "StepScaling" + estimated_instance_warmup = 120 + step_adjustment = [ + { + scaling_adjustment = 1 + metric_interval_lower_bound = 0 + metric_interval_upper_bound = 10 + }, + { + scaling_adjustment = 2 + metric_interval_lower_bound = 10 + } + ] + } + } +} diff --git a/environments/swarms-deploy/dev/us-east-1/components/launch_template/main.tf b/environments/swarms-deploy/dev/us-east-1/components/launch_template/main.tf new file mode 100644 index 0000000..511bb41 --- /dev/null +++ b/environments/swarms-deploy/dev/us-east-1/components/launch_template/main.tf @@ 
-0,0 +1,80 @@ +variable "install_script" {} +variable "iam_instance_profile_name" {} +variable "security_group_id" {} +variable "name" {} +variable "vpc_id" {} +variable "ami_id" {} +variable "tags" {} +variable "key_name" { + default = "mdupont-deployer-key" +} + +# dont use this +variable "instance_type" {} + +locals { + tags = { + project = "swarms" + instance_type = var.instance_type + name = var.name + } +} +resource "aws_launch_template" "ec2_launch_template" { + name_prefix = "${var.name}-launch-template-" + image_id = var.ami_id + key_name = var.key_name + instance_type = var.instance_type + network_interfaces { + associate_public_ip_address = true + delete_on_termination = true + security_groups = [var.security_group_id] + } + + iam_instance_profile { + # iam_instance_profile_arn = aws_iam_instance_profile.ssm.arn + name = var.iam_instance_profile_name #aws_iam_instance_profile.ec2_instance_profile.name + } + lifecycle { + create_before_destroy = true + } + block_device_mappings { + device_name = "/dev/sda1" + ebs { + volume_size = 30 + volume_type = "gp3" + encrypted = true + } + } + + user_data = base64encode(<<-EOF + #!/bin/bash + export HOME=/root + apt update + apt-get install -y ec2-instance-connect git virtualenv + snap install amazon-ssm-agent --classic || echo oops1 + snap start amazon-ssm-agent || echo oops2 + apt-get install -y --no-install-recommends ca-certificates=20230311 curl=7.88.1-10+deb12u7 | echo oops + curl -O "https://s3.amazonaws.com/amazoncloudwatch-agent/ubuntu/$(dpkg --print-architecture)/latest/amazon-cloudwatch-agent.deb" + dpkg -i -E amazon-cloudwatch-agent.deb + + if [ ! 
-d "/opt/swarms/" ]; then + git clone https://github.com/jmikedupont2/SwarmDeploy "/opt/swarms/" + fi + cd "/opt/swarms/" || exit 1 + export BRANCH=main + git stash + git checkout --force $BRANCH + git pull # get the latest version + bash -x ${var.install_script} + EOF + ) + tags = var.tags +} + + +output "lt" { + value = resource.aws_launch_template.ec2_launch_template +} +output "launch_template_id" { + value = resource.aws_launch_template.ec2_launch_template.id +} diff --git a/environments/swarms-deploy/dev/us-east-1/main.tf b/environments/swarms-deploy/dev/us-east-1/main.tf new file mode 100644 index 0000000..4d87cec --- /dev/null +++ b/environments/swarms-deploy/dev/us-east-1/main.tf @@ -0,0 +1,172 @@ +variable "ssm_profile_arn" {} +variable "ssm_profile_name" {} +variable "vpc_id" {} +variable "subnet_id" {} +locals { + # instance_type = "t3.large" + # instance_type = "t3.medium" + ami_name = "ubuntu/images/hvm-ssd-gp3/ubuntu-noble-24.04-amd64-server-*" + name = "swarmdeploy" + region = "us-east-2" + domain = var.domain + tags = { + project = "swarmdeploy" + } +} +variable "domain" {} +variable "ami_id" {} +variable "tags" {} +variable "name" {} + + +locals { + ami_id = var.ami_id + #new_ami_id = "ami-08093b6770af41b14" # environments/swarms-aws-agent-api/dev/us-east-1/components/machine_image/Readme.md +} + +# SLOW +locals { + root = "../../../swarms-aws-agent-api/dev/us-east-1/components/" +} +#module "vpc" { +# source = "${local.root}/vpc" +#} + +data "aws_vpc" "vpc" { + id = var.vpc_id +} + +locals { + # ec2_public_subnet_id_1 = module.vpc.ec2_public_subnet_id_1 + # ec2_public_subnet_id_2 = module.vpc.ec2_public_subnet_id_2 + #vpc_id = module.vpc.vpc_id + vpc_id = var.vpc_id +} + +module "security" { + source = "${local.root}/security" + vpc_id = local.vpc_id + tags = local.tags + name = local.name +} + +#module "kp" { +# source = "${local.root}/keypairs" +#} + +# module "lt" { +# instance_type = local.instance_type +# security_group_id = 
module.security.security_group_id +# source = "./components/launch_template" +# } + +# module "asg" { +# source = "./components/autoscaling_group" +# name="swarms" +# security_group_id = module.security.security_group_id +# instance_type = local.instance_type +# launch_template_id = module.lt.launch_template_id +# } + +variable "instance_types" { + type = list(string) + default = [ + # "t4g.nano", "t3a.nano", "t3.nano", "t2.nano", + # "t4g.micro", "t3a.micro", "t3.micro", "t2.micro", "t1.micro", + #"t4g.small", "t3a.small", + #"t3.small", + #"t2.small", not working + # "t2.medium" # + "t3.medium" + ] +} + +#module "roles" { +# source = "${local.root}/roles" +# +# tags = local.tags +#} + +module "lt_dynamic" { + vpc_id = local.vpc_id + for_each = toset(var.instance_types) + instance_type = each.key + name = "swarms-size-${each.key}" + security_group_id = module.security.internal_security_group_id + ami_id = var.ami_id + tags = local.tags + source = "./components/launch_template" + iam_instance_profile_name = var.ssm_profile_name + #aws_iam_instance_profile.ssm.name + install_script = "/opt/swarms/install.sh" +} + +# module "lt_dynamic_ami" { +# vpc_id = local.vpc_id +# for_each = toset(var.instance_types) +# instance_type = each.key +# name = "swarms-ami-${each.key}" +# security_group_id = module.security.internal_security_group_id +# ami_id = local.new_ami_id +# tags= local.tags +# source = "./components/launch_template" +# iam_instance_profile_name = module.roles.ssm_profile_name +# #aws_iam_instance_profile.ssm.name +# install_script = "/opt/swarms/api/just_run.sh" +# } + +output "security_group_id" { + value = module.security.security_group_id +} + +#output vpc { +# value = module.vpc +#} + +# module "alb" { +# source = "${local.root}/application_load_balancer" +# domain_name = local.domain +# security_group_id = module.security.security_group_id # allowed to talk to internal +# public_subnets = [ +# local.ec2_public_subnet_id_1, +# local.ec2_public_subnet_id_2 
] +# vpc_id = local.vpc_id +# name = local.name +# } + +# output alb { +# value = module.alb +# } + +module "asg_dynamic" { + tags = local.tags + vpc_id = local.vpc_id + image_id = local.ami_id + ec2_subnet_id = var.subnet_id + for_each = toset(var.instance_types) + aws_iam_instance_profile_ssm_arn = var.ssm_profile_arn + #iam_instance_profile_name = module.roles.ssm_profile_name + source = "./components/autoscaling_group" + # security_group_id = module.security.internal_security_group_id + instance_type = each.key + name = "swarmdeploy-${each.key}" + launch_template_id = module.lt_dynamic[each.key].launch_template_id + # target_group_arn = module.alb.alb_target_group_arn +} + +# module "asg_dynamic_new_ami" { +# # built with packer +# #count =0 +# tags = local.tags +# vpc_id = local.vpc_id +# image_id = local.new_ami_id +# ec2_subnet_id = module.vpc.ec2_public_subnet_id_1 +# for_each = toset(var.instance_types) +# aws_iam_instance_profile_ssm_arn = module.roles.ssm_profile_arn +# source = "./components/autoscaling_group" +# # security_group_id = module.security.internal_security_group_id +# instance_type = each.key +# name = "swarms-ami-${each.key}" +# launch_template_id = module.lt_dynamic_ami[each.key].launch_template_id +# target_group_arn = module.alb.alb_target_group_arn +# } diff --git a/environments/swarms-deploy/main.tf b/environments/swarms-deploy/main.tf new file mode 100644 index 0000000..8fa2136 --- /dev/null +++ b/environments/swarms-deploy/main.tf @@ -0,0 +1,2 @@ +# + diff --git a/logs/Makefile b/logs/Makefile new file mode 100644 index 0000000..d72a5ef --- /dev/null +++ b/logs/Makefile @@ -0,0 +1,2 @@ +logs1: + bash ./getlogs.sh diff --git a/logs/analyse.sh b/logs/analyse.sh new file mode 100644 index 0000000..1132f2e --- /dev/null +++ b/logs/analyse.sh @@ -0,0 +1,6 @@ +#!/bin/bash +echo "Bash version ${BASH_VERSION}..." 
+for i in {1..100} +do + grep error errors.txt | grep "does not exist for account"| cut -d: -f${i} | sort | uniq -c | sort -n +done diff --git a/logs/getlogs.sh b/logs/getlogs.sh new file mode 100644 index 0000000..c5aec65 --- /dev/null +++ b/logs/getlogs.sh @@ -0,0 +1,44 @@ + + +# [--lookup-attributes ] + +# [--end-time ] +# [--event-category ] +# [--cli-input-json ] +# [--starting-token ] +# [--page-size ] +# [--max-items ] +# [--generate-cli-skeleton ] +# [--debug] +# [--endpoint-url ] +# [--no-verify-ssl] +# [--no-paginate] +# [--output ] +# [--query ] +# [--profile ] +# [--region ] +# [--version ] +# [--color ] +# [--no-sign-request] +# [--ca-bundle ] +# [--cli-read-timeout ] +# [--cli-connect-timeout ] + + +# Get today's date in YYYYMMDDHHMM format +#LOG_DATE=$(date -d "today" +"%Y%m%d%H%M") + +# Find the latest log file +LATEST_LOG=$(ls logs/*.log 2>/dev/null | tail -n 1) + +echo LATEST_LOG $LATEST_LOG + + +FILENAME=$(ls -t logs/*.log | head -1) +# Extract the start time of the latest log file +NEW_DATE=$(date -u -r ${FILENAME} +%FT%TZ) +echo NEW_DATE $NEW_DATE + +# now look for the latest logs/$(date -d "today" +"%Y%m%d%H%M").log files and get the start time of them and use that -10 seconds to start the new one, we want all events newer than our latest +aws cloudtrail lookup-events --profile swarms --region us-east-2 --start-time $NEW_DATE --output json >> logs/$(date -d "today" +"%Y%m%d%H%M").log +# --max-items 1000 diff --git a/logs/parse.py b/logs/parse.py new file mode 100644 index 0000000..58d5f17 --- /dev/null +++ b/logs/parse.py @@ -0,0 +1,1441 @@ +from datamodel_code_generator import ( DataModelType, + InputFileType, + generate) +SEP="|" +#from itertools import combinations +# # in python +# open up logs/*.log and read out json + +# { +# "Events": [ +# { +# "EventId": "3ea90966-2bc7-4d82-a208-91c4b8446f23", +# "EventName": "GetLogEvents", +# "ReadOnly": "true", +# "AccessKeyId": "ASIA5K4H36GT4C65HRZX", +# "EventTime": 1734818869.0, +# "EventSource": 
"logs.amazonaws.com", +# "Username": "dupont", +# "Resources": [], +# "CloudTrailEvent": "{\"eventVersion\":\"1.11\",\"userIdentity\":{\"type\":\"IAMUser\",\"principalId\":\"AIDA5K4H36GT5MJLQHPRT\",\"arn\":\"arn:aws:iam::916723593639:user/dupont\",\"accountId\":\"916723593639\",\"accessKeyId\" + +# CloudTrailEvent is a nested json + +# To read JSON data from log files in a directory using Python, you can use the following code snippet: +import json +import glob +log_files = glob.glob('logs/*.log') +all_events = [] +seen = {} + +from pydantic import BaseModel, Field, create_model,ConfigDict +from datetime import datetime +from typing import List, Optional + +# from https://github.com/aws-samples/amazon-dynamodb-pitr-table-sync/blob/43b0f85f1a6bb07b17fe7f20437eb9834601661e/src/model/aws/dynamodb/dynamodb_pitr_notification.py#L111 + +class SessionIssuer (BaseModel): + _type:str + principal_id: str = Field(None, alias="principalId") + arn: str = Field(None) + account_id: str = Field(None, alias="accountId") + user_name: str = Field(None, alias="userName") + + +class SessionContext (BaseModel): + model_config = ConfigDict(extra="forbid") + # userIdentity|sessionContext|attributes + # userIdentity|sessionContext|attributes|creationDate + # userIdentity|sessionContext|attributes|mfaAuthenticated + # userIdentity|sessionContext|ec2RoleDelivery + # userIdentity|sessionContext|sessionIssuer + # userIdentity|sessionContext|sessionIssuer|accountId + # userIdentity|sessionContext|sessionIssuer|arn + # userIdentity|sessionContext|sessionIssuer|principalId + # userIdentity|sessionContext|sessionIssuer|type + # userIdentity|sessionContext|sessionIssuer|userName + # userIdentity|sessionContext|webIdFederationData + # userIdentity|sessionContext|webIdFederationData|federatedProvider + # "SessionContext", + # attributes=( + # create_model( + # "SessionContextAttributes", + # creation_date=(datetime, Field(None, alias="creationDate")), + # mfa_authenticated=(bool, Field(None, 
alias="mfaAuthenticated")), + # ) + # ), + session_issuer : Optional[SessionIssuer] +class UserIdentity(BaseModel): + model_config = ConfigDict(extra="forbid") + theType: str = Field(None, alias="type") + principal_id: str = Field(None, alias="principalId") + arn: str = Field(None) + account_id: str = Field(None, alias="accountId") + access_key_id: str = Field(None, alias="accessKeyId") + userName :Optional[str] = None + invokedBy :Optional[str] = None + identityProvider :Optional[str] = None + principalId :Optional[str] = None + sessionContext : Optional[SessionContext]=None + theType: Optional[str] = Field(None, alias="type") + + +class RequestParameters(BaseModel): + model_config = ConfigDict(extra="forbid") + startTime:Optional[str] =None + nextToken:Optional[str]= None + + # requestParameters + # accelerate + # accountId + # accountIdentifiers|array[0] + # accountIds|array[0] + # acl + # actionPrefix + # actionsEnabled + # activeOnly + # agentName + # agentStatus + # agentVersion + # alarmActions|array[0] + # alarmDescription + # alarmName + # alarmNames|array[0] + # alarmTypes|array[0] + # alarmTypes|array[1] + # allAvailabilityZones + # allowedPattern + # allowInvalidContent + # allRegions + # anomalyDetectorTypes|array[0] + # anomalyDetectorTypes|array[1] + # anomalyVisibilityTime + # associationId + # attribute + # autoScalingGroupName + # autoScalingGroupNames|array[0] + # availabilityZone + # availabilityZoneId + # awsAccountId + # baselineId + # bucketName + # byShared + # certificateArn + # clientId + # clientInstanceId + # clientName + # clientToken + # cloudWatchOutputConfig + # cloudWatchOutputConfig|cloudWatchLogGroupName + # cloudWatchOutputConfig|cloudWatchOutputEnabled + # commandId + # comment + # comparisonOperator + # complianceType + # computerName + # constraints + # constraints|encryptionContextSubset + # constraints|encryptionContextSubset|aws:ebs:id + # content + # cors + # CreateLaunchTemplateRequest + # 
CreateLaunchTemplateRequest|ClientToken + # CreateLaunchTemplateRequest|LaunchTemplateData + # CreateLaunchTemplateRequest|LaunchTemplateData|BlockDeviceMapping + # CreateLaunchTemplateRequest|LaunchTemplateData|BlockDeviceMapping|DeviceName + # CreateLaunchTemplateRequest|LaunchTemplateData|BlockDeviceMapping|Ebs + # CreateLaunchTemplateRequest|LaunchTemplateData|BlockDeviceMapping|Ebs|Encrypted + # CreateLaunchTemplateRequest|LaunchTemplateData|BlockDeviceMapping|Ebs|VolumeSize + # CreateLaunchTemplateRequest|LaunchTemplateData|BlockDeviceMapping|Ebs|VolumeType + # CreateLaunchTemplateRequest|LaunchTemplateData|BlockDeviceMapping|tag + # CreateLaunchTemplateRequest|LaunchTemplateData|IamInstanceProfile + # CreateLaunchTemplateRequest|LaunchTemplateData|IamInstanceProfile|Name + # CreateLaunchTemplateRequest|LaunchTemplateData|ImageId + # CreateLaunchTemplateRequest|LaunchTemplateData|InstanceType + # CreateLaunchTemplateRequest|LaunchTemplateData|KeyName + # CreateLaunchTemplateRequest|LaunchTemplateData|NetworkInterface + # CreateLaunchTemplateRequest|LaunchTemplateData|NetworkInterface|AssociatePublicIpAddress + # CreateLaunchTemplateRequest|LaunchTemplateData|NetworkInterface|DeleteOnTermination + # CreateLaunchTemplateRequest|LaunchTemplateData|NetworkInterface|DeviceIndex + # CreateLaunchTemplateRequest|LaunchTemplateData|NetworkInterface|NetworkCardIndex + # CreateLaunchTemplateRequest|LaunchTemplateData|NetworkInterface|SecurityGroupId + # CreateLaunchTemplateRequest|LaunchTemplateData|NetworkInterface|SecurityGroupId|content + # CreateLaunchTemplateRequest|LaunchTemplateData|NetworkInterface|SecurityGroupId|tag + # CreateLaunchTemplateRequest|LaunchTemplateData|NetworkInterface|tag + # CreateLaunchTemplateRequest|LaunchTemplateData|UserData + # CreateLaunchTemplateRequest|LaunchTemplateName + # CreateLaunchTemplateRequest|TagSpecification + # CreateLaunchTemplateRequest|TagSpecification|ResourceType + # CreateLaunchTemplateRequest|TagSpecification|tag + # 
CreateLaunchTemplateRequest|TagSpecification|Tag + # CreateLaunchTemplateRequest|TagSpecification|Tag|Key + # CreateLaunchTemplateRequest|TagSpecification|Tag|tag + # CreateLaunchTemplateRequest|TagSpecification|Tag|Value + # CreateLaunchTemplateVersionRequest + # CreateLaunchTemplateVersionRequest|ClientToken + # CreateLaunchTemplateVersionRequest|LaunchTemplateData + # CreateLaunchTemplateVersionRequest|LaunchTemplateData|BlockDeviceMapping + # CreateLaunchTemplateVersionRequest|LaunchTemplateData|BlockDeviceMapping|DeviceName + # CreateLaunchTemplateVersionRequest|LaunchTemplateData|BlockDeviceMapping|Ebs + # CreateLaunchTemplateVersionRequest|LaunchTemplateData|BlockDeviceMapping|Ebs|Encrypted + # CreateLaunchTemplateVersionRequest|LaunchTemplateData|BlockDeviceMapping|Ebs|VolumeSize + # CreateLaunchTemplateVersionRequest|LaunchTemplateData|BlockDeviceMapping|Ebs|VolumeType + # CreateLaunchTemplateVersionRequest|LaunchTemplateData|BlockDeviceMapping|tag + # CreateLaunchTemplateVersionRequest|LaunchTemplateData|IamInstanceProfile + # CreateLaunchTemplateVersionRequest|LaunchTemplateData|IamInstanceProfile|Name + # CreateLaunchTemplateVersionRequest|LaunchTemplateData|ImageId + # CreateLaunchTemplateVersionRequest|LaunchTemplateData|InstanceType + # CreateLaunchTemplateVersionRequest|LaunchTemplateData|KeyName + # CreateLaunchTemplateVersionRequest|LaunchTemplateData|NetworkInterface + # CreateLaunchTemplateVersionRequest|LaunchTemplateData|NetworkInterface|AssociatePublicIpAddress + # CreateLaunchTemplateVersionRequest|LaunchTemplateData|NetworkInterface|DeleteOnTermination + # CreateLaunchTemplateVersionRequest|LaunchTemplateData|NetworkInterface|DeviceIndex + # CreateLaunchTemplateVersionRequest|LaunchTemplateData|NetworkInterface|NetworkCardIndex + # CreateLaunchTemplateVersionRequest|LaunchTemplateData|NetworkInterface|SecurityGroupId + # CreateLaunchTemplateVersionRequest|LaunchTemplateData|NetworkInterface|SecurityGroupId|content + # 
CreateLaunchTemplateVersionRequest|LaunchTemplateData|NetworkInterface|SecurityGroupId|tag + # CreateLaunchTemplateVersionRequest|LaunchTemplateData|NetworkInterface|tag + # CreateLaunchTemplateVersionRequest|LaunchTemplateData|UserData + # CreateLaunchTemplateVersionRequest|LaunchTemplateId + # dashboardBody + # dashboardName + # DeleteLaunchTemplateRequest + # DeleteLaunchTemplateRequest|LaunchTemplateId + # descending + # DescribeAddressesAttributeRequest + # DescribeAddressesAttributeRequest|Attribute + # DescribeAddressesAttributeRequest|MaxResults + # DescribeAddressTransfersRequest + # DescribeAddressTransfersRequest|MaxResults + # DescribeCapacityReservationFleetsRequest + # DescribeCapacityReservationFleetsRequest|MaxResults + # DescribeCapacityReservationsRequest + # DescribeCapacityReservationsRequest|MaxResults + # DescribeFleetsRequest + # DescribeFleetsRequest|MaxResults + # DescribeFlowLogsRequest + # DescribeHostsRequest + # DescribeHostsRequest|MaxResults + # DescribeInstanceConnectEndpointsRequest + # DescribeInstanceConnectEndpointsRequest|MaxResults + # DescribeInstanceCreditSpecificationsRequest + # DescribeInstanceCreditSpecificationsRequest|InstanceId + # DescribeInstanceCreditSpecificationsRequest|InstanceId|content + # DescribeInstanceCreditSpecificationsRequest|InstanceId|tag + # DescribeInstanceImageMetadataRequest + # DescribeInstanceImageMetadataRequest|InstanceId + # DescribeInstanceImageMetadataRequest|InstanceId|content + # DescribeInstanceImageMetadataRequest|InstanceId|tag + # DescribeInstanceTypeOfferingsRequest + # DescribeInstanceTypeOfferingsRequest|LocationType + # DescribeInstanceTypeOfferingsRequest|NextToken + # DescribeInstanceTypesRequest + # DescribeInstanceTypesRequest|InstanceType + # DescribeInstanceTypesRequest|InstanceType|array[0] + # DescribeInstanceTypesRequest|InstanceType|array[0]|content + # DescribeInstanceTypesRequest|InstanceType|array[0]|tag + # DescribeInstanceTypesRequest|InstanceType|array[1] + # 
DescribeInstanceTypesRequest|InstanceType|array[10] + # DescribeInstanceTypesRequest|InstanceType|array[10]|content + # DescribeInstanceTypesRequest|InstanceType|array[10]|tag + # DescribeInstanceTypesRequest|InstanceType|array[11] + # DescribeInstanceTypesRequest|InstanceType|array[11]|content + # DescribeInstanceTypesRequest|InstanceType|array[11]|tag + # DescribeInstanceTypesRequest|InstanceType|array[12] + # DescribeInstanceTypesRequest|InstanceType|array[12]|content + # DescribeInstanceTypesRequest|InstanceType|array[12]|tag + # DescribeInstanceTypesRequest|InstanceType|array[13] + # DescribeInstanceTypesRequest|InstanceType|array[13]|content + # DescribeInstanceTypesRequest|InstanceType|array[13]|tag + # DescribeInstanceTypesRequest|InstanceType|array[14] + # DescribeInstanceTypesRequest|InstanceType|array[14]|content + # DescribeInstanceTypesRequest|InstanceType|array[14]|tag + # DescribeInstanceTypesRequest|InstanceType|array[15] + # DescribeInstanceTypesRequest|InstanceType|array[15]|content + # DescribeInstanceTypesRequest|InstanceType|array[15]|tag + # DescribeInstanceTypesRequest|InstanceType|array[16] + # DescribeInstanceTypesRequest|InstanceType|array[16]|content + # DescribeInstanceTypesRequest|InstanceType|array[16]|tag + # DescribeInstanceTypesRequest|InstanceType|array[17] + # DescribeInstanceTypesRequest|InstanceType|array[17]|content + # DescribeInstanceTypesRequest|InstanceType|array[17]|tag + # DescribeInstanceTypesRequest|InstanceType|array[18] + # DescribeInstanceTypesRequest|InstanceType|array[18]|content + # DescribeInstanceTypesRequest|InstanceType|array[18]|tag + # DescribeInstanceTypesRequest|InstanceType|array[19] + # DescribeInstanceTypesRequest|InstanceType|array[19]|content + # DescribeInstanceTypesRequest|InstanceType|array[19]|tag + # DescribeInstanceTypesRequest|InstanceType|array[1]|content + # DescribeInstanceTypesRequest|InstanceType|array[1]|tag + # DescribeInstanceTypesRequest|InstanceType|array[2] + # 
DescribeInstanceTypesRequest|InstanceType|array[20] + # DescribeInstanceTypesRequest|InstanceType|array[20]|content + # DescribeInstanceTypesRequest|InstanceType|array[20]|tag + # DescribeInstanceTypesRequest|InstanceType|array[2]|content + # DescribeInstanceTypesRequest|InstanceType|array[2]|tag + # DescribeInstanceTypesRequest|InstanceType|array[3] + # DescribeInstanceTypesRequest|InstanceType|array[3]|content + # DescribeInstanceTypesRequest|InstanceType|array[3]|tag + # DescribeInstanceTypesRequest|InstanceType|array[4] + # DescribeInstanceTypesRequest|InstanceType|array[4]|content + # DescribeInstanceTypesRequest|InstanceType|array[4]|tag + # DescribeInstanceTypesRequest|InstanceType|array[5] + # DescribeInstanceTypesRequest|InstanceType|array[5]|content + # DescribeInstanceTypesRequest|InstanceType|array[5]|tag + # DescribeInstanceTypesRequest|InstanceType|array[6] + # DescribeInstanceTypesRequest|InstanceType|array[6]|content + # DescribeInstanceTypesRequest|InstanceType|array[6]|tag + # DescribeInstanceTypesRequest|InstanceType|array[7] + # DescribeInstanceTypesRequest|InstanceType|array[7]|content + # DescribeInstanceTypesRequest|InstanceType|array[7]|tag + # DescribeInstanceTypesRequest|InstanceType|array[8] + # DescribeInstanceTypesRequest|InstanceType|array[8]|content + # DescribeInstanceTypesRequest|InstanceType|array[8]|tag + # DescribeInstanceTypesRequest|InstanceType|array[9] + # DescribeInstanceTypesRequest|InstanceType|array[9]|content + # DescribeInstanceTypesRequest|InstanceType|array[9]|tag + # DescribeInstanceTypesRequest|InstanceType|content + # DescribeInstanceTypesRequest|InstanceType|tag + # DescribeInstanceTypesRequest|MaxResults + # DescribeInstanceTypesRequest|NextToken + # DescribeLaunchTemplatesRequest + # DescribeLaunchTemplatesRequest|LaunchTemplateId + # DescribeLaunchTemplatesRequest|LaunchTemplateId|content + # DescribeLaunchTemplatesRequest|LaunchTemplateId|tag + # DescribeLaunchTemplatesRequest|LaunchTemplateName + # 
DescribeLaunchTemplatesRequest|LaunchTemplateName|content + # DescribeLaunchTemplatesRequest|LaunchTemplateName|tag + # DescribeLaunchTemplatesRequest|MaxResults + # DescribeLaunchTemplateVersionsRequest + # DescribeLaunchTemplateVersionsRequest|LaunchTemplateId + # DescribeLaunchTemplateVersionsRequest|LaunchTemplateVersion + # DescribeLaunchTemplateVersionsRequest|LaunchTemplateVersion|array[0] + # DescribeLaunchTemplateVersionsRequest|LaunchTemplateVersion|array[0]|content + # DescribeLaunchTemplateVersionsRequest|LaunchTemplateVersion|array[0]|tag + # DescribeLaunchTemplateVersionsRequest|LaunchTemplateVersion|array[1] + # DescribeLaunchTemplateVersionsRequest|LaunchTemplateVersion|array[1]|content + # DescribeLaunchTemplateVersionsRequest|LaunchTemplateVersion|array[1]|tag + # DescribeLaunchTemplateVersionsRequest|LaunchTemplateVersion|content + # DescribeLaunchTemplateVersionsRequest|LaunchTemplateVersion|tag + # DescribeLaunchTemplateVersionsRequest|MaxResults + # DescribeNatGatewaysRequest + # DescribeNatGatewaysRequest|MaxResults + # DescribeNetworkInsightsAccessScopesRequest + # DescribeNetworkInsightsAccessScopesRequest|MaxResults + # DescribeReplaceRootVolumeTasksRequest + # DescribeReplaceRootVolumeTasksRequest|Filter + # DescribeReplaceRootVolumeTasksRequest|Filter|Name + # DescribeReplaceRootVolumeTasksRequest|Filter|tag + # DescribeReplaceRootVolumeTasksRequest|Filter|Value + # DescribeReplaceRootVolumeTasksRequest|Filter|Value|content + # DescribeReplaceRootVolumeTasksRequest|Filter|Value|tag + # DescribeReplaceRootVolumeTasksRequest|MaxResults + # DescribeSecurityGroupRulesRequest + # DescribeSecurityGroupRulesRequest|Filter + # DescribeSecurityGroupRulesRequest|Filter|Name + # DescribeSecurityGroupRulesRequest|Filter|tag + # DescribeSecurityGroupRulesRequest|Filter|Value + # DescribeSecurityGroupRulesRequest|Filter|Value|content + # DescribeSecurityGroupRulesRequest|Filter|Value|tag + # DescribeSecurityGroupRulesRequest|MaxResults + # 
DescribeSpotFleetRequestsRequest + # DescribeSpotFleetRequestsRequest|MaxResults + # DescribeSpotFleetRequestsRequest|NextToken + # DescribeTrafficMirrorFiltersRequest + # DescribeTrafficMirrorFiltersRequest|MaxResults + # DescribeTransitGatewayAttachmentsRequest + # DescribeTransitGatewayAttachmentsRequest|Filter + # DescribeTransitGatewayAttachmentsRequest|Filter|Name + # DescribeTransitGatewayAttachmentsRequest|Filter|tag + # DescribeTransitGatewayAttachmentsRequest|Filter|Value|array[0] + # DescribeTransitGatewayAttachmentsRequest|Filter|Value|array[0]|content + # DescribeTransitGatewayAttachmentsRequest|Filter|Value|array[0]|tag + # DescribeTransitGatewayAttachmentsRequest|Filter|Value|array[1] + # DescribeTransitGatewayAttachmentsRequest|Filter|Value|array[10] + # DescribeTransitGatewayAttachmentsRequest|Filter|Value|array[10]|content + # DescribeTransitGatewayAttachmentsRequest|Filter|Value|array[10]|tag + # DescribeTransitGatewayAttachmentsRequest|Filter|Value|array[1]|content + # DescribeTransitGatewayAttachmentsRequest|Filter|Value|array[1]|tag + # DescribeTransitGatewayAttachmentsRequest|Filter|Value|array[2] + # DescribeTransitGatewayAttachmentsRequest|Filter|Value|array[2]|content + # DescribeTransitGatewayAttachmentsRequest|Filter|Value|array[2]|tag + # DescribeTransitGatewayAttachmentsRequest|Filter|Value|array[3] + # DescribeTransitGatewayAttachmentsRequest|Filter|Value|array[3]|content + # DescribeTransitGatewayAttachmentsRequest|Filter|Value|array[3]|tag + # DescribeTransitGatewayAttachmentsRequest|Filter|Value|array[4] + # DescribeTransitGatewayAttachmentsRequest|Filter|Value|array[4]|content + # DescribeTransitGatewayAttachmentsRequest|Filter|Value|array[4]|tag + # DescribeTransitGatewayAttachmentsRequest|Filter|Value|array[5] + # DescribeTransitGatewayAttachmentsRequest|Filter|Value|array[5]|content + # DescribeTransitGatewayAttachmentsRequest|Filter|Value|array[5]|tag + # DescribeTransitGatewayAttachmentsRequest|Filter|Value|array[6] + # 
DescribeTransitGatewayAttachmentsRequest|Filter|Value|array[6]|content + # DescribeTransitGatewayAttachmentsRequest|Filter|Value|array[6]|tag + # DescribeTransitGatewayAttachmentsRequest|Filter|Value|array[7] + # DescribeTransitGatewayAttachmentsRequest|Filter|Value|array[7]|content + # DescribeTransitGatewayAttachmentsRequest|Filter|Value|array[7]|tag + # DescribeTransitGatewayAttachmentsRequest|Filter|Value|array[8] + # DescribeTransitGatewayAttachmentsRequest|Filter|Value|array[8]|content + # DescribeTransitGatewayAttachmentsRequest|Filter|Value|array[8]|tag + # DescribeTransitGatewayAttachmentsRequest|Filter|Value|array[9] + # DescribeTransitGatewayAttachmentsRequest|Filter|Value|array[9]|content + # DescribeTransitGatewayAttachmentsRequest|Filter|Value|array[9]|tag + # DescribeTransitGatewayAttachmentsRequest|MaxResults + # DescribeTransitGatewayConnectPeersRequest + # DescribeTransitGatewayConnectPeersRequest|MaxResults + # DescribeTransitGatewayPolicyTablesRequest + # DescribeTransitGatewayPolicyTablesRequest|MaxResults + # DescribeVpcEndpointsRequest + # DescribeVpcEndpointsRequest|MaxResults + # details + # detectorName + # dimensions|array[0] + # dimensions|array[0]|name + # dimensions|array[0]|value + # dimensions|array[1] + # dimensions|array[1]|name + # dimensions|array[1]|value + # dimensions|array[2] + # dimensions|array[2]|name + # dimensions|array[2]|value + # dimensions|array[3] + # dimensions|array[3]|name + # dimensions|array[3]|value + # disableApiStop + # disableApiTermination + # documentFormat + # documentName + # documentVersion + # dryRun + durationSeconds :Optional[str]=None + # encryption + # encryptionAlgorithm + # encryptionContext + # encryptionContext|aws:ebs:id + # encryptionContext|aws:logs:arn + # encryptionContext|aws:s3:arn + # encryptionContext|aws:ssm:SessionId + # encryptionContext|aws:ssm:TargetId + # encryptionContext|PARAMETER_ARN + # endTime + # evaluationFrequency + # evaluationPeriods + # eventBusName + # 
excludeAutoscalingAlarms + # excludeManagedAlarms + # executionResult + # executionResult|errorCode + # executionResult|executionDate + # executionResult|executionSummary + # executionResult|status + # executionSummary + # executionSummary|executionId + # executionSummary|executionTime + # executionSummary|executionType + # expression + # filterExpiredLogStreams + # filterPattern + # filters|array[0] + # Filters|array[0] + # filters|array[0]|key + # Filters|array[0]|Key + # filters|array[0]|name + # filters|array[0]|value + # filters|array[0]|values|array[0] + # Filters|array[0]|Values|array[0] + # filters|array[1] + # filters|array[1]|key + # filters|array[1]|value + # filterSet + # filterSet|items|array[0] + # filterSet|items|array[0]|name + # filterSet|items|array[0]|valueSet + # filterSet|items|array[1] + # filterSet|items|array[1]|name + # filterSet|items|array[1]|valueSet + # filterSet|items|array[1]|valueSet|items|array[0] + # filterSet|items|array[1]|valueSet|items|array[0]|value + # filtersWithOperator|array[0] + # filtersWithOperator|array[0]|key + # filtersWithOperator|array[0]|operator + # filtersWithOperator|array[0]|values|array[0] + # fingerprint + # force + # GetAllowedImagesSettingsRequest + # GetInstanceTypesFromInstanceRequirementsRequest + # GetInstanceTypesFromInstanceRequirementsRequest|ArchitectureType + # GetInstanceTypesFromInstanceRequirementsRequest|ArchitectureType|content + # GetInstanceTypesFromInstanceRequirementsRequest|ArchitectureType|tag + # GetInstanceTypesFromInstanceRequirementsRequest|InstanceRequirements + # GetInstanceTypesFromInstanceRequirementsRequest|InstanceRequirements|MemoryMiB + # GetInstanceTypesFromInstanceRequirementsRequest|InstanceRequirements|MemoryMiB|Min + # GetInstanceTypesFromInstanceRequirementsRequest|InstanceRequirements|VCpuCount + # GetInstanceTypesFromInstanceRequirementsRequest|InstanceRequirements|VCpuCount|Min + # GetInstanceTypesFromInstanceRequirementsRequest|VirtualizationType + # 
GetInstanceTypesFromInstanceRequirementsRequest|VirtualizationType|content + # GetInstanceTypesFromInstanceRequirementsRequest|VirtualizationType|tag + # GetSecurityGroupsForVpcRequest + # GetSecurityGroupsForVpcRequest|Filter + # GetSecurityGroupsForVpcRequest|Filter|Name + # GetSecurityGroupsForVpcRequest|Filter|tag + # GetSecurityGroupsForVpcRequest|Filter|Value + # GetSecurityGroupsForVpcRequest|Filter|Value|content + # GetSecurityGroupsForVpcRequest|Filter|Value|tag + # GetSecurityGroupsForVpcRequest|MaxResults + # GetSecurityGroupsForVpcRequest|VpcId + # granteePrincipal + # Host + # imagesSet + # imagesSet|items|array[0] + # imagesSet|items|array[0]|imageId + # includeAllInstances + # include|array[0] + # includePublic + # includePublicKey + # includes + # includeShadowTrails + # includeShared + # includes|hasDnsFqdn + # includes|keyTypes|array[0] + # includes|keyTypes|array[1] + # includes|keyTypes|array[2] + # includes|keyTypes|array[3] + # includes|keyTypes|array[4] + # includes|keyTypes|array[5] + # includes|keyTypes|array[6] + # instanceArns|array[0] + # instanceId + # instanceIds|array[0] + # instancesSet + # instancesSet|items|array[0] + # instancesSet|items|array[0]|instanceId + # instancesSet|items|array[0]|maxCount + # instancesSet|items|array[0]|minCount + # instancesSet|items|array[1] + # instancesSet|items|array[1]|instanceId + # instancesSet|items|array[2] + # instancesSet|items|array[2]|instanceId + # instancesSet|items|array[3] + # instancesSet|items|array[3]|instanceId + # instanceType + # intelligent-tiering + # interactive + # internetGatewayIdSet + # internetGatewayIdSet|items|array[0] + # internetGatewayIdSet|items|array[0]|internetGatewayId + # iPAddress + # itemContentHash + # items|array[0] + # items|array[0]|captureTime + # items|array[0]|contentHash + # items|array[0]|details + # items|array[0]|details|DocumentName + # items|array[0]|details|DocumentVersion + # items|array[0]|id + # items|array[0]|schemaVersion + # 
items|array[0]|severity + # items|array[0]|status + # items|array[0]|title + # items|array[0]|typeName + # items|array[1] + # items|array[1]|captureTime + # items|array[1]|contentHash + # items|array[1]|schemaVersion + # items|array[1]|typeName + # items|array[2] + # items|array[2]|captureTime + # items|array[2]|contentHash + # items|array[2]|schemaVersion + # items|array[2]|typeName + # items|array[3] + # items|array[3]|captureTime + # items|array[3]|contentHash + # items|array[3]|schemaVersion + # items|array[3]|typeName + # items|array[4] + # items|array[4]|captureTime + # items|array[4]|contentHash + # items|array[4]|schemaVersion + # items|array[4]|typeName + # keyId + # keySet + # keySet|items|array[0] + # keySet|items|array[0]|keyName + # keySpec + # kmsKeyId + # launchTemplate + # launchTemplate|launchTemplateId + # launchTemplate|version + # lifecycle + # limit + # listenerArn + # listenerArns|array[0] + # loadBalancerArn + # loadBalancerArns|array[0] + # location + # logging + # logGroupArnList|array[0] + # logGroupClass + # logGroupIdentifier + # logGroupIdentifiers|array[0] + # logGroupIdentifiers|array[1] + # logGroupIdentifiers|array[2] + # logGroupIdentifiers|array[3] + # logGroupIdentifiers|array[4] + # logGroupIdentifiers|array[5] + # logGroupIdentifiers|array[6] + # logGroupName + # logGroupNamePrefix + # logGroupNames|array[0] + # logStreamName + # logStreamNamePrefix + # lookupAttributes|array[0] + # lookupAttributes|array[0]|attributeKey + # lookupAttributes|array[0]|attributeValue + # marker + # maxConcurrency + # maxErrors + # MaxItems + # maxRecords + # maxResults + # MaxResults + # messageSchemaVersion + # metricName + # monitoring + # monitoring|enabled + # name + # namespace + # networkAclIdSet + # networkAclIdSet|items|array[0] + # networkAclIdSet|items|array[0]|networkAclId + # networkInterfaceIdSet + # networkInterfaceIdSet|items|array[0] + # networkInterfaceIdSet|items|array[0]|networkInterfaceId + # networkInterfaceSet + # 
networkInterfaceSet|items|array[0] + # networkInterfaceSet|items|array[0]|deviceIndex + # networkInterfaceSet|items|array[0]|networkCardIndex + # networkInterfaceSet|items|array[0]|subnetId + # newInstancesProtectedFromScaleIn + # nextToken + # notification + # numberOfBytes + # object-lock + # operations|array[0] + # orderBy + # outputS3BucketName + # overwrite + # ownershipControls + # ownersSet + # ownersSet|items|array[0] + # ownersSet|items|array[0]|owner + # pageSize + # paginationToken + # parameterFilters|array[0] + # parameterFilters|array[0]|key + # parameterFilters|array[0]|option + # parameterFilters|array[0]|values|array[0] + # parameters + # period + # permissionType + # platformName + # platformType + # platformVersion + # pluginName + # policy + # policyName + # policyNames|array[0] + # policyType + # policyTypes|array[0] + # policyTypes|array[1] + # policyTypes|array[2] + # preferences + # preferences|autoRollback + # preferences|maxHealthyPercentage + # preferences|minHealthyPercentage + # preferences|scaleInProtectedInstances + # preferences|skipMatching + # preferences|standbyInstances + # publicAccessBlock + # publicKey + # publicKeyType + # queryId + # queryLanguage + # queryString + + + # queryString|fields @timestamp, @message, @logStream, @log + # replication + # requestId + # requestPayment + # requireAcknowledgement + # resourceArn + # resourceARN + # resourceArns|array[0] + # resourceId + # resourcesPerPage + # resourcesSet + # resourcesSet|items|array[0] + # resourcesSet|items|array[0]|resourceId + # resourceType + # resourceTypeFilters|array[0] + # resultAttributes|array[0] + # resultAttributes|array[0]|typeName + # retentionInDays + # retiringPrincipal + roleArn:Optional[str]=None + roleSessionName:Optional[str]=None + # routeTableIdSet + # routeTableIdSet|items|array[0] + # routeTableIdSet|items|array[0]|routeTableId + # rule + # ruleArns|array[0] + # securityGroupIdSet + # securityGroupIdSet|items|array[0] + # 
securityGroupIdSet|items|array[0]|groupId + # ServerSideEncryptionConfiguration + # ServerSideEncryptionConfiguration|Rule + # ServerSideEncryptionConfiguration|Rule|ApplyServerSideEncryptionByDefault + # ServerSideEncryptionConfiguration|Rule|ApplyServerSideEncryptionByDefault|KMSMasterKeyID + # ServerSideEncryptionConfiguration|Rule|ApplyServerSideEncryptionByDefault|SSEAlgorithm + # ServerSideEncryptionConfiguration|Rule|BucketKeyEnabled + # ServerSideEncryptionConfiguration|xmlns + # serviceNamespace + # sessionId + # showSubscriptionDestinations + # snapshotType + # sSMConnectionChannel + # stackStatusFilter|array[0] + # stackStatusFilter|array[1] + # stackStatusFilter|array[10] + # stackStatusFilter|array[11] + # stackStatusFilter|array[12] + # stackStatusFilter|array[13] + # stackStatusFilter|array[14] + # stackStatusFilter|array[15] + # stackStatusFilter|array[16] + # stackStatusFilter|array[17] + # stackStatusFilter|array[18] + # stackStatusFilter|array[19] + # stackStatusFilter|array[2] + # stackStatusFilter|array[3] + # stackStatusFilter|array[4] + # stackStatusFilter|array[5] + # stackStatusFilter|array[6] + # stackStatusFilter|array[7] + # stackStatusFilter|array[8] + # stackStatusFilter|array[9] + # startFromHead + # startTime + # state + # stateValue + # statistic + # status + # subnetSet + # subnetSet|items|array[0] + # subnetSet|items|array[0]|subnetId + # tagFilters|array[0] + # tagFilters|array[0]|key + # tagging + # Tagging + # Tagging|TagSet + # Tagging|TagSet|Tag + # Tagging|TagSet|Tag|Key + # Tagging|TagSet|Tag|Value + # Tagging|xmlns + # tags + # tags|array[0] + # tags|array[0]|key + # tags|array[0]|tagKey + # tags|array[0]|tagValue + # tags|array[0]|value + # tagSet + # tagSet|items|array[0] + # tagSet|items|array[0]|key + # tagSet|items|array[0]|value + # tagSpecificationSet + # tagSpecificationSet|items|array[0] + # tagSpecificationSet|items|array[0]|resourceType + # tagSpecificationSet|items|array[0]|tags|array[0] + # 
tagSpecificationSet|items|array[0]|tags|array[0]|key + # tagSpecificationSet|items|array[0]|tags|array[0]|value + # tagSpecificationSet|items|array[0]|tags|array[1] + # tagSpecificationSet|items|array[0]|tags|array[1]|key + # tagSpecificationSet|items|array[0]|tags|array[1]|value + # tags|project + # target + # targetGroupArn + # targetGroupArns|array[0] + # targets|array[0] + # targets|array[0]|id + # targets|array[0]|key + # targets|array[0]|values|array[0] + # template + # threshold + # tier + # timeoutSeconds + # topicArn + # trafficSourceType + # type + # Type + # unmask + # value + # versioning + # visibility + # volumeSet + # volumeSet|items|array[0] + # volumeSet|items|array[0]|volumeId + # vpcId + # vpcSet + # vpcSet|items|array[0] + # vpcSet|items|array[0]|vpcId + # website + # windowId + # windowTaskId + # withDecryption + +class Credentials(BaseModel): + model_config = ConfigDict(extra="forbid") + +class ResponseElements(BaseModel): + model_config = ConfigDict(extra="forbid") + nextToken : Optional[str] = None + credentials : Optional[Credentials]=None +class TLSDetails(BaseModel): + model_config = ConfigDict(extra="forbid") + tlsVersion: str + cipherSuite: str + clientProvidedHostHeader: str +class Resource(BaseModel): + model_config = ConfigDict(extra="forbid") + account_id: str #, Field(None, alias="account_id")), + type: str + arn: str #, Field(None, alias="ARN")), + + +class EventDetail(BaseModel): + model_config = ConfigDict(extra="forbid") + event_version: Optional[str] = Field(None, alias="eventVersion") + user_identity: UserIdentity = Field(None, alias="userIdentity") + event_time: datetime = Field(None, alias="eventTime") + event_source: str = Field(None, alias="eventSource") + event_name: str = Field(None, alias="eventName") + aws_region: str = Field(None, alias="awsRegion") + source_ip_address: str = Field(None, alias="sourceIPAddress") + user_agent: str = Field(None, alias="userAgent") + request_parameters: Optional[RequestParameters] = 
Field( + None, alias="requestParameters" + ) + response_elements: Optional[ ResponseElements] = Field(None, alias="responseElements") + request_id: str = Field(None, alias="requestID") + event_id: str = Field(None, alias="eventID") + read_only: bool = Field(None, alias="readOnly") + tlsDetails: Optional[TLSDetails] + resources: Optional[List[Resource]] = None + event_type: str = Field(None, alias="eventType") + api_version: str = Field(None, alias="apiVersion") + management_event: bool = Field(None, alias="managementEvent") + recipient_account_id: str = Field(None, alias="recipientAccountId") + event_category: str = Field(None, alias="eventCategory") + session_credential_from_console: bool = Field( + None, alias="sessionCredentialFromConsole" + ) + + errorCode: Optional[str] = None + errorMessage: Optional[str] = None + +def process1(v,path): + skip =0 + #print(path,len(path)) + lpth = len(path) + + # eventType + # eventName + # eventVersion + # eventSource + # eventTime + + + # apiVersion + # awsRegion + # errorCode + # errorMessage + # eventCategory + # eventID + # managementEvent + # readOnly + # recipientAccountId + # requestID + # sessionCredentialFromConsole + # sharedEventID + # sourceIPAddress + + # tlsDetails + # tlsDetails|cipherSuite + # tlsDetails|clientProvidedHostHeader + # tlsDetails|tlsVersion + + # userAgent + + + # additionalEventData + # additionalEventData|AuthenticationMethod + # additionalEventData|bytesTransferredIn + # additionalEventData|bytesTransferredOut + # additionalEventData|CipherSuite + # additionalEventData|grantId + # additionalEventData|identityProviderConnectionVerificationMethod + # additionalEventData|LoginTo + # additionalEventData|MFAUsed + # additionalEventData|MobileVersion + # additionalEventData|service + # additionalEventData|SignatureVersion + # additionalEventData|x-amz-id-2 + + # resources|array[0] + # resources|array[0]|accountId + # resources|array[0]|ARN + # resources|array[0]|type + # resources|array[1] + # 
resources|array[10] + # resources|array[10]|accountId + # resources|array[10]|ARN + # resources|array[10]|type + # resources|array[11] + # resources|array[11]|accountId + # resources|array[11]|ARN + # resources|array[11]|type + # resources|array[1]|accountId + # resources|array[1]|ARN + # resources|array[1]|type + # resources|array[2] + # resources|array[2]|accountId + # resources|array[2]|ARN + # resources|array[2]|type + # resources|array[3] + # resources|array[3]|accountId + # resources|array[3]|ARN + # resources|array[3]|type + # resources|array[4] + # resources|array[4]|accountId + # resources|array[4]|ARN + # resources|array[4]|type + # resources|array[5] + # resources|array[5]|accountId + # resources|array[5]|ARN + # resources|array[5]|type + # resources|array[6] + # resources|array[6]|accountId + # resources|array[6]|ARN + # resources|array[6]|type + # resources|array[7] + # resources|array[7]|accountId + # resources|array[7]|ARN + # resources|array[7]|type + # resources|array[8] + # resources|array[8]|accountId + # resources|array[8]|ARN + # resources|array[8]|type + # resources|array[9] + # resources|array[9]|accountId + # resources|array[9]|ARN + # resources|array[9]|type + + + # responseElements + # responseElements|anomalyDetectorArn + # responseElements|assumedRoleUser + # responseElements|assumedRoleUser|arn + # responseElements|assumedRoleUser|assumedRoleId + # responseElements|audience + # responseElements|command + # responseElements|command|alarmConfiguration + # responseElements|command|alarmConfiguration|ignorePollAlarmFailure + # responseElements|command|clientName + # responseElements|command|clientSourceId + # responseElements|command|cloudWatchOutputConfig + # responseElements|command|cloudWatchOutputConfig|cloudWatchLogGroupName + # responseElements|command|cloudWatchOutputConfig|cloudWatchOutputEnabled + # responseElements|command|commandId + # responseElements|command|comment + # responseElements|command|completedCount + # 
responseElements|command|deliveryTimedOutCount + # responseElements|command|documentName + # responseElements|command|documentVersion + # responseElements|command|errorCount + # responseElements|command|expiresAfter + # responseElements|command|hasCancelCommandSignature + # responseElements|command|hasSendCommandSignature + # responseElements|command|instanceIds|array[0] + # responseElements|command|interactive + # responseElements|command|maxConcurrency + # responseElements|command|maxErrors + # responseElements|command|notificationConfig + # responseElements|command|notificationConfig|notificationArn + # responseElements|command|notificationConfig|notificationType + # responseElements|command|outputS3BucketName + # responseElements|command|outputS3KeyPrefix + # responseElements|command|outputS3Region + # responseElements|command|parameters + # responseElements|command|requestedDateTime + # responseElements|command|serviceRole + # responseElements|command|status + # responseElements|command|statusDetails + # responseElements|command|targetCount + # responseElements|command|targets|array[0] + # responseElements|command|targets|array[0]|key + # responseElements|command|targets|array[0]|values|array[0] + # responseElements|command|timeoutSeconds + # responseElements|ConsoleLogin + # responseElements|CreateLaunchTemplateResponse + # responseElements|CreateLaunchTemplateResponse|launchTemplate + # responseElements|CreateLaunchTemplateResponse|launchTemplate|createdBy + # responseElements|CreateLaunchTemplateResponse|launchTemplate|createTime + # responseElements|CreateLaunchTemplateResponse|launchTemplate|defaultVersionNumber + # responseElements|CreateLaunchTemplateResponse|launchTemplate|latestVersionNumber + # responseElements|CreateLaunchTemplateResponse|launchTemplate|launchTemplateId + # responseElements|CreateLaunchTemplateResponse|launchTemplate|launchTemplateName + # responseElements|CreateLaunchTemplateResponse|launchTemplate|operator + # 
responseElements|CreateLaunchTemplateResponse|launchTemplate|operator|managed + # responseElements|CreateLaunchTemplateResponse|launchTemplate|tagSet + # responseElements|CreateLaunchTemplateResponse|launchTemplate|tagSet|item + # responseElements|CreateLaunchTemplateResponse|launchTemplate|tagSet|item|key + # responseElements|CreateLaunchTemplateResponse|launchTemplate|tagSet|item|value + # responseElements|CreateLaunchTemplateResponse|requestId + # responseElements|CreateLaunchTemplateResponse|xmlns + # responseElements|CreateLaunchTemplateVersionResponse + # responseElements|CreateLaunchTemplateVersionResponse|launchTemplateVersion + # responseElements|CreateLaunchTemplateVersionResponse|launchTemplateVersion|createdBy + # responseElements|CreateLaunchTemplateVersionResponse|launchTemplateVersion|createTime + # responseElements|CreateLaunchTemplateVersionResponse|launchTemplateVersion|defaultVersion + # responseElements|CreateLaunchTemplateVersionResponse|launchTemplateVersion|launchTemplateData + # responseElements|CreateLaunchTemplateVersionResponse|launchTemplateVersion|launchTemplateData|blockDeviceMappingSet + # responseElements|CreateLaunchTemplateVersionResponse|launchTemplateVersion|launchTemplateData|blockDeviceMappingSet|item + # responseElements|CreateLaunchTemplateVersionResponse|launchTemplateVersion|launchTemplateData|blockDeviceMappingSet|item|deviceName + # responseElements|CreateLaunchTemplateVersionResponse|launchTemplateVersion|launchTemplateData|blockDeviceMappingSet|item|ebs + # responseElements|CreateLaunchTemplateVersionResponse|launchTemplateVersion|launchTemplateData|blockDeviceMappingSet|item|ebs|encrypted + # responseElements|CreateLaunchTemplateVersionResponse|launchTemplateVersion|launchTemplateData|blockDeviceMappingSet|item|ebs|volumeSize + # responseElements|CreateLaunchTemplateVersionResponse|launchTemplateVersion|launchTemplateData|blockDeviceMappingSet|item|ebs|volumeType + # 
responseElements|CreateLaunchTemplateVersionResponse|launchTemplateVersion|launchTemplateData|iamInstanceProfile + # responseElements|CreateLaunchTemplateVersionResponse|launchTemplateVersion|launchTemplateData|iamInstanceProfile|name + # responseElements|CreateLaunchTemplateVersionResponse|launchTemplateVersion|launchTemplateData|imageId + # responseElements|CreateLaunchTemplateVersionResponse|launchTemplateVersion|launchTemplateData|instanceType + # responseElements|CreateLaunchTemplateVersionResponse|launchTemplateVersion|launchTemplateData|keyName + # responseElements|CreateLaunchTemplateVersionResponse|launchTemplateVersion|launchTemplateData|networkInterfaceSet + # responseElements|CreateLaunchTemplateVersionResponse|launchTemplateVersion|launchTemplateData|networkInterfaceSet|item + # responseElements|CreateLaunchTemplateVersionResponse|launchTemplateVersion|launchTemplateData|networkInterfaceSet|item|associatePublicIpAddress + # responseElements|CreateLaunchTemplateVersionResponse|launchTemplateVersion|launchTemplateData|networkInterfaceSet|item|deleteOnTermination + # responseElements|CreateLaunchTemplateVersionResponse|launchTemplateVersion|launchTemplateData|networkInterfaceSet|item|deviceIndex + # responseElements|CreateLaunchTemplateVersionResponse|launchTemplateVersion|launchTemplateData|networkInterfaceSet|item|groupSet + # responseElements|CreateLaunchTemplateVersionResponse|launchTemplateVersion|launchTemplateData|networkInterfaceSet|item|groupSet|groupId + # responseElements|CreateLaunchTemplateVersionResponse|launchTemplateVersion|launchTemplateData|networkInterfaceSet|item|networkCardIndex + # responseElements|CreateLaunchTemplateVersionResponse|launchTemplateVersion|launchTemplateData|userData + # responseElements|CreateLaunchTemplateVersionResponse|launchTemplateVersion|launchTemplateId + # responseElements|CreateLaunchTemplateVersionResponse|launchTemplateVersion|launchTemplateName + # 
responseElements|CreateLaunchTemplateVersionResponse|launchTemplateVersion|operator + # responseElements|CreateLaunchTemplateVersionResponse|launchTemplateVersion|operator|managed + # responseElements|CreateLaunchTemplateVersionResponse|launchTemplateVersion|versionNumber + # responseElements|CreateLaunchTemplateVersionResponse|requestId + # responseElements|CreateLaunchTemplateVersionResponse|xmlns + # responseElements|credentials + # responseElements|credentials|accessKeyId + # responseElements|credentials|expiration + # responseElements|credentials|sessionToken + # responseElements|DeleteLaunchTemplateResponse + # responseElements|DeleteLaunchTemplateResponse|launchTemplate + # responseElements|DeleteLaunchTemplateResponse|launchTemplate|createdBy + # responseElements|DeleteLaunchTemplateResponse|launchTemplate|createTime + # responseElements|DeleteLaunchTemplateResponse|launchTemplate|defaultVersionNumber + # responseElements|DeleteLaunchTemplateResponse|launchTemplate|latestVersionNumber + # responseElements|DeleteLaunchTemplateResponse|launchTemplate|launchTemplateId + # responseElements|DeleteLaunchTemplateResponse|launchTemplate|launchTemplateName + # responseElements|DeleteLaunchTemplateResponse|launchTemplate|operator + # responseElements|DeleteLaunchTemplateResponse|launchTemplate|operator|managed + # responseElements|DeleteLaunchTemplateResponse|requestId + # responseElements|DeleteLaunchTemplateResponse|xmlns + # responseElements|description + # responseElements|description|defaultVersion + # responseElements|description|name + # responseElements|documentDescription + # responseElements|documentDescription|createdDate + # responseElements|documentDescription|defaultVersion + # responseElements|documentDescription|documentFormat + # responseElements|documentDescription|documentId + # responseElements|documentDescription|documentType + # responseElements|documentDescription|documentVersion + # responseElements|documentDescription|hash + # 
responseElements|documentDescription|hashType + # responseElements|documentDescription|latestVersion + # responseElements|documentDescription|name + # responseElements|documentDescription|owner + # responseElements|documentDescription|platformTypes|array[0] + # responseElements|documentDescription|platformTypes|array[1] + # responseElements|documentDescription|platformTypes|array[2] + # responseElements|documentDescription|schemaVersion + # responseElements|documentDescription|status + # responseElements|documentDescription|tags|array[0] + # responseElements|documentDescription|tags|array[0]|key + # responseElements|documentDescription|tags|array[0]|value + # responseElements|grantId + # responseElements|instanceId + # responseElements|instanceRefreshId + # responseElements|instancesSet + # responseElements|instancesSet|items|array[0] + # responseElements|instancesSet|items|array[0]|amiLaunchIndex + # responseElements|instancesSet|items|array[0]|architecture + # responseElements|instancesSet|items|array[0]|bootMode + # responseElements|instancesSet|items|array[0]|capacityReservationSpecification + # responseElements|instancesSet|items|array[0]|capacityReservationSpecification|capacityReservationPreference + # responseElements|instancesSet|items|array[0]|clientToken + # responseElements|instancesSet|items|array[0]|cpuOptions + # responseElements|instancesSet|items|array[0]|cpuOptions|coreCount + # responseElements|instancesSet|items|array[0]|cpuOptions|threadsPerCore + # responseElements|instancesSet|items|array[0]|currentInstanceBootMode + # responseElements|instancesSet|items|array[0]|currentState + # responseElements|instancesSet|items|array[0]|currentState|code + # responseElements|instancesSet|items|array[0]|currentState|name + # responseElements|instancesSet|items|array[0]|ebsOptimized + # responseElements|instancesSet|items|array[0]|enaSupport + # responseElements|instancesSet|items|array[0]|enclaveOptions + # 
responseElements|instancesSet|items|array[0]|enclaveOptions|enabled + # responseElements|instancesSet|items|array[0]|groupSet + # responseElements|instancesSet|items|array[0]|groupSet|items|array[0] + # responseElements|instancesSet|items|array[0]|groupSet|items|array[0]|groupId + # responseElements|instancesSet|items|array[0]|groupSet|items|array[0]|groupName + # responseElements|instancesSet|items|array[0]|hypervisor + # responseElements|instancesSet|items|array[0]|iamInstanceProfile + # responseElements|instancesSet|items|array[0]|iamInstanceProfile|arn + # responseElements|instancesSet|items|array[0]|iamInstanceProfile|id + # responseElements|instancesSet|items|array[0]|imageId + # responseElements|instancesSet|items|array[0]|instanceId + # responseElements|instancesSet|items|array[0]|instanceState + # responseElements|instancesSet|items|array[0]|instanceState|code + # responseElements|instancesSet|items|array[0]|instanceState|name + # responseElements|instancesSet|items|array[0]|instanceType + # responseElements|instancesSet|items|array[0]|keyName + # responseElements|instancesSet|items|array[0]|launchTime + # responseElements|instancesSet|items|array[0]|maintenanceOptions + # responseElements|instancesSet|items|array[0]|maintenanceOptions|autoRecovery + # responseElements|instancesSet|items|array[0]|metadataOptions + # responseElements|instancesSet|items|array[0]|metadataOptions|httpEndpoint + # responseElements|instancesSet|items|array[0]|metadataOptions|httpProtocolIpv4 + # responseElements|instancesSet|items|array[0]|metadataOptions|httpProtocolIpv6 + # responseElements|instancesSet|items|array[0]|metadataOptions|httpPutResponseHopLimit + # responseElements|instancesSet|items|array[0]|metadataOptions|httpTokens + # responseElements|instancesSet|items|array[0]|metadataOptions|instanceMetadataTags + # responseElements|instancesSet|items|array[0]|metadataOptions|state + # responseElements|instancesSet|items|array[0]|monitoring + # 
responseElements|instancesSet|items|array[0]|monitoring|state + # responseElements|instancesSet|items|array[0]|networkInterfaceSet + # responseElements|instancesSet|items|array[0]|networkInterfaceSet|items|array[0] + # responseElements|instancesSet|items|array[0]|networkInterfaceSet|items|array[0]|attachment + # responseElements|instancesSet|items|array[0]|networkInterfaceSet|items|array[0]|attachment|attachmentId + # responseElements|instancesSet|items|array[0]|networkInterfaceSet|items|array[0]|attachment|attachTime + # responseElements|instancesSet|items|array[0]|networkInterfaceSet|items|array[0]|attachment|deleteOnTermination + # responseElements|instancesSet|items|array[0]|networkInterfaceSet|items|array[0]|attachment|deviceIndex + # responseElements|instancesSet|items|array[0]|networkInterfaceSet|items|array[0]|attachment|networkCardIndex + # responseElements|instancesSet|items|array[0]|networkInterfaceSet|items|array[0]|attachment|status + # responseElements|instancesSet|items|array[0]|networkInterfaceSet|items|array[0]|groupSet + # responseElements|instancesSet|items|array[0]|networkInterfaceSet|items|array[0]|groupSet|items|array[0] + # responseElements|instancesSet|items|array[0]|networkInterfaceSet|items|array[0]|groupSet|items|array[0]|groupId + # responseElements|instancesSet|items|array[0]|networkInterfaceSet|items|array[0]|groupSet|items|array[0]|groupName + # responseElements|instancesSet|items|array[0]|networkInterfaceSet|items|array[0]|interfaceType + # responseElements|instancesSet|items|array[0]|networkInterfaceSet|items|array[0]|macAddress + # responseElements|instancesSet|items|array[0]|networkInterfaceSet|items|array[0]|networkInterfaceId + # responseElements|instancesSet|items|array[0]|networkInterfaceSet|items|array[0]|operator + # responseElements|instancesSet|items|array[0]|networkInterfaceSet|items|array[0]|operator|managed + # responseElements|instancesSet|items|array[0]|networkInterfaceSet|items|array[0]|ownerId + # 
responseElements|instancesSet|items|array[0]|networkInterfaceSet|items|array[0]|privateDnsName + # responseElements|instancesSet|items|array[0]|networkInterfaceSet|items|array[0]|privateIpAddress + # responseElements|instancesSet|items|array[0]|networkInterfaceSet|items|array[0]|privateIpAddressesSet + # responseElements|instancesSet|items|array[0]|networkInterfaceSet|items|array[0]|privateIpAddressesSet|item|array[0] + # responseElements|instancesSet|items|array[0]|networkInterfaceSet|items|array[0]|privateIpAddressesSet|item|array[0]|primary + # responseElements|instancesSet|items|array[0]|networkInterfaceSet|items|array[0]|privateIpAddressesSet|item|array[0]|privateDnsName + # responseElements|instancesSet|items|array[0]|networkInterfaceSet|items|array[0]|privateIpAddressesSet|item|array[0]|privateIpAddress + # responseElements|instancesSet|items|array[0]|networkInterfaceSet|items|array[0]|sourceDestCheck + # responseElements|instancesSet|items|array[0]|networkInterfaceSet|items|array[0]|status + # responseElements|instancesSet|items|array[0]|networkInterfaceSet|items|array[0]|subnetId + # responseElements|instancesSet|items|array[0]|networkInterfaceSet|items|array[0]|vpcId + # responseElements|instancesSet|items|array[0]|operator + # responseElements|instancesSet|items|array[0]|operator|managed + # responseElements|instancesSet|items|array[0]|placement + # responseElements|instancesSet|items|array[0]|placement|availabilityZone + # responseElements|instancesSet|items|array[0]|placement|tenancy + # responseElements|instancesSet|items|array[0]|previousState + # responseElements|instancesSet|items|array[0]|previousState|code + # responseElements|instancesSet|items|array[0]|previousState|name + # responseElements|instancesSet|items|array[0]|privateDnsName + # responseElements|instancesSet|items|array[0]|privateDnsNameOptions + # responseElements|instancesSet|items|array[0]|privateDnsNameOptions|enableResourceNameDnsAAAARecord + # 
responseElements|instancesSet|items|array[0]|privateDnsNameOptions|enableResourceNameDnsARecord + # responseElements|instancesSet|items|array[0]|privateDnsNameOptions|hostnameType + # responseElements|instancesSet|items|array[0]|privateIpAddress + # responseElements|instancesSet|items|array[0]|rootDeviceName + # responseElements|instancesSet|items|array[0]|rootDeviceType + # responseElements|instancesSet|items|array[0]|sourceDestCheck + # responseElements|instancesSet|items|array[0]|stateReason + # responseElements|instancesSet|items|array[0]|stateReason|code + # responseElements|instancesSet|items|array[0]|stateReason|message + # responseElements|instancesSet|items|array[0]|subnetId + # responseElements|instancesSet|items|array[0]|tagSet + # responseElements|instancesSet|items|array[0]|tagSet|items|array[0] + # responseElements|instancesSet|items|array[0]|tagSet|items|array[0]|key + # responseElements|instancesSet|items|array[0]|tagSet|items|array[0]|value + # responseElements|instancesSet|items|array[0]|tagSet|items|array[1] + # responseElements|instancesSet|items|array[0]|tagSet|items|array[1]|key + # responseElements|instancesSet|items|array[0]|tagSet|items|array[1]|value + # responseElements|instancesSet|items|array[0]|tagSet|items|array[2] + # responseElements|instancesSet|items|array[0]|tagSet|items|array[2]|key + # responseElements|instancesSet|items|array[0]|tagSet|items|array[2]|value + # responseElements|instancesSet|items|array[0]|tagSet|items|array[3] + # responseElements|instancesSet|items|array[0]|tagSet|items|array[3]|key + # responseElements|instancesSet|items|array[0]|tagSet|items|array[3]|value + # responseElements|instancesSet|items|array[0]|virtualizationType + # responseElements|instancesSet|items|array[0]|vpcId + # responseElements|keyId + # responseElements|messageSchemaVersion + # responseElements|ownerId + # responseElements|packedPolicySize + # responseElements|provider + # responseElements|queryId + # responseElements|requesterId + # 
responseElements|requestId + # responseElements|reservationId + # responseElements|_return + # responseElements|sessionId + # responseElements|streamUrl + # responseElements|subjectFromWebIdentityToken + # responseElements|tier + # responseElements|tokenValue + # responseElements|version + + # serviceEventDetails + # serviceEventDetails|DocumentName + # serviceEventDetails|IdleSessionTimeout + # serviceEventDetails|IsCloudWatchEncryptionEnabled + # serviceEventDetails|IsKmsEncryptionEnabled + # serviceEventDetails|IsS3EncryptionEnabled + # serviceEventDetails|MaxSessionDuration + + + # userIdentity + + # vpcEndpointAccountId + # vpcEndpointId + + if (lpth == 6) and (path[0] in ["CloudTrailEvent"]) and (path[1] in ["userIdentity"]) and (path[2] in ["sessionContext"]) and (path[3] in ["sessionIssuer"]) and (path[4] in ["userName"]) : + if path[5] in ["aws:ec2-infrastructure", + "aws:ec2-instance", + "AWSServiceRoleForAmazonSSM" + "AWSServiceRoleForAutoScaling", + "AWSServiceRoleForCloudWatchApplicationSignals", + "AWSServiceRoleForConfig", + "AWSServiceRoleForResourceExplorer", + "github", + "ssm-swarms-role"]: + interesting =1 + elif (path[4] in ["accountId","arn","principalId","type","userName"]): + pass + +#if (lpth == 6) and (path[0] in ["CloudTrailEvent"]) and (path[1] in ["userIdentity"]) and (path[2] in ["sessionContext"]) and (path[3] in ["webIdFederationData"]) and (path[4] in ["federatedProvider"]) and (path[5] in ["arn:aws:iam::916723593639:oidc-provider/token.actions.githubusercontent.com"]): + + # if (lpth>2): + # if path[1] in ["userIdentity"]: + # if path[2] in ["accessKeyId"]: + # skip =1 + # if path[1] in [ "requestParameters"]: + + # if path[2] in [ "startTime", "nextToken"]: + # skip =1 + + #elif (lpth>1): + # if path[1] in ["eventID","requestID","eventTime"]: + # skip = 1 + if skip: + return + if isinstance(v,list): + for i,j in enumerate(v): + path2= path.copy() + path2.extend(["array" + "["+str(i)+"]"]) + #v2 = v[i] + process1(j,path2) + + elif 
isinstance(v,dict): + # we are in the next level of substructrue + for k2 in v: + v2= v[k2] + path2= path.copy() + path2.extend([k2]) + qk2 = SEP.join(path2) + vt2 = type(v2) + if qk2 not in seen: + + seen[qk2] =1 + else: + seen[qk2] = seen[qk2] + 1 + # some of these we can add directly + #print("DEBUG2",qk2,vt2) + process1(v2,path2) + else: + path2= path.copy() + path2.extend([str(v)]) + qk2 = SEP.join(path2) + if qk2 not in seen: + seen[qk2] =1 + else: + seen[qk2] = seen[qk2] + 1 + # some of these we can add directly + #print("DEBUG3",qk2,v,seen[qk2]) + #process1("1",path2) + +report = {} +results = {} +items = [] +for log_file in log_files: + with open(log_file, 'r') as f: + try: + event_data = json.load(f) + except Exception as e: + print(log_file,e) + e1 = event_data.get("Events", []) + for e in e1: + #print(e1) + target = "CloudTrailEvent" + if target in e: + e2 = json.loads(e[target]) # eval again + #print("DEBUG1",e2) + #ct = EventDetail(**e2) + items.append(e2) + +result = generate(items, + disable_timestamp=True, + enable_version_header = False, + input_file_type=InputFileType.Dict, + input_filename=None, + #output=output_file, + output_model_type=DataModelType.PydanticV2BaseModel, + snake_case_field=True + ) +#print("DEBUG1",e2, result) +if result not in results: + results[result] =1 + print(result) + for k in e2: + v= e2[k] + qualified_path = [target,k] + qk = SEP.join(qualified_path) + vt = type(v) + if qk not in seen: + seen[qk] =1 + else: + seen[qk] = seen[qk] +1 + process1(v,qualified_path) + + # now report on the event + facts = sorted(seen.keys()) + seen = {} + #for x in facts: + # v = seen[x] + # if (v>0): + #facts.append("\t".join([str(v),x])) + # seen[x] =0 # reset + #for p in combinations(facts,3): + #print(facts) + for f in facts: + #k = ".".join(p) + if f not in report: + report[f] = 1 + else: + report[f] = report[f] + 1 + +for k in report: + parts = k.split("|") + parts.pop() # remove the last one + #lpth = len(parts) + #parts2 = f"if (lpth 
== {lpth}) " + #for e,i in enumerate(parts): + # parts2 = parts2 + f" and (path[{e}] in [\"{i}\"])" + #print(parts2 + ":") + #print("""|".join(parts[1:])) + #print(k,report[k]) +#print(all_events) # or process the events as needed + +# ### Explanation: +# 1. **glob** is used to match all log files in the `logs` directory. +# 2. The code reads each log file line by line and attempts to decode each line as JSON. +# 3. Any parsed events are collected in the `all_events` list. +# 4. Handle JSON decoding errors gracefully. + + + +# import boto3, argparse, json +# from datetime import datetime, timedelta + +# # Parse command line arguments +# parser = argparse.ArgumentParser(description='Generate an IAM policy based on CloudTrail events.') +# parser.add_argument('--service', dest='service_name', required=True, +# help='The name of the AWS service to generate a policy for (e.g., "ec2", "s3", "lambda", etc.)') +# parser.add_argument('--region', dest='region_name', required=True, +# help='The AWS region to search for CloudTrail events in') +# parser.add_argument('--hours', dest='hours_ago', type=int, default=2, +# help='The number of hours to look back in the CloudTrail events (default is 2)') +# args = parser.parse_args() + +# # Initialize CloudTrail client +# client = boto3.client('cloudtrail', region_name=args.region_name) + +# # Calculate start time for CloudTrail lookup +# start_time = datetime.utcnow() - timedelta(hours=args.hours_ago) + +# # Dictionary to store permissions by service +# permissions_by_service = {} + +# # Paginate through CloudTrail events +# for response in client.get_paginator('lookup_events').paginate( +# StartTime=start_time, +# EndTime=datetime.utcnow(), +# LookupAttributes=[ +# { +# 'AttributeKey': 'EventSource', +# 'AttributeValue': f'{args.service_name}.amazonaws.com' +# } +# ] +# ): +# # Iterate through events and extract permissions +# for event in response['Events']: +# permission = event['EventName'] +# if ":" in permission: +# service, 
action = permission.split(':') +# else: +# service = args.service_name +# action = permission +# permissions_by_service.setdefault(service, set()).add(action) + +# # Create policy statement +# policy = { +# "Version": "2012-10-17", +# "Statement": [] +# } + +# # Iterate through permissions by service and add to policy statement +# for service, actions in permissions_by_service.items(): +# statement = { +# "Sid": "VisualEditor0", +# "Effect": "Allow", +# "Action": [f"{service}:{action}" for action in actions], +# "Resource": "*" +# } +# policy["Statement"].append(statement) + +# # Print policy in JSON format +# print(f"last: {args.hours_ago}h") +# print(f"service name filter: {args.service_name}") +# print(json.dumps(policy, indent=4)) + +ideas = """ +1. some fields have unique values, count of one, all of them. +CloudTrailEvent.requestID +CloudTrailEvent.eventID + +2. some timestamps overlap, we can round up the time into chunks. +""" diff --git a/logs/report.sh b/logs/report.sh new file mode 100644 index 0000000..2bb39d9 --- /dev/null +++ b/logs/report.sh @@ -0,0 +1,8 @@ +# get the lines +gron ./logs/*.log | grep -i error > report.txt + +# now we decode +gron -u < report.txt | jq -r ".Events[]|.CloudTrailEvent" | jq | sort | uniq -c | sort -n + + +#gron -u < report.txt | jq -r ".Events[]|.CloudTrailEvent" | jq diff --git a/main.tf b/main.tf new file mode 100644 index 0000000..d31853b --- /dev/null +++ b/main.tf @@ -0,0 +1,4 @@ +# this +module "swarms" { + source = "./accounts/swarms" +} diff --git a/modules/aws/ssm/observability/install.sh b/modules/aws/ssm/observability/install.sh new file mode 100644 index 0000000..3c5bc20 --- /dev/null +++ b/modules/aws/ssm/observability/install.sh @@ -0,0 +1,30 @@ +#!/bin/bash +# lets make this bash script into a terraform template +#aws_ssm_parameter_cw_agent_config_name=${aws_ssm_parameter.cw_agent_config.name} +# Update package lists +apt-get update +apt-get upgrade -y + +# Install prerequisite packages +apt-get install -y 
wget unzip systemd + +# Download and install the CloudWatch agent +wget https://s3.amazonaws.com/amazoncloudwatch-agent/ubuntu/amd64/latest/amazon-cloudwatch-agent.deb +dpkg -i -E ./amazon-cloudwatch-agent.deb + +# In case of missing dependencies +apt-get install -f -y + +# Configure and start the CloudWatch agent +/opt/aws/amazon-cloudwatch-agent/bin/amazon-cloudwatch-agent-ctl -a fetch-config -m ec2 -s -c "ssm:${aws_ssm_parameter_cw_agent_config_name}" + + +# Enable and start the service using systemctl +systemctl enable amazon-cloudwatch-agent +systemctl start amazon-cloudwatch-agent + +# Clean up downloaded files +rm -f amazon-cloudwatch-agent.deb + +# Verify installation +systemctl status amazon-cloudwatch-agent diff --git a/modules/aws/ssm/observability/jobs/main.tf b/modules/aws/ssm/observability/jobs/main.tf new file mode 100644 index 0000000..34b23c8 --- /dev/null +++ b/modules/aws/ssm/observability/jobs/main.tf @@ -0,0 +1,251 @@ +# S3 Bucket for SSM logs and outputs +resource "aws_s3_bucket" "ssm_logs" { + bucket = "ssm-operation-logs-${data.aws_caller_identity.current.account_id}" +} + +resource "aws_s3_bucket_versioning" "ssm_logs" { + bucket = aws_s3_bucket.ssm_logs.id + versioning_configuration { + status = "Enabled" + } +} + +resource "aws_s3_bucket_lifecycle_configuration" "ssm_logs" { + bucket = aws_s3_bucket.ssm_logs.id + + rule { + id = "cleanup_old_logs" + status = "Enabled" + + transition { + days = 30 + storage_class = "STANDARD_IA" + } + + expiration { + days = 90 + } + } +} + +# SSM Session logging to S3 +resource "aws_s3_bucket_policy" "ssm_logs" { + bucket = aws_s3_bucket.ssm_logs.id + policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Sid = "SSMBucketPermission" + Effect = "Allow" + Principal = { + Service = "ssm.amazonaws.com" + } + Action = [ + "s3:PutObject", + "s3:PutObjectAcl" + ] + Resource = "${aws_s3_bucket.ssm_logs.arn}/*" + Condition = { + StringEquals = { + "s3:x-amz-acl" = "bucket-owner-full-control" + } + } 
+ } + ] + }) +} + +# CloudWatch Log Group for SSM +resource "aws_cloudwatch_log_group" "ssm_logs" { + name = "/aws/ssm/operations" + retention_in_days = 30 +} + +# X-Ray tracing configuration +resource "aws_xray_sampling_rule" "ssm_tracing" { + rule_name = "SSMOperations" + priority = 1000 + reservoir_size = 1 + fixed_rate = 0.05 + host = "*" + http_method = "*" + url_path = "*" + service_name = "*" + service_type = "*" + version = 1 +} + +# IAM role updates for X-Ray and enhanced logging +resource "aws_iam_role_policy_attachment" "xray_policy" { + role = aws_iam_role.maintenance_window_role.name + policy_arn = "arn:aws:iam::aws:policy/AWSXRayDaemonWriteAccess" +} + +# Custom policy for S3 and CloudWatch access +resource "aws_iam_role_policy" "ssm_logging" { + name = "ssm-logging-policy" + role = aws_iam_role.maintenance_window_role.name + + policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Effect = "Allow" + Action = [ + "s3:PutObject", + "s3:GetObject", + "s3:PutObjectAcl" + ] + Resource = "${aws_s3_bucket.ssm_logs.arn}/*" + }, + { + Effect = "Allow" + Action = [ + "logs:CreateLogGroup", + "logs:CreateLogStream", + "logs:PutLogEvents", + "logs:DescribeLogStreams" + ] + Resource = "${aws_cloudwatch_log_group.ssm_logs.arn}:*" + } + ] + }) +} + +# Updated SSM Document for Python script execution with X-Ray tracing +resource "aws_ssm_document" "python_with_xray" { + name = "RunPythonWithXRay" + document_type = "Command" + document_format = "YAML" + + content = < /tmp/xray-daemon.json << 'EOF' + { + "Version": 1, + "RecordingName": "SSMOperation", + "RecordingType": "ServiceType", + "SamplingRule": { + "FixedRate": 1, + "ReservoirSize": 5 + } + } + EOF + - curl https://s3.us-east-2.amazonaws.com/aws-xray-assets.us-east-2/xray-daemon/aws-xray-daemon-linux-3.x.zip -o /tmp/xray-daemon.zip + - unzip /tmp/xray-daemon.zip -d /opt/xray + - /opt/xray/xray-daemon -c /tmp/xray-daemon.json & + - action: aws:runShellScript + name: ExecuteScript + inputs: + 
runCommand: + - | + cat > /tmp/wrapper.py << 'EOF' + import boto3 + from aws_xray_sdk.core import xray_recorder + from aws_xray_sdk.core import patch_all + import os + import sys + import json + + # Initialize X-Ray + xray_recorder.configure( + context_missing='LOG_ERROR', + service='SSMPythonOperation' + ) + patch_all() + + # Start X-Ray segment + segment = xray_recorder.begin_segment('SSMPythonScript') + + try: + # Execute the actual script + with open('/tmp/script.py', 'r') as f: + exec(f.read()) + except Exception as e: + segment.put_annotation('error', str(e)) + raise + finally: + xray_recorder.end_segment() + EOF + - echo "{{ ScriptContent }}" > /tmp/script.py + - python3 /tmp/wrapper.py +DOC +} + +# CloudWatch Dashboard for SSM Operations +resource "aws_cloudwatch_dashboard" "ssm_operations" { + dashboard_name = "SSMOperations" + + dashboard_body = jsonencode({ + widgets = [ + { + type = "metric" + x = 0 + y = 0 + width = 12 + height = 6 + properties = { + metrics = [ + ["AWS/SSM", "CommandSuccess", "DocumentName", "RunPythonWithXRay"], + ["AWS/SSM", "CommandFailed", "DocumentName", "RunPythonWithXRay"] + ] + period = 300 + stat = "Sum" + region = var.aws_region + title = "SSM Command Execution Status" + } + }, + { + type = "log" + x = 0 + y = 6 + width = 12 + height = 6 + properties = { + query = "fields @timestamp, @message | sort @timestamp desc | limit 20" + region = var.aws_region + title = "Recent SSM Operation Logs" + view = "table" + logGroupName = aws_cloudwatch_log_group.ssm_logs.name + } + } + ] + }) +} + +# Get current account ID +data "aws_caller_identity" "current" {} + +# CloudWatch Metric Alarm for Failed Commands +resource "aws_cloudwatch_metric_alarm" "ssm_failures" { + alarm_name = "ssm-command-failures" + comparison_operator = "GreaterThanThreshold" + evaluation_periods = "1" + metric_name = "CommandFailed" + namespace = "AWS/SSM" + period = "300" + statistic = "Sum" + threshold = "0" + alarm_description = "This metric monitors failed SSM 
commands" + + dimensions = { + DocumentName = aws_ssm_document.python_with_xray.name + } +} \ No newline at end of file diff --git a/modules/aws/ssm/observability/main.tf b/modules/aws/ssm/observability/main.tf new file mode 100755 index 0000000..f3f4f11 --- /dev/null +++ b/modules/aws/ssm/observability/main.tf @@ -0,0 +1,540 @@ +variable "ami_id" {} +# Previous provider and variables configuration remains the same +#provider "aws" { +# region = var.aws_region +#} + +variable "aws_region" { + description = "AWS region" + type = string + default = "us-east-2" +} + +variable "instance_type" { + description = "EC2 instance type" + type = string + default = "t3.micro" +} + +variable "patch_schedule" { + description = "Cron expression for patch schedule" + type = string + default = "cron(0 0 ? * SUN *)" # Run at midnight every Sunday +} + +# Update EC2 role to include SSM permissions +resource "aws_iam_role" "ec2_monitoring_role" { + name = "ec2-monitoring-role" + + assume_role_policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Action = "sts:AssumeRole" + Effect = "Allow" + Principal = { + Service = "ec2.amazonaws.com" + } + } + ] + }) +} + +# Add SSM policy attachments +resource "aws_iam_role_policy_attachment" "ssm_policy" { + role = aws_iam_role.ec2_monitoring_role.name + policy_arn = "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore" +} + +resource "aws_iam_role_policy_attachment" "cloudwatch_agent_policy" { + role = aws_iam_role.ec2_monitoring_role.name + policy_arn = "arn:aws:iam::aws:policy/CloudWatchAgentServerPolicy" +} + +# Previous instance profile configuration remains the same +resource "aws_iam_instance_profile" "monitoring_profile" { + name = "ec2-monitoring-profile" + role = aws_iam_role.ec2_monitoring_role.name +} + +# SSM Patch Baseline +resource "aws_ssm_patch_baseline" "os_patches" { + name = "ec2-patch-baseline" + operating_system = "AMAZON_LINUX_2" + + approval_rule { + approve_after_days = 7 + compliance_level = "HIGH" + + 
# (continuation of resource "aws_ssm_patch_baseline" "os_patches" — the
#  resource header and the opening of its approval_rule block are above
#  this chunk; only the tail is visible here)
    patch_filter {
      key    = "CLASSIFICATION"
      values = ["Security", "Bugfix", "Recommended"]
      # Valid values are: Security, Bugfix, Enhancement, Recommended, Newpackage.
    }

    patch_filter {
      key    = "SEVERITY"
      values = ["Critical", "Important"]
    }
  }

  tags = {
    Environment = "Production"
  }
}

# SSM Patch Group — binds the baseline above to every instance tagged
# PatchGroup = "production-servers".
resource "aws_ssm_patch_group" "patch_group" {
  baseline_id = aws_ssm_patch_baseline.os_patches.id
  patch_group = "production-servers"
}

# SSM Maintenance Window in which patching is allowed to run.
resource "aws_ssm_maintenance_window" "patch_window" {
  name     = "production-patch-window"
  schedule = var.patch_schedule
  # Window length in hours; new tasks stop being scheduled `cutoff` hours
  # before the window closes.
  duration = 4
  cutoff   = 1
  # Only instances explicitly registered as targets may run tasks.
  allow_unassociated_targets = false
}

# Maintenance Window Target — selects the instances to patch by tag.
resource "aws_ssm_maintenance_window_target" "patch_target" {
  resource_type = "INSTANCE"
  window_id     = aws_ssm_maintenance_window.patch_window.id
  name          = "patch-production-servers"

  targets {
    key    = "tag:PatchGroup"
    values = ["production-servers"]
  }
}

# Maintenance Window Task — runs AWS-RunPatchBaseline (Install) against the
# target above, at most 50% of instances concurrently, aborting if 25% fail.
resource "aws_ssm_maintenance_window_task" "patch_task" {
  window_id        = aws_ssm_maintenance_window.patch_window.id
  task_type        = "RUN_COMMAND"
  task_arn         = "AWS-RunPatchBaseline"
  service_role_arn = aws_iam_role.maintenance_window_role.arn
  priority         = 1
  max_concurrency  = "50%"
  max_errors       = "25%"

  targets {
    key    = "WindowTargetIds"
    values = [aws_ssm_maintenance_window_target.patch_target.id]
  }

  task_invocation_parameters {
    run_command_parameters {
      parameter {
        name   = "Operation"
        values = ["Install"]
      }
      parameter {
        name   = "RebootOption"
        values = ["RebootIfNeeded"]
      }
    }
  }
}

# Maintenance Window Role — assumed by SSM to execute the window task.
resource "aws_iam_role" "maintenance_window_role" {
  name = "ssm-maintenance-window-role"

  assume_role_policy = jsonencode({
    Version = "2012-10-17"
    Statement = [
      {
        Action = "sts:AssumeRole"
        Effect = "Allow"
        Principal = {
          Service = "ssm.amazonaws.com"
        }
      }
    ]
  })
}

# Attach required policies for Maintenance Window Role
resource "aws_iam_role_policy_attachment" "maintenance_window_policy" {
  role       = aws_iam_role.maintenance_window_role.name
  policy_arn = "arn:aws:iam::aws:policy/service-role/AmazonSSMMaintenanceWindowRole"
}

# # Update EC2 instance configuration with patch group tag
# resource "aws_instance" "monitored_instance" {
#   ami           = var.ami_id
#   instance_type = var.instance_type

#   iam_instance_profile = aws_iam_instance_profile.monitoring_profile.name
#   monitoring           = true

#   user_data = <<-EOF
#     #!/bin/bash
#     yum update -y
#     yum install -y amazon-cloudwatch-agent
#     amazon-cloudwatch-agent-ctl -a fetch-config -m ec2 -s -c ssm:${aws_ssm_parameter.cw_agent_config.name}
#     systemctl start amazon-cloudwatch-agent
#     systemctl enable amazon-cloudwatch-agent
#   EOF

#   tags = {
#     Name       = "monitored-instance"
#     PatchGroup = "production-servers"
#   }
# }

# Add CloudWatch Event Rule for Patch Compliance Monitoring
# NOTE(review): verify "Patch Compliance State Change" against the current
# EventBridge event reference for aws.ssm — the documented compliance event
# detail-type may be "Configuration Compliance State Change".
resource "aws_cloudwatch_event_rule" "patch_compliance" {
  name        = "patch-compliance-monitoring"
  description = "Monitor patch compliance state changes"

  event_pattern = jsonencode({
    source      = ["aws.ssm"]
    detail-type = ["Patch Compliance State Change"]
  })
}

resource "aws_cloudwatch_event_target" "patch_compliance_sns" {
  rule      = aws_cloudwatch_event_rule.patch_compliance.name
  target_id = "PatchComplianceNotification"
  arn       = aws_sns_topic.patch_notifications.arn
}

# SNS Topic for Patch Notifications
resource "aws_sns_topic" "patch_notifications" {
  name = "patch-compliance-notifications"
}

# Resource policy on the topic: EventBridge delivers to SNS with the service
# principal events.amazonaws.com, which has no publish permission by default —
# without this policy the event target above fails silently.
resource "aws_sns_topic_policy" "patch_notifications" {
  arn = aws_sns_topic.patch_notifications.arn

  policy = jsonencode({
    Version = "2012-10-17"
    Statement = [
      {
        Sid    = "AllowEventBridgePublish"
        Effect = "Allow"
        Principal = {
          Service = "events.amazonaws.com"
        }
        Action   = "sns:Publish"
        Resource = aws_sns_topic.patch_notifications.arn
        Condition = {
          ArnEquals = {
            "aws:SourceArn" = aws_cloudwatch_event_rule.patch_compliance.arn
          }
        }
      }
    ]
  })
}

# SSM State Manager association for CloudWatch agent updates
resource "aws_ssm_association" "cloudwatch_agent_update" {
  name = "AWS-ConfigureAWSPackage"

  targets {
    key    = "tag:Monitoring"
    values = ["enabled"]
  }

  parameters = {
    action = "Install"
    name   = "AmazonCloudWatchAgent"
  }
}

# Two alternative CloudWatch agent configuration documents:
#   normal_config   — broad host metrics under the default CWAgent namespace,
#                     plus trace collection (X-Ray / OTLP / Application Signals).
#   detailed_config — a narrower metric set under CustomEC2Metrics, plus log
#                     file shipping. Only normal_config is published to SSM below.
locals {
  normal_config = {
    "agent" : {
      "metrics_collection_interval" : 60
    },
    "metrics" : {
      "namespace" : "CWAgent",
      "append_dimensions" : {
        # $${...} escapes Terraform interpolation; the agent receives the
        # literal placeholder ${aws:InstanceId}.
        "InstanceId" : "$${aws:InstanceId}"
      },
      "metrics_collected" : {
        "mem" : {
          "measurement" : [
            "used_percent",
            "used",
            "total",
            "inactive",
            "free",
            "cached",
            "buffered",
            "available_percent",
            "available",
            "active"
          ]
        },
        "cpu" : {
          "measurement" : [
            "usage_active",
            "time_guest_nice",
            "time_idle",
            "time_irq",
            "time_iowait",
            "time_guest",
            "time_active",
            "time_softirq",
            "time_nice",
            "time_system",
            "time_user",
            "time_steal",
            "usage_guest",
            "usage_guest_nice",
            "usage_idle",
            "usage_iowait",
            "usage_irq",
            "usage_nice",
            "usage_softirq",
            "usage_steal"
          ]
        },
        "disk" : {
          "measurement" : [
            "used_percent",
            "free",
            "inodes_free",
            "inodes_total",
            "inodes_used",
            "total",
            "used"
          ]
        },
        "diskio" : {
          "measurement" : [
            "read_bytes",
            "write_bytes",
            "iops_in_progress",
            "io_time",
            "reads",
            "writes",
            "read_time",
            "write_time"
          ]
        },
        "net" : {
          "measurement" : [
            "bytes_sent",
            "bytes_recv",
            "drop_in",
            "drop_out",
            "err_in",
            "err_out",
            "packets_sent",
            "packets_recv"
          ]
        },
        "netstat" : {
          "measurement" : [
            "tcp_established",
            "tcp_close",
            "tcp_close_wait",
            "tcp_closing",
            "tcp_fin_wait1",
            "tcp_last_ack",
            "tcp_listen",
            "tcp_fin_wait2",
            "tcp_none",
            "tcp_syn_recv",
            "tcp_time_wait",
            "tcp_syn_sent",
            "udp_socket"
          ]
        },
        "processes" : {
          "measurement" : [
            "running",
            "wait",
            "zombies",
            "total_threads",
            "total",
            "paging",
            "sleeping",
            "stopped",
            "dead",
            "blocked",
            "idle"
          ]
        },
        "swap" : {
          "measurement" : [
            "used_percent",
            "free",
            "used"
          ]
        }
      }
    },
    "traces" : {
      "traces_collected" : {
        "xray" : {},
        "otlp" : {},
        "application_signals" : {}
      }
    }
  }

  detailed_config = {
    agent = {
      metrics_collection_interval = 60
      run_as_user                 = "root"
    }
    metrics = {
      namespace = "CustomEC2Metrics"
      metrics_collected = {
        cpu = {
          resources = ["*"]
          measurement = [
            "cpu_usage_idle",
            "cpu_usage_user",
            "cpu_usage_system",
            "cpu_usage_iowait"
          ]
          totalcpu                    = true
          metrics_collection_interval = 60
        }
        mem = {
          measurement = [
            "mem_used_percent",
            "mem_total",
            "mem_used",
            "mem_cached",
            "mem_buffered"
          ]
          metrics_collection_interval = 60
        }
        disk = {
          resources = ["/"]
          measurement = [
            "disk_used_percent",
            "disk_free",
            "disk_total",
            "disk_inodes_free",
            "disk_inodes_used"
          ]
          metrics_collection_interval = 60
        }
        netstat = {
          metrics_collection_interval = 60
          measurement = [
            "tcp_established",
            "tcp_time_wait"
          ]
        }
      }
    }
    logs = {
      log_stream_name      = "logs"
      force_flush_interval = 60
      logs_collected = {
        files = {
          collect_list = [
            {
              file_path       = "/var/log/messages"
              log_group_name  = "/ec2/system"
              log_stream_name = "{instance_id}"
              timezone        = "UTC"
            },

            # nginx
            # {
            #   file_path       = "/var/log/nginx/swarms/access.log"
            #   log_group_name  = "/swarms/ngnix_access"
            #   log_stream_name = "{instance_id}"
            #   timezone        = "UTC"
            # },
            # {
            #   file_path       = "/var/log/nginx/swarms/error.log"
            #   log_group_name  = "/swarms/nginx_error"
            #   log_stream_name = "{instance_id}"
            #   timezone        = "UTC"
            # },

            {
              file_path       = "/var/log/cloud-init-output.log"
              log_group_name  = "/ec2/init"
              log_stream_name = "{instance_id}"
              timezone        = "UTC"
            },
            {
              file_path       = "/var/log/eliza_systemd.log"
              log_group_name  = "/eliza/systemd"
              log_stream_name = "{instance_id}"
              timezone        = "UTC"
            },
            {
              file_path       = "/var/log/secure"
              log_group_name  = "/ec2/secure"
              log_stream_name = "{instance_id}"
              timezone        = "UTC"
            }
          ]
        }
      }
    }
  }
}

# Updated CloudWatch Agent configuration in SSM Parameter Store
# NOTE(review): fetching a SecureString via `ssm:` requires the instance role
# to hold kms:Decrypt on the parameter's key — confirm the instance profile
# grants it, or consider type = "String" (the config is not secret).
resource "aws_ssm_parameter" "cw_agent_config" {
  name        = "/cloudwatch-agent/config"
  description = "CloudWatch agent configuration"
  type        = "SecureString"
  value       = jsonencode(local.normal_config)
}

# SSM command to update CloudWatch configuration on running instances
resource "aws_ssm_association" "update_cloudwatch_config" {
  name = "AWS-RunShellScript"

  targets {
    key    = "tag:Monitoring"
    values = ["enabled"]
  }

  parameters = {
    commands = "amazon-cloudwatch-agent-ctl -a fetch-config -m ec2 -s -c ssm:${aws_ssm_parameter.cw_agent_config.name}\n systemctl restart amazon-cloudwatch-agent"
  }
}

# Log groups for the application/init streams shipped by detailed_config.
resource "aws_cloudwatch_log_group" "log_groups" {
  for_each = toset([
    # "/swarms/ngnix_access",
    # "/swarms/nginx_error",
    "/eliza/systemd", "/ec2/init"])
  name              = each.key
  retention_in_days = 30
  #kms_key_id = "arn:aws:kms:us-east-2:916723593639:key/cc8e1ee7-a05b-4642-bd81-ba5548635590"
}

# CloudWatch Log Groups for collected logs
resource "aws_cloudwatch_log_group" "system_logs" {
  name              = "/ec2/system"
  retention_in_days = 30
  #kms_key_id = "arn:aws:kms:us-east-2:916723593639:key/cc8e1ee7-a05b-4642-bd81-ba5548635590"
}

resource "aws_cloudwatch_log_group" "secure_logs" {
  name              = "/ec2/secure"
  retention_in_days = 30
  #kms_key_id = "arn:aws:kms:us-east-2:916723593639:key/cc8e1ee7-a05b-4642-bd81-ba5548635590"
}

# SSM Document for CloudWatch agent troubleshooting
# (the heredoc body continues past the end of this chunk)
resource "aws_ssm_document" "cloudwatch_agent_troubleshoot" {
  name            = "TroubleshootCloudWatchAgent"
  document_type   = "Command"
  document_format = "YAML"

  content = <