-
Notifications
You must be signed in to change notification settings - Fork 4
/
Copy pathgophish_cloud_init.tf
245 lines (227 loc) · 10.1 KB
/
gophish_cloud_init.tf
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
# cloud-init commands for configuring Gophish instances
locals {
  # This value is used multiple times below, so we may as well define it
  # in one place.
  #
  # Directory on the instance that holds the pca-gophish Docker
  # composition; the postfix and Gophish certificate destinations and
  # the docker-compose file referenced in the cloud-init parts below
  # all live under this path.
  pca_gophish_composition_dir = "/var/pca/pca-gophish-composition"
}
# Assemble the multipart MIME cloud-init user-data for each Gophish
# instance.  One configuration is rendered per instance (count), and
# the parts are processed by cloud-init in the order listed below.
data "cloudinit_config" "gophish_cloud_init_tasks" {
  count = lookup(var.operations_instance_counts, "gophish", 0)

  base64_encode = true
  gzip          = true

  # Note: The filename parameters in each part below are only used to
  # name the mime-parts of the user-data.  They do not affect the
  # final name for the templates.  For any x-shellscript parts, the
  # filenames will also be used as a filename in the scripts
  # directory.

  # Set the local hostname
  #
  # We need to go ahead and set the local hostname to the correct
  # value that will eventually be obtained from DHCP, since we make
  # liberal use of the "{local_hostname}" placeholder in our AWS
  # CloudWatch Agent configuration.
  part {
    content = templatefile(
      "${path.module}/cloud-init/set-hostname.tpl.yml", {
        # Note that the hostname here is identical to what is set in
        # the corresponding DNS A record.
        fqdn     = "gophish${count.index}.${aws_route53_zone.assessment_private.name}"
        hostname = "gophish${count.index}"
    })
    content_type = "text/cloud-config"
    filename     = "set-hostname.yml"
    merge_type   = "list(append)+dict(recurse_array)+str()"
  }

  # TODO: Remove the following two parts when and if that becomes
  # possible.  See #234 for more details.

  # Fix the DHCP options in the Canonical Netplan configuration
  # created by cloud-init.
  #
  # The issue is that Netplan uses a default of false for
  # dhcp4-overrides.use-domains, and cloud-init does not explicitly
  # set this key or provide any way to do so.
  #
  # See these issues for more details:
  # - cisagov/skeleton-packer#300
  # - canonical/cloud-init#4764
  part {
    content = templatefile(
      "${path.module}/cloud-init/fix-dhcp.tpl.py", {
        netplan_config = "/etc/netplan/50-cloud-init.yaml"
    })
    content_type = "text/x-shellscript"
    filename     = "fix-dhcp.py"
    merge_type   = "list(append)+dict(recurse_array)+str()"
  }

  # Now that the DHCP options in the Canonical Netplan configuration
  # created by cloud-init have been fixed, reapply the Netplan
  # configuration.
  #
  # The issue is that Netplan uses a default of false for
  # dhcp4-overrides.use-domains, and cloud-init does not explicitly
  # set this key or provide any way to do so.
  #
  # See these issues for more details:
  # - cisagov/skeleton-packer#300
  # - canonical/cloud-init#4764
  part {
    content      = file("${path.module}/cloud-init/fix-dhcp.yml")
    content_type = "text/cloud-config"
    filename     = "fix-dhcp.yml"
    merge_type   = "list(append)+dict(recurse_array)+str()"
  }

  # Create an fstab entry for the EFS share
  part {
    content = templatefile(
      "${path.module}/cloud-init/efs-mount.tpl.yml", {
        # Use the access point that corresponds with the EFS mount target used
        efs_ap_id = aws_efs_access_point.access_point[var.private_subnet_cidr_blocks[0]].id
        # Just mount the EFS mount target in the first private subnet
        efs_id      = aws_efs_mount_target.target[var.private_subnet_cidr_blocks[0]].file_system_id
        group       = var.efs_users_group_name
        mount_point = "/share"
        owner       = data.aws_ssm_parameter.vnc_username.value
    })
    content_type = "text/cloud-config"
    # Hyphenated to match the template name and the naming convention
    # used by every other part in this configuration.
    filename   = "efs-mount.yml"
    merge_type = "list(append)+dict(recurse_array)+str()"
  }

  # This shell script loops until the EFS share is mounted.  We do
  # make the instance depend on the EFS share in the Terraform code,
  # but it is still possible for an instance to boot up without
  # mounting the share.  See this issue comment for more details:
  # https://github.com/cisagov/cool-assessment-terraform/issues/85#issuecomment-754052796
  part {
    content = templatefile(
      "${path.module}/cloud-init/mount-efs-share.tpl.sh", {
        mount_point = "/share"
    })
    content_type = "text/x-shellscript"
    filename     = "mount-efs-share.sh"
  }

  # Create the JSON file used to configure Docker daemon.  This allows us
  # to tell Docker to store volume data on our persistent
  # EBS Docker data volume (created below).
  part {
    content = templatefile(
      "${path.module}/cloud-init/write-docker-daemon-json.tpl.yml", {
        docker_data_root_dir = local.docker_volume_mount_point
    })
    content_type = "text/cloud-config"
    filename     = "write-docker-daemon-json.yml"
    merge_type   = "list(append)+dict(recurse_array)+str()"
  }

  # Prepare and mount EBS volume to hold Docker data-root data.
  # Note that this script and the next one must take place in a certain order,
  # so we prepend numbers to the script names to force that to that happen.
  #
  # Here is where the user scripts are called by cloud-init:
  # https://github.com/canonical/cloud-init/blob/70c28373fef35827570c1c7803eb5338d8a6fcfb/cloudinit/config/cc_scripts_user.py#L38
  #
  # And here is where you can see how cloud-init sorts the scripts:
  # https://github.com/canonical/cloud-init/blob/70c28373fef35827570c1c7803eb5338d8a6fcfb/cloudinit/subp.py#L366
  part {
    content = templatefile(
      "${path.module}/cloud-init/ebs-disk-setup.tpl.sh", {
        device_name   = local.docker_ebs_device_name
        fs_type       = "ext4"
        label         = "docker_data"
        mount_options = "defaults"
        mount_point   = local.docker_volume_mount_point
        num_disks     = 2
    })
    content_type = "text/x-shellscript"
    filename     = "01-ebs-disk-setup.sh"
  }

  # Copy Docker data from default directory to new data-root directory.
  part {
    content = templatefile(
      "${path.module}/cloud-init/copy-docker-data-to-new-root-dir.tpl.sh", {
        mount_point       = local.docker_volume_mount_point
        new_data_root_dir = local.docker_volume_mount_point
    })
    content_type = "text/x-shellscript"
    filename     = "02-copy-docker-data-to-new-root-dir.sh"
  }

  # Install certificate for postfix.
  part {
    content = templatefile(
      "${path.module}/cloud-init/install-certificates.tpl.py", {
        aws_region       = var.aws_region
        cert_bucket_name = var.cert_bucket_name
        # We use the element() function below instead of the built-in list
        # index syntax because we want the "wrap-around" behavior provided
        # by element().  This means that the number of items in
        # var.email_sending_domains does not have to exactly match the number
        # of Gophish instances.  For details, see:
        # https://www.terraform.io/docs/language/functions/element.html
        cert_read_role_arn  = module.email_sending_domain_certreadrole[element(var.email_sending_domains, count.index)].role.arn
        create_dest_dirs    = false
        full_chain_pem_dest = "${local.pca_gophish_composition_dir}/secrets/postfix/fullchain.pem"
        priv_key_pem_dest   = "${local.pca_gophish_composition_dir}/secrets/postfix/privkey.pem"
        # Certbot stores wildcard certs in a directory with the name
        # of the domain, instead of pre-pending an asterisk.
        server_fqdn = element(var.email_sending_domains, count.index)
    })
    content_type = "text/x-shellscript"
    filename     = "install-certificates-postfix.py"
  }

  # Configure postfix in the pca-gophish Docker composition.
  part {
    content = templatefile(
      "${path.module}/cloud-init/postfix-setup.tpl.yml", {
        email_sending_domain    = element(var.email_sending_domains, count.index)
        pca_docker_compose_file = "${local.pca_gophish_composition_dir}/docker-compose.yml"
    })
    content_type = "text/cloud-config"
    filename     = "postfix-setup.yml"
    merge_type   = "list(append)+dict(recurse_array)+str()"
  }

  # Install certificate for Gophish.
  part {
    content = templatefile(
      "${path.module}/cloud-init/install-certificates.tpl.py", {
        aws_region       = var.aws_region
        cert_bucket_name = var.cert_bucket_name
        # We use the element() function below instead of the built-in list
        # index syntax because we want the "wrap-around" behavior provided
        # by element().  This means that the number of items in
        # var.email_sending_domains does not have to exactly match the number
        # of Gophish instances.  For details, see:
        # https://www.terraform.io/docs/language/functions/element.html
        cert_read_role_arn  = module.email_sending_domain_certreadrole[element(var.email_sending_domains, count.index)].role.arn
        create_dest_dirs    = false
        full_chain_pem_dest = "${local.pca_gophish_composition_dir}/secrets/gophish/phish_fullchain.pem"
        priv_key_pem_dest   = "${local.pca_gophish_composition_dir}/secrets/gophish/phish_privkey.pem"
        # Certbot stores wildcard certs in a directory with the name
        # of the domain, instead of pre-pending an asterisk.
        server_fqdn = element(var.email_sending_domains, count.index)
    })
    content_type = "text/x-shellscript"
    filename     = "install-certificates-gophish.py"
  }

  # Configure Thunderbird to autoconfigure email accounts from the
  # appropriate email-sending domain.
  part {
    content = templatefile(
      "${path.module}/cloud-init/write-thunderbird-email-autoconfig.tpl.yml", {
        email_sending_domain = element(var.email_sending_domains, count.index)
    })
    content_type = "text/cloud-config"
    filename     = "write-thunderbird-email-autoconfig.yml"
    merge_type   = "list(append)+dict(recurse_array)+str()"
  }

  # Ensure email-sending domain is mapped to 127.0.0.1 in /etc/hosts.
  # Note: Even though /etc/hosts on this instance is managed by cloud-init
  # in /etc/cloud/templates/hosts.debian.tmpl, we can safely modify
  # /etc/hosts here because our change will be applied at every startup,
  # after the earlier cloud-init code applies hosts.debian.tmpl.
  part {
    content = templatefile(
      "${path.module}/cloud-init/map-hostname-to-localhost.tpl.sh", {
        hostname   = element(var.email_sending_domains, count.index)
        hosts_file = "/etc/hosts"
    })
    content_type = "text/x-shellscript"
    filename     = "map-hostname-to-localhost.sh"
  }
}