diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index 952003f..c0c3a40 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -20,3 +20,4 @@
 /Terraform/deploy-fsx-ontap-sqlserver/ @varunrai
 /Terraform/deploy-fsx-ontap-fileshare-access/ @varunrai
 /Terraform/deploy-fsx-ontap/ @kcantrel
+/Terraform/fsxn-replicate/ @nichollri
diff --git a/README.md b/README.md
index 80c8345..86f66f9 100644
--- a/README.md
+++ b/README.md
@@ -18,6 +18,9 @@ Have a great idea? We'd love to hear it! Please email us at [ng-fsxn-github-samp
 * [AI](/AI)
   * [GenAI ChatBot application sample](/AI/GenAI-ChatBot-application-sample)
+* [Ansible](/Ansible)
+  * [FSx ONTAP inventory report](/Ansible/fsx_inventory_report)
+  * [SnapMirror report](/Ansible/snapmirror_report)
 * [CloudFormation](/CloudFormation)
   * [deploy-fsx-ontap](/CloudFormation/deploy-fsx-ontap)
 * [EKS](/EKS)
@@ -39,6 +42,7 @@ Have a great idea? We'd love to hear it! Please email us at [ng-fsxn-github-samp
   * [k8s applications non-stdout logs collection into ELK](/Solutions/EKS-logs-to-ELK)
 * [Terraform](/Terraform)
   * [FSx ONTAP deployment using Terraform](/Terraform/deploy-fsx-ontap)
+  * [FSx ONTAP Replication](/Terraform/fsxn-replicate)
   * [Deployment of SQL Server on EC2 with FSx ONTAP](/Terraform/deploy-fsx-ontap-sqlserver)
   * [Deployment of FSx ONTAP with VPN for File Share Access](/Terraform/deploy-fsx-ontap-fileshare-access)
diff --git a/Terraform/fsxn-replicate/DR_FSxN_variables.tf b/Terraform/fsxn-replicate/DR_FSxN_variables.tf
new file mode 100644
index 0000000..098a998
--- /dev/null
+++ b/Terraform/fsxn-replicate/DR_FSxN_variables.tf
@@ -0,0 +1,216 @@
+# Variables for the Disaster Recovery FSx for ONTAP file system to be created.
+
+variable "dr_aws_region" {
+  description = "AWS region where you want the Secondary (DR) FSx for ONTAP file system."
+  type = string
+  default = ""
+}
+
+variable "dr_fsx_name" {
+  description = "The name to assign to the destination FSx for ONTAP file system."
+  type = string
+  default = ""
+}
+
+variable "dr_clus_name" {
+  description = "The name given to the ONTAP Terraform connection profile for the DR cluster. This is a user-created value that can be any string. It is referenced in many ONTAP Terraform resources."
+  type = string
+  default = "dr_clus"
+}
+
+variable "dr_fsx_deploy_type" {
+  description = "The file system deployment type. Supported values are 'MULTI_AZ_1', 'SINGLE_AZ_1', 'MULTI_AZ_2', and 'SINGLE_AZ_2'. MULTI_AZ_1 and SINGLE_AZ_1 are Gen 1. MULTI_AZ_2 and SINGLE_AZ_2 are Gen 2."
+  type = string
+  default = "SINGLE_AZ_1"
+  validation {
+    condition = contains(["MULTI_AZ_1", "SINGLE_AZ_1", "MULTI_AZ_2", "SINGLE_AZ_2"], var.dr_fsx_deploy_type)
+    error_message = "Invalid deployment type. Valid values are MULTI_AZ_1, SINGLE_AZ_1, MULTI_AZ_2 or SINGLE_AZ_2."
+  }
+}
+
+variable "dr_fsx_subnets" {
+  description = "The primary subnet ID, and secondary subnet ID if you are deploying in a Multi AZ environment, that the file system will be accessible from. For MULTI_AZ deployment types both subnets are required. For SINGLE_AZ deployment types, only the primary subnet is used."
+  type = map(any)
+  default = {
+    "primarysub" = "subnet-11111111"
+    "secondarysub" = "subnet-33333333"
+  }
+}
+
+variable "dr_fsx_capacity_size_gb" {
+  description = "The storage capacity in GiBs of the FSx for ONTAP file system. Valid values are between 1024 (1 TiB) and 1048576 (1 PiB). Gen 1 deployment types are limited to 192 TiB. Gen 2 Multi AZ is limited to 512 TiB. Gen 2 Single AZ is limited to 1 PiB."
+  type = number
+  default = 1024
+  validation {
+    condition = var.dr_fsx_capacity_size_gb >= 1024 && var.dr_fsx_capacity_size_gb <= 1048576
+    error_message = "Invalid capacity size. Valid values are between 1024 (1 TiB) and 1048576 (1 PiB)."
+  }
+}
+
+variable "dr_fsx_tput_in_MBps" {
+  description = "The throughput capacity (in MBps) for the file system. Valid values are 128, 256, 512, 1024, 2048, and 4096 for Gen 1, and 384, 768, 1536, 3072 and 6144 for Gen 2."
+  type = string
+  default = "128"
+  validation {
+    condition = contains(["128", "256", "384", "512", "768", "1024", "1536", "2048", "3072", "4096", "6144"], var.dr_fsx_tput_in_MBps)
+    error_message = "Invalid throughput value. Valid values are 128, 256, 384, 512, 768, 1024, 1536, 2048, 3072, 4096, 6144."
+  }
+}
+
+variable "dr_ha_pairs" {
+  description = "The number of HA pairs in the file system. Valid values are from 1 through 12. Only the single AZ Gen 2 deployment type supports more than 1 HA pair."
+  type = number
+  default = 1
+  validation {
+    condition = var.dr_ha_pairs >= 1 && var.dr_ha_pairs <= 12
+    error_message = "Invalid number of HA pairs. Valid values are from 1 through 12."
+  }
+}
+
+variable "dr_endpoint_ip_address_range" {
+  description = "The IP address range that the FSx for ONTAP file system will be accessible from. This is only used for Multi AZ deployment types and must be left as null for Single AZ deployment types."
+  type = string
+  default = null
+}
+
+variable "dr_route_table_ids" {
+  description = "An array of routing table IDs that will be modified to allow access to the FSx for ONTAP file system. This is only used for Multi AZ deployment types and must be left empty for Single AZ deployment types."
+  type = list(string)
+  default = []
+}
+
+variable "dr_disk_iops_configuration" {
+  description = "The SSD IOPS configuration for the file system. Valid modes are 'AUTOMATIC' (3 IOPS per GB provisioned) or 'USER_PROVISIONED'. NOTE: Due to a bug in the AWS FSx provider, if you want AUTOMATIC, then leave this variable empty. If you want USER_PROVISIONED, then add a 'mode=USER_PROVISIONED' (with USER_PROVISIONED enclosed in double quotes) and 'iops=number' where number is between 1 and 160000."
+  type = map(any)
+  default = {}
+}
+
+variable "dr_tags" {
+  description = "Tags to be applied to the FSx for ONTAP file system. The format is '{Name1 = value, Name2 = value}' where value should be enclosed in double quotes."
+  type = map(any)
+  default = {}
+}
+
+variable "dr_maintenance_start_time" {
+  description = "The preferred start time to perform weekly maintenance, in the UTC time zone. The format is 'D:HH:MM', where D is the day of the week (1=Monday and 7=Sunday)."
+  type = string
+  default = "7:00:00"
+}
+
+variable "dr_kms_key_id" {
+  description = "ARN for the KMS Key to encrypt the file system at rest. Defaults to an AWS managed KMS Key."
+  type = string
+  default = null
+}
+
+variable "dr_backup_retention_days" {
+  description = "The number of days to retain automatic backups. Setting this to 0 disables automatic backups. You can retain automatic backups for a maximum of 90 days."
+  type = number
+  default = 0
+  validation {
+    condition = var.dr_backup_retention_days >= 0 && var.dr_backup_retention_days <= 90
+    error_message = "Invalid backup retention days. Valid values are between 0 and 90."
+  }
+}
+
+variable "dr_daily_backup_start_time" {
+  description = "A recurring daily time, in the format HH:MM. HH is the zero-padded hour of the day (0-23), and MM is the zero-padded minute of the hour. Requires automatic_backup_retention_days to be set."
+  type = string
+  default = "00:00"
+}
+
+variable "dr_svm_name" {
+  description = "The name of the Storage Virtual Machine."
+  type = string
+  default = "fsx_dr"
+}
+
+variable "dr_root_vol_sec_style" {
+  description = "Specifies the root volume security style. Valid values are UNIX, NTFS, and MIXED (although MIXED is not recommended). All volumes created under this SVM will inherit the root security style unless the security style is specified on the volume."
+  type = string
+  default = "UNIX"
+}
+
+/*
+ * This last set of variables has to do with a security group that can optionally be
+ * created. The security group will have all the ingress rules that allow access
+ * to all the protocols that an FSx for ONTAP file system supports (e.g. SMB, NFS, etc). See security_groups.tf
+ * for more information.
+ *
+ * If you decide to create the security group, you can specify either the CIDR block to
+ * be used as the source for the ingress rules OR the ID of a security group to be used as
+ * the source for the ingress rules. You can't specify both.
+ *
+ * If you decide not to create the security group, you must set dr_security_group_ids to
+ * the IDs of the security groups that you want to use.
+ *
+ */
+variable "dr_create_sg" {
+  description = "Determines whether the Security Group should be created as part of this deployment or not."
+  type = bool
+  default = true
+}
+
+variable "dr_security_group_ids" {
+  description = "If you are not creating the security group, provide a list of IDs of security groups to be used."
+  type = list(string)
+  default = []
+}
+
+variable "dr_security_group_name_prefix" {
+  description = "The prefix to the security group name that will be created."
+  type = string
+  default = "fsxn-sg"
+}
+
+variable "dr_cidr_for_sg" {
+  description = "The CIDR block to be used for the created security group ingress rules. Set to an empty string if you want to use dr_source_sg_id as the source."
+  type = string
+  default = "10.0.0.0/8"
+}
+
+variable "dr_source_sg_id" {
+  description = "The ID of the security group to allow access to the FSx for ONTAP file system. Set to an empty string if you want to use dr_cidr_for_sg as the source."
+  type = string
+  default = ""
+}
+
+variable "dr_vpc_id" {
+  description = "The VPC ID where the DR FSx for ONTAP file system and security group will be created."
+  type = string
+  default = ""
+}
+
+variable "dr_username_pass_secrets_id" {
+  description = "Name of the secret in AWS Secrets Manager. This secret needs to be in the same region as the DR FSx for ONTAP file system."
+  type = string
+  default = ""
+}
+
+variable "dr_snapmirror_policy_name" {
+  description = "Name of the SnapMirror policy to create."
+  type = string
+  default = ""
+}
+
+variable "dr_transfer_schedule" {
+  description = "The schedule used to update asynchronous relationships."
+  type = string
+  default = "hourly"
+}
+
+variable "dr_retention" {
+  description = "Rules for Snapshot copy retention."
+  type = string
+  default = <<-EOF
+[{
+    "label": "weekly",
+    "count": 4
+},
+{
+    "label": "daily",
+    "count": 7
+}]
+EOF
+}
diff --git a/Terraform/fsxn-replicate/Primary_FSxN_variables.tf b/Terraform/fsxn-replicate/Primary_FSxN_variables.tf
new file mode 100644
index 0000000..0e3b1c5
--- /dev/null
+++ b/Terraform/fsxn-replicate/Primary_FSxN_variables.tf
@@ -0,0 +1,47 @@
+variable "prime_fsxid" {
+  description = "FSx for ONTAP file system ID of the primary cluster."
+  type = string
+  default = ""
+}
+
+variable "prime_clus_name" {
+  description = "The name given to the ONTAP Terraform connection profile for the primary cluster. This is a user-created value that can be any string. It is referenced in many ONTAP Terraform resources."
+  type = string
+  default = "primary_clus"
+}
+
+variable "prime_svm" {
+  description = "Name of the SVM for replication in the primary cluster."
+  type = string
+  default = ""
+}
+
+variable "prime_cluster_vserver" {
+  description = "Name of the cluster vserver for intercluster LIFs in the primary cluster. This can be found by running 'network interface show -services default-intercluster' on the source cluster. It will be formatted like this: FsxIdxxxxxxxx"
+  type = string
+  default = ""
+}
+
+variable "prime_aws_region" {
+  description = "AWS region for the Primary FSx for ONTAP file system."
+  type = string
+  default = ""
+}
+
+variable "username_pass_secrets_id" {
+  description = "Name of the secret in AWS Secrets Manager. This secret needs to be in the same region as the Primary FSx for ONTAP file system."
+  type = string
+  default = ""
+}
+
+variable "list_of_volumes_to_replicate" {
+  description = "List of volumes to replicate to the DR FSxN."
+  type = list(string)
+  default = []
+}
+
+variable "validate_certs" {
+  description = "Do we validate the cluster certs (true or false)? If true, then ONTAP requires valid, non-self-signed SSL certificates."
+  type = string
+  default = "false"
+}
diff --git a/Terraform/fsxn-replicate/README.md b/Terraform/fsxn-replicate/README.md
new file mode 100644
index 0000000..b3cc378
--- /dev/null
+++ b/Terraform/fsxn-replicate/README.md
@@ -0,0 +1,217 @@
+# Deploy DR FSx for ONTAP file system and create SnapMirror relationships from source FSx for ONTAP file system
+
+## Introduction
+This repository contains a method to take an existing FSx for ONTAP file system and replicate volumes to a new FSx for ONTAP file system for disaster recovery or backup purposes. It leverages both the AWS FSx Terraform provider and the NetApp ONTAP Terraform provider.
+
+Note: Currently it supports replicating volumes within a single SVM.
+
+## Setup
+
+### Overview
+
+You will need to define some key characteristics of the destination FSx for ONTAP file system that will be created, such as deployment type and throughput (full list below). You also need to define the source SVM, the list of volumes to replicate, and the replication parameters.
+
+These variables are defined in the Primary_FSxN_variables.tf and DR_FSxN_variables.tf files. Their values should be set in the terraform.tfvars file.
+
+### Prerequisites
+You have an existing FSx for ONTAP file system that you want to replicate to a new FSx for ONTAP file system. There is proper networking connectivity between the source FSx for ONTAP file system and the region/VPC/subnets where the destination FSx for ONTAP file system will be deployed.
+
+SnapMirror replication requires **ICMP** and ports **11104** and **11105**.
+
+### Inputs (Primary Cluster)
+
+These variables are to be filled in the terraform.tfvars file; please see the instructions in the Usage section below.
+
+| Name | Description | Type | Default | Required |
+| --------------------- | ------------------------------------------------------------------------------------------------------------- | -------------- | ------------------------------------ | :------: |
+| prime_fsxid | FSx for ONTAP file system ID of the primary cluster. | `string` | | Yes |
+| prime_svm | Name of the primary SVM for the volumes that will be replicated. | `string` | | Yes |
+| prime_cluster_vserver | Name of the ONTAP cluster vserver for intercluster LIFs in the primary cluster. Can be found by running `network interface show -services default-intercluster` on the primary cluster. It will have the format FsxId################# | `string` | | Yes |
+| prime_aws_region | AWS region of the primary FSx for ONTAP file system | `string` | | Yes |
+| username_pass_secrets_id | Name of the secret in AWS Secrets Manager. The secret should have a key `username` with a value of `fsxadmin`, and another key `password` with its value set to the password of the FSxN. *Note*: The secret must be in the same region as the FSx for ONTAP file system it is associated with. | `string` | | Yes |
+| validate_certs | When connecting to an FSx for ONTAP file system, should Terraform validate the SSL certificate (true or false)? This should be set to `false` if you are using the default self-signed SSL certificate. | `string` | false | No |
+| list_of_volumes_to_replicate | List of volume names to replicate to the destination FSx for ONTAP file system | `list(string)` | | Yes |
+
+### Inputs (DR Cluster)
+
+| Name | Description | Type | Default | Required |
+| --------------------- | ------------------------------------------------------------------------------------------------------------- | -------------- | ------------------------------------ | :------: |
+| dr_aws_region | AWS region where you want the Secondary (DR) FSx for ONTAP file system to be deployed. | `string` | | Yes |
+| dr_fsx_name | The name to assign to the destination FSx for ONTAP file system that will be created. | `string` | | Yes |
+| dr_fsx_deploy_type | The file system deployment type. Supported values are 'MULTI_AZ_1', 'SINGLE_AZ_1', 'MULTI_AZ_2', and 'SINGLE_AZ_2'. MULTI_AZ_1 and SINGLE_AZ_1 are Gen 1. MULTI_AZ_2 and SINGLE_AZ_2 are Gen 2. | `string` | SINGLE_AZ_1 | Yes |
+| dr_fsx_subnets | The primary subnet ID, and secondary subnet ID if you are deploying in a Multi AZ environment, that the file system will be accessible from. For MULTI_AZ deployment types both subnets are required. For SINGLE_AZ deployment types, only the primary subnet is used. | `map(any)` | | Yes |
+| dr_fsx_capacity_size_gb | The storage capacity in GiBs of the FSx for ONTAP file system. Valid values are between 1024 (1 TiB) and 1048576 (1 PiB). Gen 1 deployment types are limited to 192 TiB. Gen 2 Multi AZ is limited to 512 TiB. Gen 2 Single AZ is limited to 1 PiB. The sizing should take into account the size of the volumes you plan to replicate and the tiering policy of the volumes. | `number` | 1024 | Yes |
+| dr_fsx_tput_in_MBps | The throughput capacity (in MBps) for the file system. Valid values are 128, 256, 512, 1024, 2048, and 4096 for Gen 1, and 384, 768, 1536, 3072 and 6144 for Gen 2. | `string` | 128 | Yes |
+| dr_ha_pairs | The number of HA pairs in the file system. Valid values are from 1 through 12. Only the single AZ Gen 2 deployment type supports more than 1 HA pair. | `number` | 1 | Yes |
+| dr_endpoint_ip_address_range | The IP address range that the FSx for ONTAP file system will be accessible from. This is only used for Multi AZ deployment types and must be left as null for Single AZ deployment types. | `string` | null | No |
+| dr_route_table_ids | An array of routing table IDs that will be modified to allow access to the FSx for ONTAP file system. This is only used for Multi AZ deployment types and must be left empty for Single AZ deployment types. | `list(string)` | [] | Only required for Multi-AZ |
+| dr_disk_iops_configuration | The SSD IOPS configuration for the file system. Valid modes are 'AUTOMATIC' (3 IOPS per GB provisioned) or 'USER_PROVISIONED'. NOTE: Due to a bug in the AWS FSx provider, if you want AUTOMATIC, then leave this variable empty. If you want USER_PROVISIONED, then add a 'mode=USER_PROVISIONED' (with USER_PROVISIONED enclosed in double quotes) and 'iops=number' where number is between 1 and 160000. | `map(any)` | {} | No |
+| dr_tags | Tags to be applied to the FSx for ONTAP file system. The format is '{Name1 = value, Name2 = value}' where value should be enclosed in double quotes. | `map(any)` | {} | No |
+| dr_maintenance_start_time | The preferred start time to perform weekly maintenance, in the UTC time zone. The format is 'D:HH:MM', where D is the day of the week (1=Monday and 7=Sunday). | `string` | 7:00:00 | No |
+| dr_svm_name | The name of the Storage Virtual Machine that will house the replicated volumes. | `string` | fsx_dr | Yes |
+| dr_root_vol_sec_style | Specifies the root volume security style. Valid values are UNIX, NTFS, and MIXED (although MIXED is not recommended). All volumes created under this SVM will inherit the root security style unless the security style is specified on the volume. | `string` | UNIX | Yes |
+| dr_username_pass_secrets_id | Name of the secret in AWS Secrets Manager. The secret should have a key `username` with a value of `fsxadmin`, and another key `password` with its value set to the password of the FSxN. *Note*: The secret must be in the same region as the FSx for ONTAP file system it is associated with. | `string` | | Yes |
+| dr_vpc_id | The VPC ID where the DR FSx for ONTAP file system (and security group, if that option is selected) will be created. | `string` | | Yes |
+| dr_snapmirror_policy_name | Name of the SnapMirror policy to create. | `string` | | Yes |
+| dr_transfer_schedule | The schedule used to update asynchronous relationships. | `string` | hourly | No |
+| dr_retention | Rules for Snapshot copy retention. See [Retention Schema](https://registry.terraform.io/providers/NetApp/netapp-ontap/latest/docs/resources/snapmirror_policy_resource#retention) for more information. | `string` | [{ "label": "weekly", "count": 4 }, { "label": "daily", "count": 7 }] | No |
+
+### Inputs (Security Group - DR Cluster)
+
+| Name | Description | Type | Default | Required |
+| --------------------- | ------------------------------------------------------------------------------------------------------------- | -------------- | ------------------------------------ | :------: |
+| dr_create_sg | Determines whether the Security Group should be created as part of this deployment or not. | `bool` | true | Yes |
+| dr_security_group_ids | If you are not creating the security group, provide a list of IDs of security groups to be used. | `list(string)` | [] | No |
+| dr_security_group_name_prefix | The prefix to the security group name that will be created. | `string` | fsxn-sg | No |
+| dr_cidr_for_sg | The CIDR block to be used for the created security group ingress rules. Set to an empty string if you want to use dr_source_sg_id as the source. | `string` | 10.0.0.0/8 | No |
+| dr_source_sg_id | The ID of the security group to allow access to the FSx for ONTAP file system. Set to an empty string if you want to use dr_cidr_for_sg as the source. | `string` | | No |
+
+## Usage
+
+#### 1. Clone the repository
+
+In your server's terminal, navigate to the location where you wish to store this Terraform repository, and clone the repository using your preferred authentication type. In this example we are using HTTPS clone:
+
+```shell
+git clone https://github.com/NetApp/FSx-ONTAP-samples-scripts
+```
+
+#### 2. Navigate to the directory
+
+```shell
+cd FSx-ONTAP-samples-scripts/Terraform/fsxn-replicate
+```
+
+#### 3. Initialize Terraform
+
+This directory represents a standalone Terraform module. Run the following command to initialize the module and install all dependencies:
+
+```shell
+terraform init
+```
+
+A successful initialization should display the following output:
+
+```
+Initializing the backend...
+Initializing modules...
+
+Initializing provider plugins...
+- Reusing previous version of netapp/netapp-ontap from the dependency lock file
+- Reusing previous version of hashicorp/aws from the dependency lock file
+- Using previously-installed netapp/netapp-ontap v1.1.4
+- Using previously-installed hashicorp/aws v5.69.0
+
+Terraform has been successfully initialized!
+
+You may now begin working with Terraform. Try running "terraform plan" to see
+any changes that are required for your infrastructure. All Terraform commands
+should now work.
+
+If you ever set or change modules or backend configuration for Terraform,
+rerun this command to reinitialize your working directory. If you forget, other
+commands will detect it and remind you to do so if necessary.
+```
+
+You can see that Terraform recognizes the providers required by our configuration: `hashicorp/aws` and `netapp/netapp-ontap`.
+
+#### 4. Create Variable Values
+
+- Copy or rename the file **`terraform.sample.tfvars`** to **`terraform.tfvars`**
+
+- Open the **`terraform.tfvars`** file in your preferred text editor. Update the values of the variables to match your preferences and save the file. This will ensure that the Terraform code deploys resources according to your specifications.
+
+- Set the parameters in terraform.tfvars
+
+  ##### Sample file
+
+  ***
+
+  ```ini
+  # Primary FSxN variables
+  prime_hostname = ""
+  prime_fsxid = "fs-xxxxxxxxxxxxxxxxx"
+  prime_svm = "fsx"
+  prime_cluster_vserver = "FsxIdxxxxxxxxxxxxxxxx"
+  prime_aws_region = "us-west-2"
+  username_pass_secrets_id = ""
+  list_of_volumes_to_replicate = ["vol1", "vol2", "vol3"]
+
+  # DR FSxN variables
+  dr_aws_region = "us-west-2"
+  dr_fsx_name = "terraform-dr-fsxn"
+  dr_fsx_subnets = {
+    "primarysub" = "subnet-11111111"
+    "secondarysub" = "subnet-33333333"
+  }
+  dr_svm_name = "fsx_dr"
+  dr_security_group_name_prefix = "fsxn-sg"
+  dr_vpc_id = "vpc-xxxxxxxx"
+  dr_username_pass_secrets_id = ""
+  dr_snapmirror_policy_name = ""
+  ```
+
+> [!IMPORTANT]
+> **Make sure to replace the values with ones that match your AWS environment and needs.**
+
+#### 5. Create a Terraform plan
+
+Run the following command to create an execution plan, which lets you preview the changes that Terraform plans to make to your infrastructure:
+
+```shell
+terraform plan
+```
+
+Ensure that the proposed changes match what you expected before you apply the changes!
+
+#### 6. Apply the Terraform plan
+
+Run the following command to execute the Terraform code and apply the changes proposed in the `plan` step:
+
+```shell
+terraform apply
+```
+
+There will be a lot of output from the `terraform apply` command. At the end, the output
+should look similar to:
+```
+Apply complete! Resources: 33 added, 0 changed, 0 destroyed.
+
+Outputs:
+
+dr_fsxn_system = {
+  "cluster_mgmt_ip" = toset([
+    "192.168.61.96",
+  ])
+}
+snapmirror_details = {
+  "unix" = {
+    "destination_path" = "fsx_dr:unix_dp"
+    "policy_name" = "dr_policy"
+    "source_path" = "vs1fsxninjas:unix"
+  }
+  "unix2" = {
+    "destination_path" = "fsx_dr:unix2_dp"
+    "policy_name" = "dr_policy"
+    "source_path" = "vs1fsxninjas:unix2"
+  }
+}
+```
+The above gives you the cluster management IP address of the DR FSx for ONTAP file system, and the SnapMirror details for the volumes that were replicated.
+
+## Author Information
+
+This repository is maintained by the contributors listed on [GitHub](https://github.com/NetApp/FSx-ONTAP-samples-scripts/graphs/contributors).
+
+## License
+
+Licensed under the Apache License, Version 2.0 (the "License").
+
+You may obtain a copy of the License at [apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0).
+
+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" basis, without WARRANTIES or conditions of any kind, either express or implied.
+
+See the License for the specific language governing permissions and limitations under the License.
+
+© 2024 NetApp, Inc. All Rights Reserved.
diff --git a/Terraform/fsxn-replicate/main.tf b/Terraform/fsxn-replicate/main.tf
new file mode 100644
index 0000000..6078a64
--- /dev/null
+++ b/Terraform/fsxn-replicate/main.tf
@@ -0,0 +1,207 @@
+terraform {
+  required_providers {
+    netapp-ontap = {
+      source = "NetApp/netapp-ontap"
+      version = "1.1.4"
+    }
+
+    aws = {
+      source = "hashicorp/aws"
+      version = ">= 5.68.0"
+    }
+  }
+}
+
+provider "aws" {
+  region = var.dr_aws_region
+}
+
+provider "aws" {
+  alias = "prime-aws-region"
+  region = var.prime_aws_region
+}
+
+data "aws_secretsmanager_secret_version" "ontap_prime_username_pass" {
+  provider = aws.prime-aws-region
+  secret_id = var.username_pass_secrets_id
+}
+
+data "aws_secretsmanager_secret_version" "ontap_dr_username_pass" {
+  secret_id = var.dr_username_pass_secrets_id
+}
+
+provider "netapp-ontap" {
+  # A connection profile defines how to interface with an ONTAP cluster or SVM.
+  # At least one is required.
+  connection_profiles = [
+    {
+      name = var.prime_clus_name
+      hostname = join("", data.aws_fsx_ontap_file_system.source_fsxn.endpoints[0].management[0].ip_addresses)
+      username = jsondecode(data.aws_secretsmanager_secret_version.ontap_prime_username_pass.secret_string)["username"]
+      password = jsondecode(data.aws_secretsmanager_secret_version.ontap_prime_username_pass.secret_string)["password"]
+      validate_certs = var.validate_certs
+    },
+    {
+      name = var.dr_clus_name
+      hostname = join("", aws_fsx_ontap_file_system.terraform-fsxn.endpoints[0].management[0].ip_addresses)
+      username = jsondecode(data.aws_secretsmanager_secret_version.ontap_dr_username_pass.secret_string)["username"]
+      password = jsondecode(data.aws_secretsmanager_secret_version.ontap_dr_username_pass.secret_string)["password"]
+      validate_certs = var.validate_certs
+    }
+  ]
+}
+
+resource "aws_fsx_ontap_file_system" "terraform-fsxn" {
+  subnet_ids = var.dr_fsx_deploy_type == "MULTI_AZ_1" || var.dr_fsx_deploy_type == "MULTI_AZ_2" ? [var.dr_fsx_subnets["primarysub"], var.dr_fsx_subnets["secondarysub"]] : [var.dr_fsx_subnets["primarysub"]]
+  preferred_subnet_id = var.dr_fsx_subnets["primarysub"]
+
+  storage_capacity = var.dr_fsx_capacity_size_gb
+  security_group_ids = var.dr_create_sg ? [element(aws_security_group.fsx_sg[*].id, 0)] : var.dr_security_group_ids
+  deployment_type = var.dr_fsx_deploy_type
+  throughput_capacity_per_ha_pair = var.dr_fsx_tput_in_MBps
+  ha_pairs = var.dr_ha_pairs
+  endpoint_ip_address_range = var.dr_endpoint_ip_address_range
+  route_table_ids = var.dr_route_table_ids
+  dynamic "disk_iops_configuration" {
+    for_each = length(var.dr_disk_iops_configuration) > 0 ? [var.dr_disk_iops_configuration] : []
+
+    content {
+      iops = try(disk_iops_configuration.value.iops, null)
+      mode = try(disk_iops_configuration.value.mode, null)
+    }
+  }
+
+  tags = merge(var.dr_tags, { Name = var.dr_fsx_name })
+  weekly_maintenance_start_time = var.dr_maintenance_start_time
+  kms_key_id = var.dr_kms_key_id
+  automatic_backup_retention_days = var.dr_backup_retention_days
+  daily_automatic_backup_start_time = var.dr_backup_retention_days > 0 ? var.dr_daily_backup_start_time : null
+  fsx_admin_password = jsondecode(data.aws_secretsmanager_secret_version.ontap_dr_username_pass.secret_string)["password"]
+}
+
+# Define a storage virtual machine.
+resource "aws_fsx_ontap_storage_virtual_machine" "mysvm" {
+  file_system_id = aws_fsx_ontap_file_system.terraform-fsxn.id
+  name = var.dr_svm_name
+  root_volume_security_style = var.dr_root_vol_sec_style
+}
+
+data "netapp-ontap_storage_volume_data_source" "src_vols" {
+  for_each = toset(var.list_of_volumes_to_replicate)
+  cx_profile_name = var.prime_clus_name
+  svm_name = var.prime_svm
+  name = each.value
+}
+
+variable "size_in_mb" {
+  type = map(string)
+
+  # Conversion to MBs
+  default = {
+    "mb" = 1
+    "MB" = 1
+    "gb" = 1024
+    "GB" = 1024
+    "tb" = 1024 * 1024
+    "TB" = 1024 * 1024
+  }
+}
+
+resource "aws_fsx_ontap_volume" "dp_volumes" {
+  for_each = data.netapp-ontap_storage_volume_data_source.src_vols
+  storage_virtual_machine_id = aws_fsx_ontap_storage_virtual_machine.mysvm.id
+  name = "${each.value.name}_dp"
+  ontap_volume_type = "DP"
+  size_in_megabytes = each.value.space.size * lookup(var.size_in_mb, each.value.space.size_unit, 0)
+  tiering_policy {
+    name = "ALL"
+  }
+  skip_final_backup = true
+}
+
+# Now that we have the DP volumes created on the newly deployed destination cluster,
+# let's get the intercluster LIFs so we can peer the clusters.
+
+# For now let's try to get the source and destination IC LIFs via AWS TF provider.
+data "aws_fsx_ontap_file_system" "source_fsxn" { + provider = aws.prime-aws-region + id = var.prime_fsxid +} + +# Now udse the LIF names and IP addresses to peer the clusters + +resource "netapp-ontap_cluster_peers_resource" "cluster_peer" { + cx_profile_name = var.prime_clus_name # Source cluster profile + peer_cx_profile_name = var.dr_clus_name # Destination (peer) cluster profile + + remote = { + # Destination cluster (DR) intercluster LIF IPs + ip_addresses = aws_fsx_ontap_file_system.terraform-fsxn.endpoints[0].intercluster[0].ip_addresses + } + + source_details = { + # Source cluster (primary) intercluster LIF IPs + ip_addresses = data.aws_fsx_ontap_file_system.source_fsxn.endpoints[0].intercluster[0].ip_addresses + } + + # Optional: Add authentication, passphrase or any other required settings + # passphrase = var.cluster_peer_passphrase # Optional, if you use passphrase for peering + peer_applications = ["snapmirror"] +} + +resource "netapp-ontap_svm_peers_resource" "peer_svms" { + cx_profile_name = var.dr_clus_name + svm = { + name = aws_fsx_ontap_storage_virtual_machine.mysvm.name + } + peer = { + svm = { + name = var.prime_svm + } + cluster = { + name = var.prime_cluster_vserver + } + peer_cx_profile_name = var.prime_clus_name + } + applications = ["snapmirror", "flexcache"] + depends_on = [ + netapp-ontap_cluster_peers_resource.cluster_peer + ] +} + +locals { + dr_retention_parsed = jsondecode(var.dr_retention) +} + +resource "netapp-ontap_snapmirror_policy_resource" "snapmirror_policy_async" { + # required to know which system to interface with + cx_profile_name = var.dr_clus_name + name = var.dr_snapmirror_policy_name + svm_name = aws_fsx_ontap_storage_virtual_machine.mysvm.name + type = "async" + transfer_schedule_name = var.dr_transfer_schedule + retention = local.dr_retention_parsed +} + + +resource "netapp-ontap_snapmirror_resource" "snapmirror" { + for_each = data.netapp-ontap_storage_volume_data_source.src_vols + cx_profile_name = var.dr_clus_name + source_endpoint = { + path = join(":",[var.prime_svm,each.value.name]) + } + destination_endpoint = { + path = join(":",[aws_fsx_ontap_storage_virtual_machine.mysvm.name, "${each.value.name}_dp"]) + } + policy = { + name = netapp-ontap_snapmirror_policy_resource.snapmirror_policy_async.name + } + depends_on = [ + netapp-ontap_svm_peers_resource.peer_svms, + aws_fsx_ontap_volume.dp_volumes + ] +} diff --git a/Terraform/fsxn-replicate/output.tf b/Terraform/fsxn-replicate/output.tf new file mode 100644 index 0000000..c7e7ac8 --- /dev/null +++ b/Terraform/fsxn-replicate/output.tf @@ -0,0 +1,42 @@ +#output "volume_details" { +# value = { +# for key, volume in data.netapp-ontap_storage_volume_data_source.src_vols : key => { +# name = volume.name +# type = volume.type +# size = "${volume.space.size}${volume.space.size_unit}" +# } +# } +# description = "Details of the volumes including name, type, size, and size unit" +#} + +#output "data_from_aws_fsxn" { +# value = { +# all_of_it = data.aws_fsx_ontap_file_system.source_fsxn +# } +# description = "All data from aws fsxn provider" +#} + + +output "dr_fsxn_system" { + value = { + cluster_mgmt_ip = aws_fsx_ontap_file_system.terraform-fsxn.endpoints[0].management[0].ip_addresses + } + description = "Cluster management IP address of the created DR cluster" +} + +#output "replication_relationships" { +# value = { +# full_data = netapp-ontap_snapmirror_resource.snapmirror +# } +# description = "Replication relationships" +#} + +output "snapmirror_details" { + value = { for id, snapmirror 
+    source_path = snapmirror.source_endpoint.path
+    destination_path = snapmirror.destination_endpoint.path
+    policy_name = snapmirror.policy.name
+  } }
+  description = "A map of all SnapMirror details."
+}
diff --git a/Terraform/fsxn-replicate/security_groups.tf b/Terraform/fsxn-replicate/security_groups.tf
new file mode 100644
index 0000000..3de9588
--- /dev/null
+++ b/Terraform/fsxn-replicate/security_groups.tf
@@ -0,0 +1,279 @@
+/*
+ * The following defines a Security Group for FSx ONTAP that allows the required ports for NFS, CIFS,
+ * Kerberos, and iSCSI as well as SnapMirror.
+ *
+ * While you don't have to use this SG, one will need to be assigned to the FSx ONTAP file system,
+ * otherwise it won't be able to communicate with the clients.
+ *
+ * To not create the security group, set the variable dr_create_sg to false in the terraform.tfvars file.
+ * You will also need to set dr_security_group_ids to the IDs of the security groups you want to use.
+ *
+ */
+
+locals {
+  mycount = var.dr_create_sg ? 1 : 0
+  my_ref_sec_group_id = (var.dr_source_sg_id != "" ? var.dr_source_sg_id : null)
+  my_cidr = (var.dr_cidr_for_sg != "" ? var.dr_cidr_for_sg : null)
+}
+
+resource "aws_security_group" "fsx_sg" {
+  description = "Allow FSx ONTAP required ports"
+  count = local.mycount
+  name_prefix = var.dr_security_group_name_prefix
+  vpc_id = var.dr_vpc_id
+}
+
+# locals {
+#   my_security_group_id = aws_security_group.fsx_sg[count.index].id
+# }
+
+resource "aws_vpc_security_group_ingress_rule" "all_icmp" {
+  description = "Allow all ICMP traffic"
+  count = local.mycount
+  security_group_id = aws_security_group.fsx_sg[count.index].id
+  cidr_ipv4 = local.my_cidr
+  referenced_security_group_id = local.my_ref_sec_group_id
+  from_port = -1
+  to_port = -1
+  ip_protocol = "icmp"
+}
+
+resource "aws_vpc_security_group_ingress_rule" "nfs_tcp" {
+  description = "Remote procedure call for NFS"
+  count = local.mycount
+  security_group_id = aws_security_group.fsx_sg[count.index].id
+  cidr_ipv4 = local.my_cidr
+  referenced_security_group_id = local.my_ref_sec_group_id
+  from_port = 111
+  to_port = 111
+  ip_protocol = "tcp"
+}
+
+resource "aws_vpc_security_group_ingress_rule" "nfs_udp" {
+  description = "Remote procedure call for NFS"
+  count = local.mycount
+  security_group_id = aws_security_group.fsx_sg[count.index].id
+  cidr_ipv4 = local.my_cidr
+  referenced_security_group_id = local.my_ref_sec_group_id
+  from_port = 111
+  to_port = 111
+  ip_protocol = "udp"
+}
+
+resource "aws_vpc_security_group_ingress_rule" "cifs" {
+  description = "NetBIOS service session for CIFS"
+  count = local.mycount
+  security_group_id = aws_security_group.fsx_sg[count.index].id
+  cidr_ipv4 = local.my_cidr
+  referenced_security_group_id = local.my_ref_sec_group_id
+  from_port = 139
+  to_port = 139
+  ip_protocol = "tcp"
+}
+
+resource "aws_vpc_security_group_ingress_rule" "snmp_tcp" {
+  description = "Simple network management protocol for log collection"
+  count = local.mycount
+  security_group_id = aws_security_group.fsx_sg[count.index].id
+  cidr_ipv4 = local.my_cidr
+  referenced_security_group_id = local.my_ref_sec_group_id
+  from_port = 161
+  to_port = 162
+  ip_protocol = "tcp"
+}
+
+resource "aws_vpc_security_group_ingress_rule" "snmp_udp" {
+  description = "Simple network management protocol for log collection"
+  count = local.mycount
+  security_group_id = aws_security_group.fsx_sg[count.index].id
+  cidr_ipv4 = local.my_cidr
+  referenced_security_group_id = local.my_ref_sec_group_id
+  from_port = 161
+  to_port = 162
+  ip_protocol = "udp"
+}
+
+resource "aws_vpc_security_group_ingress_rule" "smb_cifs" {
+  description = "Microsoft SMB/CIFS over TCP with NetBIOS framing"
+  count = local.mycount
+  security_group_id = aws_security_group.fsx_sg[count.index].id
+  cidr_ipv4 = local.my_cidr
+  referenced_security_group_id = local.my_ref_sec_group_id
+  from_port = 445
+  to_port = 445
+  ip_protocol = "tcp"
+}
+
+resource "aws_vpc_security_group_ingress_rule" "nfs_mount_tcp" {
+  description = "NFS mount"
+  count = local.mycount
+  security_group_id = aws_security_group.fsx_sg[count.index].id
+  cidr_ipv4 = local.my_cidr
+  referenced_security_group_id = local.my_ref_sec_group_id
+  from_port = 635
+  to_port = 635
+  ip_protocol = "tcp"
+}
+
+resource "aws_vpc_security_group_ingress_rule" "kerberos" {
+  description = "Kerberos authentication"
+  count = local.mycount
+  security_group_id = aws_security_group.fsx_sg[count.index].id
+  cidr_ipv4 = local.my_cidr
+  referenced_security_group_id = local.my_ref_sec_group_id
+  from_port = 749
+  to_port = 749
+  ip_protocol = "tcp"
+}
+
+resource "aws_vpc_security_group_ingress_rule" "nfs_server_daemon" {
+  description = "NFS server daemon"
+  count = local.mycount
+  security_group_id = aws_security_group.fsx_sg[count.index].id
+  cidr_ipv4 = local.my_cidr
+  referenced_security_group_id = local.my_ref_sec_group_id
+  from_port = 2049
+  to_port = 2049
+  ip_protocol = "tcp"
+}
+
+resource "aws_vpc_security_group_ingress_rule" "nfs_server_daemon_udp" {
+  description = "NFS server daemon"
+  count = local.mycount
+  security_group_id = aws_security_group.fsx_sg[count.index].id
+  cidr_ipv4 = local.my_cidr
+  referenced_security_group_id = local.my_ref_sec_group_id
+  from_port = 2049
+  to_port = 2049
+  ip_protocol = "udp"
+}
+
+resource "aws_vpc_security_group_ingress_rule" "nfs_lock_daemon" {
+  description = "NFS lock daemon"
+  count = local.mycount
+  security_group_id = aws_security_group.fsx_sg[count.index].id
+  cidr_ipv4 = local.my_cidr
+  referenced_security_group_id = local.my_ref_sec_group_id
+  from_port = 4045
+  to_port = 4045
+  ip_protocol = "tcp"
+}
+
+resource "aws_vpc_security_group_ingress_rule" "nfs_lock_daemon_udp" {
+  description = "NFS lock daemon"
+  count = local.mycount
+  security_group_id = aws_security_group.fsx_sg[count.index].id
+  cidr_ipv4 = local.my_cidr
+  referenced_security_group_id = local.my_ref_sec_group_id
+  from_port = 4045
+  to_port = 4045
+  ip_protocol = "udp"
+}
+
+resource "aws_vpc_security_group_ingress_rule" "nfs_status_monitor" {
+  description = "Status monitor for NFS"
+  count = local.mycount
+  security_group_id = aws_security_group.fsx_sg[count.index].id
+  cidr_ipv4 = local.my_cidr
+  referenced_security_group_id = local.my_ref_sec_group_id
+  from_port = 4046
+  to_port = 4046
+  ip_protocol = "tcp"
+}
+
+resource "aws_vpc_security_group_ingress_rule" "nfs_status_monitor_udp" {
+  description = "Status monitor for NFS"
+  count = local.mycount
+  security_group_id = aws_security_group.fsx_sg[count.index].id
+  cidr_ipv4 = local.my_cidr
+  referenced_security_group_id = local.my_ref_sec_group_id
+  from_port = 4046
+  to_port = 4046
+  ip_protocol = "udp"
+}
+
+resource "aws_vpc_security_group_ingress_rule" "nfs_rquotad" {
+  description = "Remote quota server for NFS"
+  count = local.mycount
+  security_group_id = aws_security_group.fsx_sg[count.index].id
+  cidr_ipv4 = local.my_cidr
+  referenced_security_group_id = local.my_ref_sec_group_id
+  from_port = 4049
+  to_port = 4049
ip_protocol = "udp" +} + +resource "aws_vpc_security_group_ingress_rule" "iscsi_tcp" { + description = "iSCSI" + count = local.mycount + security_group_id = aws_security_group.fsx_sg[count.index].id + cidr_ipv4 = local.my_cidr + referenced_security_group_id = local.my_ref_sec_group_id + from_port = 3260 + to_port = 3260 + ip_protocol = "tcp" +} + +resource "aws_vpc_security_group_ingress_rule" "Snapmirror_Intercluster_communication" { + description = "Snapmirror Intercluster communication" + count = local.mycount + security_group_id = aws_security_group.fsx_sg[count.index].id + cidr_ipv4 = local.my_cidr + referenced_security_group_id = local.my_ref_sec_group_id + from_port = 11104 + to_port = 11104 + ip_protocol = "tcp" +} + +resource "aws_vpc_security_group_ingress_rule" "Snapmirror_data_transfer" { + description = "Snapmirror data transfer" + count = local.mycount + security_group_id = aws_security_group.fsx_sg[count.index].id + cidr_ipv4 = local.my_cidr + referenced_security_group_id = local.my_ref_sec_group_id + from_port = 11105 + to_port = 11105 + ip_protocol = "tcp" +} + +resource "aws_vpc_security_group_ingress_rule" "nfs_mount_udp" { + description = "NFS mount" + count = local.mycount + security_group_id = aws_security_group.fsx_sg[count.index].id + cidr_ipv4 = local.my_cidr + referenced_security_group_id = local.my_ref_sec_group_id + from_port = 635 + to_port = 635 + ip_protocol = "udp" +} + +resource "aws_vpc_security_group_ingress_rule" "ssh" { + description = "ssh" + count = local.mycount + security_group_id = aws_security_group.fsx_sg[count.index].id + cidr_ipv4 = local.my_cidr + referenced_security_group_id = local.my_ref_sec_group_id + from_port = 22 + to_port = 22 + ip_protocol = "tcp" +} + +resource "aws_vpc_security_group_ingress_rule" "s3_and_api" { + description = "Provice acccess to S3 and the ONTAP REST API" + count = local.mycount + security_group_id = aws_security_group.fsx_sg[count.index].id + cidr_ipv4 = local.my_cidr + referenced_security_group_id = local.my_ref_sec_group_id + from_port = 443 + to_port = 443 + ip_protocol = "tcp" +} + +resource "aws_vpc_security_group_egress_rule" "allow_all_traffic" { + count = local.mycount + description = "Allow all out bound traffic" + security_group_id = aws_security_group.fsx_sg[count.index].id + cidr_ipv4 = "0.0.0.0/0" + ip_protocol = "-1" +} diff --git a/Terraform/fsxn-replicate/terraform.sample.tfvars b/Terraform/fsxn-replicate/terraform.sample.tfvars new file mode 100644 index 0000000..caad928 --- /dev/null +++ b/Terraform/fsxn-replicate/terraform.sample.tfvars @@ -0,0 +1,24 @@ +# Variables for my environment. Source is Development FSxN system. + +# Primary FSxN variables +prime_hostname = "" +prime_fsxid = "fs-xxxxxxxxxxxxxxxxx" +prime_svm = "fsx" +prime_cluster_vserver = "FsxIdxxxxxxxxxxxxxxxx" +prime_aws_region = "us-west-2" +username_pass_secrets_id = "" +list_of_volumes_to_replicate = ["vol1", "vol2", "vol3"] + +# DR FSxN variables +dr_aws_region = "us-west-2" +dr_fsx_name = "terraform-dr-fsxn" +dr_fsx_subnets = { + "primarysub" = "subnet-11111111" + "secondarysub" = "subnet-33333333" + } +dr_svm_name = "fsx_dr" +dr_security_group_name_prefix = "fsxn-sg" +dr_vpc_id = "vpc-xxxxxxxx" +dr_username_pass_secrets_id = "" +dr_snapmirror_policy_name = "" +dr_retention = "[{ \"label\": \"weekly\", \"count\": 4 }, { \"label\": \"daily\", \"count\": 7 }]"