From 4152f0aa9951d4c24ff88548dcfcf69edb33398e Mon Sep 17 00:00:00 2001 From: John Fulton Date: Tue, 27 Aug 2024 18:29:06 -0400 Subject: [PATCH] Configure Manila with an NFS network Manila Tempest tests need to connect to the NFS share for Ganesha tests, and they use a special (openstack) network for that [1]. This patch adds an NFS network with VLAN 24 and range 172.21.0.0/24 in reproducers networking-definition.yml. It also adds a multus range for this network so that the Tempest pod can access this network for testing. The NFS network is added to the OCP nodes for the same reason. The podified-multinode-hci-deployment-crc job is updated so that the tempest pod is attached to the storage network since the storage network is the fallback network if the NFS network is not defined in a job (the NFS network cannot be used in this job since [2] depends on this patch). This patch allows the RGW VIP and NFS VIP to not be the same. The variables cifmw_cephadm_rgw_network, cifmw_cephadm_rgw_vip, cifmw_cephadm_nfs_network and cifmw_cephadm_nfs_vip are now used instead of the shared cifmw_cephadm_vip variable. This patch updates playbook manila_create_default_resources.yml so that when CI for manila runs, a provider network is created. Variables manila_provider_network_{name,vlan,start,end,range} default to the storage network, but can be overridden to the NFS network within a job definition. 
[1] https://opendev.org/openstack/manila-tempest-plugin/src/branch/master/manila_tempest_tests/config.py#L99 [2] https://github.com/openstack-k8s-operators/architecture/pull/405 Jira: https://issues.redhat.com/browse/OSPRH-7417 Signed-off-by: John Fulton --- .../manila_create_default_resources.yml | 24 ++++ playbooks/ceph.yml | 118 ++++++++++++++---- roles/cifmw_cephadm/README.md | 15 ++- roles/cifmw_cephadm/tasks/check_vip.yml | 4 +- .../cifmw_cephadm/tasks/configure_object.yml | 4 +- roles/cifmw_cephadm/tasks/rgw.yml | 4 +- roles/cifmw_cephadm/templates/ceph_rgw.yml.j2 | 2 +- .../reproducers/networking-definition.yml | 17 +++ zuul.d/edpm_multinode.yaml | 12 +- 9 files changed, 162 insertions(+), 38 deletions(-) diff --git a/hooks/playbooks/manila_create_default_resources.yml b/hooks/playbooks/manila_create_default_resources.yml index df4caac499..1a02215b2e 100644 --- a/hooks/playbooks/manila_create_default_resources.yml +++ b/hooks/playbooks/manila_create_default_resources.yml @@ -8,7 +8,31 @@ extra_specs: snapshot_support: "True" create_share_from_snapshot_support: "True" + manila_provider_network_name: storage + manila_provider_network_vlan: 21 + manila_provider_network_start: 172.18.0.150 + manila_provider_network_end: 172.18.0.200 + manila_provider_network_range: 172.18.0.0/24 tasks: + - name: Create Manila provider network with Neutron for instance to access Manila + environment: + KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}" + PATH: "{{ cifmw_path }}" + ansible.builtin.command: | + oc -n {{ namespace }} exec -it pod/openstackclient -- {{ item }} + loop: + - "openstack network create {{ manila_provider_network_name }} --share --provider-network-type vlan --provider-physical-network datacentre --provider-segment {{ manila_provider_network_vlan }}" + - "openstack subnet create --allocation-pool start={{ manila_provider_network_start }},end={{ manila_provider_network_end }} --dhcp --network {{ manila_provider_network_name }} --subnet-range {{ 
manila_provider_network_range }} --gateway none {{ manila_provider_network_name }}-subnet" + register: _manila_provider_network_creation + failed_when: >- + ( _manila_provider_network_creation.rc | int ) != 0 + when: + - manila_provider_network_name | length > 0 + - manila_provider_network_vlan | length > 0 + - manila_provider_network_start | length > 0 + - manila_provider_network_end | length > 0 + - manila_provider_network_range | length > 0 + - name: Create share type default for manila tempest plugin tests environment: KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}" diff --git a/playbooks/ceph.yml b/playbooks/ceph.yml index 7ad7b219cb..f4e3c8f408 100644 --- a/playbooks/ceph.yml +++ b/playbooks/ceph.yml @@ -339,36 +339,114 @@ # public network always exist because is provided by the ceph_spec role - name: Get Storage network range ansible.builtin.set_fact: - cifmw_cephadm_rgw_network: "{{ lookup('ansible.builtin.ini', 'public_network section=global file=' ~ cifmw_cephadm_bootstrap_conf) }}" + cifmw_cephadm_storage_network: "{{ lookup('ansible.builtin.ini', 'public_network section=global file=' ~ cifmw_cephadm_bootstrap_conf) }}" - name: Set IP address of first monitor ansible.builtin.set_fact: - cifmw_cephadm_first_mon_ip: "{{ hostvars[this_host][all_addresses] | ansible.utils.ipaddr(cifmw_cephadm_rgw_network) | first }}" + cifmw_cephadm_first_mon_ip: "{{ hostvars[this_host][all_addresses] | ansible.utils.ipaddr(cifmw_cephadm_storage_network) | first }}" vars: this_host: "{{ _target_hosts | first }}" - name: Assert if any EDPM nodes n/w interface is missing in storage network ansible.builtin.assert: that: - - hostvars[item][all_addresses] | ansible.utils.ipaddr(cifmw_cephadm_rgw_network) | length > 0 - fail_msg: "node {{ item }} doesn't have any interface connected to network {{ cifmw_cephadm_rgw_network }}" + - hostvars[item][all_addresses] | ansible.utils.ipaddr(cifmw_cephadm_storage_network) | length > 0 + fail_msg: "node {{ item }} doesn't have any interface 
connected to network {{ cifmw_cephadm_storage_network }}" loop: "{{ _target_hosts }}" - - name: Get already assigned IP addresses - ansible.builtin.set_fact: - ips: "{{ ips | default([]) + [ hostvars[item][all_addresses] | ansible.utils.ipaddr(cifmw_cephadm_rgw_network) | first ] }}" - loop: "{{ _target_hosts }}" + - name: Set RGW Network Properties + when: + - cifmw_ceph_daemons_layout.rgw_enabled | default(true) | bool + block: + - name: Set RGW network range to storage network only if it was not provided + ansible.builtin.set_fact: + cifmw_cephadm_rgw_network: "{{ cifmw_cephadm_storage_network }}" + when: + - cifmw_cephadm_rgw_network is not defined or + cifmw_cephadm_rgw_network | length == 0 + + - name: Assert if any EDPM nodes n/w interface is missing in RGW network + ansible.builtin.assert: + that: + - hostvars[item][all_addresses] | ansible.utils.ipaddr(cifmw_cephadm_rgw_network) | length > 0 + fail_msg: "node {{ item }} doesn't have any interface connected to network {{ cifmw_cephadm_rgw_network }}" + loop: "{{ _target_hosts }}" + when: + - cifmw_cephadm_rgw_network != cifmw_cephadm_storage_network - # cifmw_cephadm_vip is the VIP reserved in the Storage network - - name: Set VIP var as empty string - ansible.builtin.set_fact: - cifmw_cephadm_vip: "" + - name: Get already assigned RGW IP addresses + ansible.builtin.set_fact: + ips: "{{ ips | default([]) + [ hostvars[item][all_addresses] | ansible.utils.ipaddr(cifmw_cephadm_rgw_network) | first ] }}" + loop: "{{ _target_hosts }}" - - name: Process VIP - ansible.builtin.include_role: - name: cifmw_cephadm - tasks_from: check_vip - loop: "{{ range(1, (ips | length) + 1) | list }}" + - name: Set VIP var as empty string + ansible.builtin.set_fact: + cifmw_cephadm_vip: "" + when: + - cifmw_cephadm_rgw_vip is undefined + + - name: Get RGW VIP + ansible.builtin.include_role: + name: cifmw_cephadm + tasks_from: check_vip + loop: "{{ range(1, (ips | length) + 1) | list }}" + vars: + cifmw_cephadm_vip_network: "{{ 
cifmw_cephadm_rgw_network | default(storage_network_range, true) | default(ssh_network_range, true) }}" + when: + - cifmw_cephadm_rgw_vip is undefined + + - name: Set RGW VIP + ansible.builtin.set_fact: + cifmw_cephadm_rgw_vip: "{{ cifmw_cephadm_vip }}/{{ cidr }}" + when: + - cifmw_cephadm_rgw_vip is undefined + + - name: Set NFS Network Properties + when: + - cifmw_ceph_daemons_layout.ceph_nfs_enabled | default(false) | bool + block: + - name: Set NFS network range to storage network only if it was not provided + ansible.builtin.set_fact: + cifmw_cephadm_nfs_network: "{{ cifmw_cephadm_storage_network }}" + when: + - cifmw_cephadm_nfs_network is not defined or + cifmw_cephadm_nfs_network | length == 0 + + - name: Assert if any EDPM nodes n/w interface is missing in NFS network + ansible.builtin.assert: + that: + - hostvars[item][all_addresses] | ansible.utils.ipaddr(cifmw_cephadm_nfs_network) | length > 0 + fail_msg: "node {{ item }} doesn't have any interface connected to network {{ cifmw_cephadm_nfs_network }}" + loop: "{{ _target_hosts }}" + when: + - cifmw_cephadm_nfs_network != cifmw_cephadm_storage_network + + - name: Get already assigned NFS IP addresses + ansible.builtin.set_fact: + ips: "{{ ips | default([]) + [ hostvars[item][all_addresses] | ansible.utils.ipaddr(cifmw_cephadm_nfs_network) | first ] }}" + loop: "{{ _target_hosts }}" + + - name: Set VIP var as empty string + ansible.builtin.set_fact: + cifmw_cephadm_vip: "" + when: + - cifmw_cephadm_nfs_vip is undefined + + - name: Get NFS VIP + ansible.builtin.include_role: + name: cifmw_cephadm + tasks_from: check_vip + loop: "{{ range(1, (ips | length) + 1) | list }}" + vars: + cifmw_cephadm_vip_network: "{{ cifmw_cephadm_nfs_network | default(storage_network_range, true) | default(ssh_network_range, true) }}" + when: + - cifmw_cephadm_nfs_vip is undefined + + - name: Set NFS VIP + ansible.builtin.set_fact: + cifmw_cephadm_nfs_vip: "{{ cifmw_cephadm_vip }}/{{ cidr }}" + when: + - cifmw_cephadm_nfs_vip 
is undefined tasks: - name: Satisfy Ceph prerequisites @@ -401,9 +479,6 @@ ansible.builtin.import_role: name: cifmw_cephadm tasks_from: rgw - vars: - # cifmw_cephadm_vip is computed or passed as an override via -e @extra.yml - cifmw_cephadm_rgw_vip: "{{ cifmw_cephadm_vip }}/{{ cidr }}" - name: Configure Monitoring Stack when: cifmw_ceph_daemons_layout.dashboard_enabled | default(false) | bool @@ -427,9 +502,6 @@ ansible.builtin.import_role: name: cifmw_cephadm tasks_from: cephnfs - vars: - # we reuse the same VIP reserved for rgw - cifmw_cephadm_nfs_vip: "{{ cifmw_cephadm_vip }}/{{ cidr }}" - name: Create Cephx Keys for OpenStack ansible.builtin.import_role: diff --git a/roles/cifmw_cephadm/README.md b/roles/cifmw_cephadm/README.md index 661681dbae..c13de3ede7 100644 --- a/roles/cifmw_cephadm/README.md +++ b/roles/cifmw_cephadm/README.md @@ -77,18 +77,21 @@ need to be changed for a typical EDPM deployment. is gathered from the `cifmw_cephadm_bootstrap_conf` file, which represents the initial Ceph configuration file passed at bootstrap time. -* `cifmw_cephadm_rgw_network`: the Ceph `public_network` where the `radosgw` - instances should be bound. The network range is gathered from the - `cifmw_cephadm_bootstrap_conf` file, which represents the initial Ceph - configuration file passed at bootstrap time. +* `cifmw_cephadm_rgw_network`: The network where the `radosgw` + instances will be bound. If this value is not passed then the Ceph + `public_network` which represents the initial Ceph configuration + file passed at bootstrap time. + +* `cifmw_cephadm_nfs_network`: The network for NFS `ganesha`. If this + value is not passed then the Ceph `public_network` which represents + the initial Ceph configuration file passed at bootstrap time. * `cifmw_cephadm_rgw_vip`: the ingress daemon deployed along with `radosgw` requires a `VIP` that will be owned by `keepalived`. This IP address will be used as entry point to reach the `radosgw backends` through `haproxy`. 
* `cifmw_cephadm_nfs_vip`: the ingress daemon deployed along with the `nfs` - cluster requires a `VIP` that will be owned by `keepalived`. This IP - address is the same used for rgw unless an override is passed, and it's + cluster requires a `VIP` that will be owned by `keepalived`. This IP is used as entry point to reach the `ganesha backends` through an `haproxy` instance where proxy-protocol is enabled. diff --git a/roles/cifmw_cephadm/tasks/check_vip.yml b/roles/cifmw_cephadm/tasks/check_vip.yml index 0714510e7a..d7a5f39f45 100644 --- a/roles/cifmw_cephadm/tasks/check_vip.yml +++ b/roles/cifmw_cephadm/tasks/check_vip.yml @@ -22,9 +22,9 @@ ansible.builtin.set_fact: count: "{{ 2 if count is undefined else count | int + 2 }}" - - name: Get an IP address from the Storage network + - name: Get an IP address from the VIP network ansible.builtin.set_fact: - cur_ip: "{{ cifmw_cephadm_rgw_network | ansible.utils.next_nth_usable(count) }}" + cur_ip: "{{ cifmw_cephadm_vip_network | ansible.utils.next_nth_usable(count) }}" - name: Reserve VIP if the address is available ansible.builtin.set_fact: diff --git a/roles/cifmw_cephadm/tasks/configure_object.yml b/roles/cifmw_cephadm/tasks/configure_object.yml index 2540fb1717..db1da9e2ba 100644 --- a/roles/cifmw_cephadm/tasks/configure_object.yml +++ b/roles/cifmw_cephadm/tasks/configure_object.yml @@ -92,8 +92,8 @@ script: |- oc -n {{ cifmw_cephadm_ns }} rsh openstackclient openstack role add --user {{ all_uuids.results.0.stdout }} --project {{ project_service_uuid.stdout }} {{ all_uuids.results.2.stdout }} oc -n {{ cifmw_cephadm_ns }} rsh openstackclient openstack role add --user {{ all_uuids.results.0.stdout }} --project {{ project_service_uuid.stdout }} {{ all_uuids.results.3.stdout }} - oc -n {{ cifmw_cephadm_ns }} rsh openstackclient openstack endpoint create --region regionOne {{ all_uuids.results.1.stdout }} public {{ cifmw_cephadm_urischeme }}://{{ cifmw_external_dns_vip_ext.values() | first if cifmw_external_dns_vip_ext 
is defined else cifmw_cephadm_vip }}:8080/swift/v1/AUTH_%\(tenant_id\)s - oc -n {{ cifmw_cephadm_ns }} rsh openstackclient openstack endpoint create --region regionOne {{ all_uuids.results.1.stdout }} internal {{ cifmw_cephadm_urischeme }}://{{ cifmw_external_dns_vip_int.values() | first if cifmw_external_dns_vip_int is defined else cifmw_cephadm_vip }}:8080/swift/v1/AUTH_%\(tenant_id\)s + oc -n {{ cifmw_cephadm_ns }} rsh openstackclient openstack endpoint create --region regionOne {{ all_uuids.results.1.stdout }} public {{ cifmw_cephadm_urischeme }}://{{ cifmw_external_dns_vip_ext.values() | first if cifmw_external_dns_vip_ext is defined else cifmw_cephadm_rgw_vip }}:8080/swift/v1/AUTH_%\(tenant_id\)s + oc -n {{ cifmw_cephadm_ns }} rsh openstackclient openstack endpoint create --region regionOne {{ all_uuids.results.1.stdout }} internal {{ cifmw_cephadm_urischeme }}://{{ cifmw_external_dns_vip_int.values() | first if cifmw_external_dns_vip_int is defined else cifmw_cephadm_rgw_vip }}:8080/swift/v1/AUTH_%\(tenant_id\)s oc -n {{ cifmw_cephadm_ns }} rsh openstackclient openstack role add --project {{ all_uuids.results.4.stdout }} --user {{ all_uuids.results.5.stdout }} {{ all_uuids.results.6.stdout }} delegate_to: localhost when: diff --git a/roles/cifmw_cephadm/tasks/rgw.yml b/roles/cifmw_cephadm/tasks/rgw.yml index 4a600c1867..f50f5174c0 100644 --- a/roles/cifmw_cephadm/tasks/rgw.yml +++ b/roles/cifmw_cephadm/tasks/rgw.yml @@ -27,12 +27,12 @@ - name: Define cifmw_external_dns_vip_ext ansible.builtin.set_fact: cifmw_external_dns_vip_ext: "{{ cifmw_external_dns_vip_ext | default({}) | - combine({ (cifmw_cephadm_vip): 'rgw-external.ceph.local' }) }}" + combine({ (cifmw_cephadm_rgw_vip): 'rgw-external.ceph.local' }) }}" - name: Define cifmw_external_dns_vip_int ansible.builtin.set_fact: cifmw_external_dns_vip_int: "{{ cifmw_external_dns_vip_ext | default({}) | - combine({ (cifmw_cephadm_vip): 'rgw-internal.ceph.local' }) }}" + combine({ (cifmw_cephadm_rgw_vip): 
'rgw-internal.ceph.local' }) }}" - name: Create DNS domain and certificate ansible.builtin.include_role: diff --git a/roles/cifmw_cephadm/templates/ceph_rgw.yml.j2 b/roles/cifmw_cephadm/templates/ceph_rgw.yml.j2 index dbf42ae222..ff01d39550 100644 --- a/roles/cifmw_cephadm/templates/ceph_rgw.yml.j2 +++ b/roles/cifmw_cephadm/templates/ceph_rgw.yml.j2 @@ -31,7 +31,7 @@ spec: monitor_port: 8999 virtual_interface_networks: - {{ cifmw_cephadm_rgw_network }} - virtual_ip: {{ cifmw_cephadm_vip }} + virtual_ip: {{ cifmw_cephadm_rgw_vip }} {% if rgw_frontend_cert is defined %} ssl_cert: | {{ rgw_frontend_cert | indent( width=6 ) }} diff --git a/scenarios/reproducers/networking-definition.yml b/scenarios/reproducers/networking-definition.yml index 4f0ba0ed26..351e25bec1 100644 --- a/scenarios/reproducers/networking-definition.yml +++ b/scenarios/reproducers/networking-definition.yml @@ -91,6 +91,19 @@ cifmw_networking_definition: end: 250 vlan: 23 mtu: 1500 + nfs: + network: "172.21.0.0/24" + tools: + netconfig: + ranges: + - start: 100 + end: 250 + multus: + ranges: + - start: 30 + end: 70 + vlan: 24 + mtu: 1500 group-templates: ocps: @@ -106,6 +119,8 @@ cifmw_networking_definition: trunk-parent: ctlplane storage: trunk-parent: ctlplane + nfs: + trunk-parent: ctlplane ocp_workers: network-template: range: @@ -127,6 +142,8 @@ cifmw_networking_definition: trunk-parent: ctlplane storagemgmt: trunk-parent: ctlplane + nfs: + trunk-parent: ctlplane cephs: network-template: range: diff --git a/zuul.d/edpm_multinode.yaml b/zuul.d/edpm_multinode.yaml index 32465a3cbe..c2c18931ee 100644 --- a/zuul.d/edpm_multinode.yaml +++ b/zuul.d/edpm_multinode.yaml @@ -339,10 +339,18 @@ - '@scenarios/centos-9/multinode-ci.yml' - '@scenarios/centos-9/hci_ceph_backends.yml' cifmw_ceph_daemons_layout: - rgw_enabled: true + rgw_enabled: false dashboard_enabled: true - cephfs_enabled: true + cephfs_enabled: false ceph_nfs_enabled: true + cifmw_test_operator_tempest_network_attachments: + - ctlplane + - 
storage + manila_provider_network_name: "" + manila_provider_network_vlan: "" + manila_provider_network_start: "" + manila_provider_network_end: "" + manila_provider_network_range: "" files: - ^hooks/playbooks/control_plane_ceph_backends.yml - ^hooks/playbooks/control_plane_hci_pre_deploy.yml