From 9ce398e8ef8a20ffdfcdfa16bce35a422b31da40 Mon Sep 17 00:00:00 2001 From: oriyarde <57443811+oriyarde@users.noreply.github.com> Date: Thu, 30 Sep 2021 21:01:34 +0300 Subject: [PATCH] GA v1.7.0 merge develop to master branch (#381) --- Dockerfile-csi-controller | 6 +- Dockerfile-csi-controller.test | 2 +- Dockerfile-csi-node | 4 +- Dockerfile-csi-test | 2 +- NOTICES | 12 +- README.md | 404 +----- common/config.yaml | 2 +- controller/array_action/array_action_types.py | 49 +- .../array_action/array_mediator_abstract.py | 2 +- .../array_action/array_mediator_ds8k.py | 196 +-- .../array_action/array_mediator_interface.py | 123 +- controller/array_action/array_mediator_svc.py | 393 +++++- controller/array_action/array_mediator_xiv.py | 73 +- controller/array_action/config.py | 3 + controller/array_action/errors.py | 8 +- controller/array_action/messages.py | 2 +- controller/common/node_info.py | 2 +- controller/controller_server/addons_server.py | 161 +++ controller/controller_server/config.py | 12 +- .../controller_server_manager.py | 48 + .../controller_server/controller_types.py | 1 + .../csi_controller_server.py | 449 +++---- .../controller_server/exception_handler.py | 24 +- controller/controller_server/main.py | 20 + controller/controller_server/messages.py | 12 +- controller/controller_server/test_settings.py | 7 + controller/controller_server/utils.py | 226 +++- controller/csi_general/replication_pb2.py | 1092 +++++++++++++++++ .../csi_general/replication_pb2_grpc.py | 116 ++ controller/scripts/csi_pb2.sh | 23 +- controller/scripts/entrypoint.sh | 2 +- controller/scripts/lint.ini | 6 +- controller/scripts/unitests.sh | 5 +- .../ds8k/test_array_mediator_ds8k.py | 219 ++-- .../svc/array_mediator_svc_test.py | 285 +++-- .../xiv/array_mediator_xiv_tests.py | 107 +- .../controller_server/addons_server_test.py | 59 + .../csi_controller_server_test.py | 597 +++++---- .../tests/controller_server/utils_test.py | 158 ++- controller/tests/utils.py | 40 +- 
.../examples/demo-pvc-file-system.yaml | 2 +- .../examples/demo-pvc-from-snapshot.yaml | 2 +- .../examples/demo-secret-config.json | 24 + .../examples/demo-snapshotclass.yaml | 11 - .../demo-statefulset-file-system.yaml | 27 - .../examples/demo-statefulset-raw-block.yaml | 27 - ...et-combined.yaml => demo-statefulset.yaml} | 2 +- .../demo-storageclass-config-secret.yaml | 19 + .../examples/demo-storageclass.yaml | 15 +- .../examples/demo-volumereplication.yaml | 12 + .../examples/demo-volumereplicationclass.yaml | 12 + ...snapshot.yaml => demo-volumesnapshot.yaml} | 4 +- ...emo-volumesnapshotclass-config-secret.yaml | 17 + .../examples/demo-volumesnapshotclass.yaml | 13 + docs/SUMMARY.md | 16 +- ...block_storage_CSI_driver_1.7.0_RN.ditamap} | 5 +- ...block_storage_CSI_driver_1.7.0_UG.ditamap} | 5 +- docs/book_files/csi_block_storage_kc_pdfs.md | 6 +- docs/book_files/csi_block_storage_kc_rn.md | 2 +- .../csi_block_storage_kc_welcome.md | 2 +- .../csi_block_storage_kc_whatsnew.md | 4 +- docs/book_files/csi_rn_content.ditamap | 5 +- docs/book_files/csi_rn_edition_notice.dita | 4 +- docs/book_files/csi_ug_content.ditamap | 30 +- docs/content/configuration/csi_ug_config.md | 26 +- .../csi_ug_config_advanced_importvol.md | 215 ++-- .../configuration/csi_ug_config_create_pvc.md | 146 ++- .../csi_ug_config_create_replication.md | 50 + .../csi_ug_config_create_secret.md | 37 +- .../csi_ug_config_create_secret_topology.md | 46 + .../csi_ug_config_create_snapshots.md | 17 +- .../csi_ug_config_create_statefulset.md | 191 ++- .../csi_ug_config_create_storageclasses.md | 53 +- ...g_config_create_storageclasses_topology.md | 46 + ...i_ug_config_create_vol_replicationclass.md | 33 + .../csi_ug_config_create_vol_snapshotclass.md | 24 +- ...onfig_create_vol_snapshotclass_topology.md | 44 + .../configuration/csi_ug_config_expand_pvc.md | 4 +- ...csi_ug_config_replication_find_systemid.md | 8 + .../configuration/csi_ug_config_topology.md | 9 + docs/content/csi_overview.md | 4 +- 
.../csi_ug_install_operator_github.md | 32 +- .../csi_ug_install_operator_openshift.md | 4 +- .../csi_ug_install_operator_operatorhub.md | 2 +- .../installation/csi_ug_requirements.md | 62 +- .../installation/csi_ug_uninstall_github.md | 8 +- .../csi_ug_uninstall_operatorhub.md | 2 +- docs/content/installation/csi_ug_upgrade.md | 2 +- .../release_notes/csi_rn_changelog_1.5.1.md | 3 + .../release_notes/csi_rn_changelog_1.6.0.md | 2 +- .../release_notes/csi_rn_changelog_1.7.0.md | 6 + .../release_notes/csi_rn_compatibility.md | 2 +- .../release_notes/csi_rn_edition_notice.md | 4 +- .../release_notes/csi_rn_knownissues.md | 7 +- .../release_notes/csi_rn_limitations.md | 18 +- .../csi_rn_supported_orchestration.md | 9 +- .../release_notes/csi_rn_supported_os.md | 7 +- .../release_notes/csi_rn_supported_storage.md | 10 +- docs/content/release_notes/csi_rn_whatsnew.md | 20 +- .../csi_ug_troubleshooting_detect_errors.md | 21 - .../csi_ug_troubleshooting_logs.md | 28 +- .../csi_ug_troubleshooting_misc.md | 2 +- .../csi_ug_troubleshooting_misc.md.dcsbackup | 46 - .../csi_ug_troubleshooting_misc.md.tminfo | 10 - .../csi_ug_troubleshooting_node_crash.md | 56 +- docs/content/using/csi_ug_using_sample.md | 32 +- .../device_connectivity_helper_scsigeneric.go | 14 +- node/pkg/driver/node.go | 7 +- node/pkg/driver/node_test.go | 13 +- node/pkg/driver/node_utils.go | 30 +- node/pkg/driver/node_utils_test.go | 11 +- node/pkg/driver/version_test.go | 4 +- reusables/doc-resources.md | 22 - .../ci/{jenkins_pipeline_csi => Jenkinsfile} | 5 + .../ci/jenkins_pipeline_community_csi_test | 83 -- scripts/ci/run_csi_test_client.sh | 8 +- scripts/csi_test/common_csi_tests_to_skip | 5 + .../csi_test/community_a9k_csi_tests_to_skip | 0 .../csi_test/community_ds8k_csi_tests_to_skip | 1 + .../csi_test/community_svc_csi_tests_to_skip | 4 + scripts/csi_test/csi_params | 2 +- scripts/csi_test/csi_tests_to_run | 18 - scripts/csi_test/entrypoint-csi-tests.sh | 6 +- 123 files changed, 4455 
insertions(+), 2344 deletions(-) create mode 100644 controller/controller_server/addons_server.py create mode 100644 controller/controller_server/controller_server_manager.py create mode 100644 controller/controller_server/main.py create mode 100644 controller/csi_general/replication_pb2.py create mode 100644 controller/csi_general/replication_pb2_grpc.py create mode 100644 controller/tests/controller_server/addons_server_test.py create mode 100644 deploy/kubernetes/examples/demo-secret-config.json delete mode 100644 deploy/kubernetes/examples/demo-snapshotclass.yaml delete mode 100644 deploy/kubernetes/examples/demo-statefulset-file-system.yaml delete mode 100644 deploy/kubernetes/examples/demo-statefulset-raw-block.yaml rename deploy/kubernetes/examples/{demo-statefulset-combined.yaml => demo-statefulset.yaml} (96%) create mode 100644 deploy/kubernetes/examples/demo-storageclass-config-secret.yaml create mode 100644 deploy/kubernetes/examples/demo-volumereplication.yaml create mode 100644 deploy/kubernetes/examples/demo-volumereplicationclass.yaml rename deploy/kubernetes/examples/{demo-snapshot.yaml => demo-volumesnapshot.yaml} (64%) create mode 100644 deploy/kubernetes/examples/demo-volumesnapshotclass-config-secret.yaml create mode 100644 deploy/kubernetes/examples/demo-volumesnapshotclass.yaml rename docs/book_files/{IBM_block_storage_CSI_driver_1.6.0_RN.ditamap => IBM_block_storage_CSI_driver_1.7.0_RN.ditamap} (92%) rename docs/book_files/{IBM_block_storage_CSI_driver_1.6.0_UG.ditamap => IBM_block_storage_CSI_driver_1.7.0_UG.ditamap} (94%) create mode 100644 docs/content/configuration/csi_ug_config_create_replication.md create mode 100644 docs/content/configuration/csi_ug_config_create_secret_topology.md create mode 100644 docs/content/configuration/csi_ug_config_create_storageclasses_topology.md create mode 100644 docs/content/configuration/csi_ug_config_create_vol_replicationclass.md create mode 100644 
docs/content/configuration/csi_ug_config_create_vol_snapshotclass_topology.md create mode 100644 docs/content/configuration/csi_ug_config_replication_find_systemid.md create mode 100644 docs/content/configuration/csi_ug_config_topology.md create mode 100644 docs/content/release_notes/csi_rn_changelog_1.5.1.md create mode 100644 docs/content/release_notes/csi_rn_changelog_1.7.0.md delete mode 100644 docs/content/troubleshooting/csi_ug_troubleshooting_detect_errors.md delete mode 100644 docs/content/troubleshooting/csi_ug_troubleshooting_misc.md.dcsbackup delete mode 100644 docs/content/troubleshooting/csi_ug_troubleshooting_misc.md.tminfo delete mode 100644 reusables/doc-resources.md rename scripts/ci/{jenkins_pipeline_csi => Jenkinsfile} (93%) delete mode 100644 scripts/ci/jenkins_pipeline_community_csi_test create mode 100644 scripts/csi_test/common_csi_tests_to_skip create mode 100644 scripts/csi_test/community_a9k_csi_tests_to_skip create mode 100644 scripts/csi_test/community_ds8k_csi_tests_to_skip create mode 100644 scripts/csi_test/community_svc_csi_tests_to_skip delete mode 100644 scripts/csi_test/csi_tests_to_run diff --git a/Dockerfile-csi-controller b/Dockerfile-csi-controller index aba7bb029..9d371c215 100644 --- a/Dockerfile-csi-controller +++ b/Dockerfile-csi-controller @@ -12,10 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-FROM registry.access.redhat.com/ubi8/python-38:1-60 +FROM registry.access.redhat.com/ubi8/python-38:1-68 MAINTAINER IBM Storage -ARG VERSION=1.6.0 +ARG VERSION=1.7.0 ARG BUILD_NUMBER=0 ###Required Labels @@ -33,8 +33,6 @@ COPY controller/requirements.txt /driver/controller/ RUN pip3 install --default-timeout=100 --upgrade pip==19.3.1 # avoid default boringssl lib, since it does not support z systems ENV GRPC_PYTHON_BUILD_SYSTEM_OPENSSL=True -# TODO: remove CRYPTOGRAPHY_ALLOW_OPENSSL_102 when upgrading to ubi8 -ENV CRYPTOGRAPHY_ALLOW_OPENSSL_102=true ENV CRYPTOGRAPHY_DONT_BUILD_RUST=1 RUN pip3 install -r /driver/controller/requirements.txt diff --git a/Dockerfile-csi-controller.test b/Dockerfile-csi-controller.test index 1a369c3ec..2c4e616f7 100644 --- a/Dockerfile-csi-controller.test +++ b/Dockerfile-csi-controller.test @@ -16,7 +16,7 @@ # This Dockerfile.test is for running the csi controller local tests inside a container. # Its similar to the Dockerfile, but with additional requirements-tests.txt and ENTRYPOINT to run the local tests. -FROM registry.access.redhat.com/ubi8/python-38:1-60 +FROM registry.access.redhat.com/ubi8/python-38:1-68 COPY controller/requirements.txt /driver/controller/ RUN pip3 install --upgrade pip==19.3.1 diff --git a/Dockerfile-csi-node b/Dockerfile-csi-node index a70262e03..ead546680 100644 --- a/Dockerfile-csi-node +++ b/Dockerfile-csi-node @@ -27,10 +27,10 @@ COPY . . 
RUN make ibm-block-csi-driver # Final stage -FROM registry.access.redhat.com/ubi8/ubi-minimal:8.4-200 +FROM registry.access.redhat.com/ubi8/ubi-minimal:8.4-208 MAINTAINER IBM Storage -ARG VERSION=1.6.0 +ARG VERSION=1.7.0 ARG BUILD_NUMBER=0 LABEL name="IBM block storage CSI driver node" \ diff --git a/Dockerfile-csi-test b/Dockerfile-csi-test index 5f6e3cd63..5422847ab 100644 --- a/Dockerfile-csi-test +++ b/Dockerfile-csi-test @@ -33,6 +33,6 @@ ENV SECRET_FILE="/usr/local/go/src/github.com/kubernetes-csi/csi-test/ibm-driver ENV PARAM_FILE="/usr/local/go/src/github.com/kubernetes-csi/csi-test/ibm-driver/${CSI_PARAMS}" ENV ENDPOINT="/tmp/k8s_dir/nodecsi" ENV ENDPOINT_CONTROLLER="/tmp/k8s_dir/f" -ENV TESTS_TO_RUN_FILE="/usr/local/go/src/github.com/kubernetes-csi/csi-test/ibm-driver/csi_tests_to_run" +ENV TESTS_TO_SKIP_FILE="/usr/local/go/src/github.com/kubernetes-csi/csi-test/ibm-driver/csi_tests_to_skip" ENTRYPOINT ["/usr/local/go/src/github.com/kubernetes-csi/csi-test/ibm-driver/entrypoint-csi-tests.sh"] diff --git a/NOTICES b/NOTICES index 369dce34b..70e3f39be 100644 --- a/NOTICES +++ b/NOTICES @@ -5,7 +5,7 @@ This file details additional third party software license agreements and third party notices and information that are required to be reproduced for the following programs: -IBM Block Storage CSI Driver version 1.6.0 +IBM Block Storage CSI Driver version 1.7.0 @@ -1409,7 +1409,7 @@ End of GNU GPL Version 3.0 License =========================================================================== END OF TERMS AND CONDITIONS FOR SEPARATELY LICENSED CODE for IBM Block -Storage CSI Driver version 1.6.0 +Storage CSI Driver version 1.7.0 =========================================================================== @@ -1420,7 +1420,7 @@ Storage CSI Driver version 1.6.0 GNU GPL and / or LGPL Source Code for: -IBM Block Storage CSI Driver 1.6.0 +IBM Block Storage CSI Driver 1.7.0 =========================================================================== @@ -1439,7 +1439,7 @@ 
General Public License 2.0. Red Hat Universal Base Image 8 Python 3.8 Source code to any of the above-listed packages distributed with IBM -Block Storage CSI Driver 1.6.0 is available at the website below, when +Block Storage CSI Driver 1.7.0 is available at the website below, when a URL is provided, or by sending a request to the following address or email: @@ -1468,7 +1468,7 @@ General Public License 3.0. Red Hat Universal Base Image 8 Python 3.8 Source code to any of the above-listed packages distributed with IBM -Block Storage CSI Driver 1.6.0 is available at the website below, when +Block Storage CSI Driver 1.7.0 is available at the website below, when a URL is provided, or by sending a request to the following address or email: @@ -2498,6 +2498,6 @@ END OF yaml-2.2.8 NOTICES AND INFORMATION =========================================================================== END OF NOTICES AND INFORMATION FOR IBM Block Storage CSI -Driver 1.6.0 +Driver 1.7.0 =========================================================================== diff --git a/README.md b/README.md index 3c9980d75..8918e9c0a 100644 --- a/README.md +++ b/README.md @@ -1,409 +1,7 @@ # IBM block storage CSI driver The Container Storage Interface (CSI) Driver for IBM block storage systems enables container orchestrators such as Kubernetes to manage the life cycle of persistent storage. -## Supported orchestration platforms - -The following table details orchestration platforms suitable for deployment of the IBM® block storage CSI driver. - -|Orchestration platform|Version|Architecture| -|----------------------|-------|------------| -|Kubernetes|1.20|x86| -|Kubernetes|1.21|x86| -|Red Hat® OpenShift®|4.7|x86, IBM Z®, IBM Power Systems™1| -|Red Hat OpenShift|4.8|x86, IBM Z, IBM Power Systems1| - -1IBM Power Systems architecture is only supported on Spectrum Virtualize Family storage systems. 
- -**Note:** As of this document's publication date, IBM Cloud® Satellite only supports RHEL 7 on x86 architecture for Red Hat OpenShift. For the latest support information, see [cloud.ibm.com/docs/satellite](https://cloud.ibm.com/docs/satellite). - -## Supported storage systems - -IBM® block storage CSI driver 1.6.0 supports different IBM storage systems as listed in the following table. - -|Storage system|Microcode version| -|--------------|-----------------| -|IBM FlashSystem™ A9000|12.x| -|IBM FlashSystem A9000R|12.x| -|IBM Spectrum Virtualize™ Family including IBM SAN Volume Controller (SVC) and IBM FlashSystem® family members built with IBM Spectrum® Virtualize (including FlashSystem 5xxx, 7200, 9100, 9200, 9200R)|7.8 and above, 8.x| -|IBM Spectrum Virtualize as software only|7.8 and above, 8.x| -|IBM DS8000® Family|8.x and higher with same API interface| - -**Note:** - -- Newer microcode versions may also be compatible. When a newer microcode version becomes available, contact IBM Support to inquire whether the new microcode version is compatible with the current version of the CSI driver. -- The IBM Spectrum Virtualize Family and IBM SAN Volume Controller storage systems run the IBM Spectrum Virtualize software. In addition, IBM Spectrum Virtualize package is available as a deployable solution that can be run on any compatible hardware. - -## Supported operating systems - -The following table lists operating systems required for deployment of the IBM® block storage CSI driver. - -|Operating system|Architecture| -|----------------|------------| -|Red Hat® Enterprise Linux® (RHEL) 7.x|x86, IBM Z®| -|Red Hat Enterprise Linux CoreOS (RHCOS)|x86, IBM Z®2, IBM Power Systems™1| - -1IBM Power Systems architecture is only supported on Spectrum Virtualize Family storage systems.
-2IBM Z and IBM Power Systems architectures are only supported using CLI installation. - -For full product information, see [IBM block storage CSI driver documentation](https://www.ibm.com/docs/en/stg-block-csi-driver). - -
-
-
- -## Prerequisites -Perform these steps for each worker node in Kubernetes cluster to prepare your environment for installing the CSI (Container Storage Interface) driver. - -1. **For RHEL OS users:** Ensure iSCSI connectivity. If using RHCOS or if the packages are already installed, skip this step and continue to step 2. - -2. Configure Linux® multipath devices on the host. - - **Important:** Be sure to configure each worker with storage connectivity according to your storage system instructions. For more information, find your storage system documentation in [IBM Documentation](http://www.ibm.com/docs/). - - **Additional configuration steps for OpenShift® Container Platform users (RHEL and RHCOS).** Other users can continue to step 3. - - Download and save the following yaml file: - - ``` - curl https://raw.githubusercontent.com/IBM/ibm-block-csi-operator/master/deploy/99-ibm-attach.yaml > 99-ibm-attach.yaml - ``` - - This file can be used for both Fibre Channel and iSCSI configurations. To support iSCSI, uncomment the last two lines in the file. - - **Important:** The 99-ibm-attach.yaml configuration file overrides any files that already exist on your system. Only use this file if the files mentioned are not already created.
If one or more have been created, edit this yaml file, as necessary. - - Apply the yaml file. - - `oc apply -f 99-ibm-attach.yaml` - -3. If needed, enable support for volume snapshots (FlashCopy® function) on your Kubernetes cluster. - - For more information and instructions, see the Kubernetes blog post, [Kubernetes 1.20: Kubernetes Volume Snapshot Moves to GA](https://kubernetes.io/blog/2020/12/10/kubernetes-1.20-volume-snapshot-moves-to-ga/). - - Install both the Snapshot CRDs and the Common Snapshot Controller once per cluster. - - The instructions and relevant yaml files to enable volume snapshots can be found at: [https://github.com/kubernetes-csi/external-snapshotter#usage](https://github.com/kubernetes-csi/external-snapshotter#usage) - -4. Configure storage system connectivity. - - 1. Define the host of each Kubernetes node on the relevant storage systems with the valid WWPN (for Fibre Channel) or IQN (for iSCSI) of the node. - - 2. For Fibre Channel, configure the relevant zoning from the storage to the host. - -
-
-
- -## Installing the driver - -The operator for IBM® block storage CSI driver can be installed directly with GitHub. Installing the CSI (Container Storage Interface) driver is part of the operator installation process. - -Use the following steps to install the operator and driver, with [GitHub](https://github.com/IBM/ibm-block-csi-operator). - -**Note:** Before you begin, you may need to create a user-defined namespace. Create the project namespace, using the `kubectl create ns ` command. - -1. Install the operator. - - 1. Download the manifest from GitHub. - - ``` - curl https://raw.githubusercontent.com/IBM/ibm-block-csi-operator/v1.6.0/deploy/installer/generated/ibm-block-csi-operator.yaml > ibm-block-csi-operator.yaml - ``` - - 2. **Optional:** Update the image fields in the ibm-block-csi-operator.yaml. - - 3. Install the operator, using a user-defined namespace. - - ``` - kubectl -n apply -f ibm-block-csi-operator.yaml - ``` - - 4. Verify that the operator is running. (Make sure that the Status is _Running_.) - - ```screen - $ kubectl get pod -l app.kubernetes.io/name=ibm-block-csi-operator -n - NAME READY STATUS RESTARTS AGE - ibm-block-csi-operator-5bb7996b86-xntss 1/1 Running 0 10m - ``` - -2. Install the IBM block storage CSI driver by creating an IBMBlockCSI custom resource. - - 1. Download the manifest from GitHub. - - ``` - curl https://raw.githubusercontent.com/IBM/ibm-block-csi-operator/v1.6.0/deploy/crds/csi.ibm.com_v1_ibmblockcsi_cr.yaml > csi.ibm.com_v1_ibmblockcsi_cr.yaml - ``` - - 2. **Optional:** Update the image repository field, tag field, or both in the csi.ibm.com_v1_ibmblockcsi_cr.yaml. - - 3. Install the csi.ibm.com_v1_ibmblockcsi_cr.yaml. - - ``` - kubectl -n apply -f csi.ibm.com_v1_ibmblockcsi_cr.yaml - ``` - - 4. 
Verify that the driver is running: - ```bash - $ kubectl get pods -n -l csi - NAME READY STATUS RESTARTS AGE - ibm-block-csi-controller-0 6/6 Running 0 9m36s - ibm-block-csi-node-jvmvh 3/3 Running 0 9m36s - ibm-block-csi-node-tsppw 3/3 Running 0 9m36s - ibm-block-csi-operator-5bb7996b86-xntss 1/1 Running 0 10m - ``` - -
-
-
- -## Configuring k8s secret and storage class -In order to use the driver, create the relevant storage classes and secrets, as needed. - -This section describes how to: - 1. Create a storage system secret - to define the storage system credentials (user and password) and its address. - 2. Configure the storage class - to define the storage system pool name, secret reference, `SpaceEfficiency`, and `fstype`. - -### Creating a Secret - -Create an array secret YAML file in order to define the storage credentials (username and password) and address. - -**Important:** When your storage system password is changed, be sure to also change the passwords in the corresponding secrets, particularly when LDAP is used on the storage systems.

Failing to do so causes mismatched passwords across the storage systems and the secrets, causing the user to be locked out of the storage systems. - -Use one of the following procedures to create and apply the secret: - -#### Creating an array secret file -1. Create the secret file, similar to the following demo-secret.yaml: - - The `management_address` field can contain more than one address, with each value separated by a comma. - - - ``` - kind: Secret - apiVersion: v1 - metadata: - name: demo-secret - namespace: default - type: Opaque - stringData: - management_address: demo-management-address # Array management addresses - username: demo-username # Array username - data: - password: ZGVtby1wYXNzd29yZA== # base64 array password - ``` - -2. Apply the secret using the following command: - - `kubectl apply -f demo-secret.yaml` - - - The `secret/ created` message is emitted. - - -#### Creating an array secret via command line -**Note:** This procedure is applicable for both Kubernetes and Red Hat® OpenShift®. For Red Hat OpenShift, replace `kubectl` with `oc` in all relevant commands. - -Create the secret using the following command: - - ``` - kubectl create secret generic --from-literal=username= --from-literal=password=--from-literal=management_address= -n - ``` - - -### Creating a StorageClass - -Create a storage class yaml file in order to define the storage system pool name, secret reference, `SpaceEfficiency`, and `fstype`. - -Use the following procedure to create and apply the storage classes. - -**Note:** This procedure is applicable for both Kubernetes and Red Hat® OpenShift®. For Red Hat OpenShift, replace `kubectl` with `oc` in all relevant commands. - -Create a storage class yaml file, similar to the following demo-storageclass.yaml. - -Update the capabilities, pools, and array secrets, as needed. - -Use the `SpaceEfficiency` parameters for each storage system, as defined in [the following table](#spaceefficiency). These values are not case-sensitive. 
- -_**Table:** `SpaceEfficiency` parameter definitions per storage system type_ - -|Storage system type|SpaceEfficiency parameter options| -|-------------------|---------------------------------| -|IBM FlashSystem® A9000 and A9000R|Always includes deduplication and compression. No need to specify during configuration.| -|IBM Spectrum® Virtualize Family|- thick (default value)
- thin
- compressed
- deduplicated

**Note:** If not specified, the default value is thick.| -|IBM® DS8000® Family| - none (default value)
- thin

**Note:** If not specified, the default value is none.| - -- The IBM DS8000 Family `pool` value is the pool ID and not the pool name as is used in other storage systems. -- Be sure that the `pool` value is the name of an existing pool on the storage system. -- The `allowVolumeExpansion` parameter is optional but is necessary for using volume expansion. The default value is _false_. - -**Note:** Be sure to set the value to true to allow volume expansion. - -- The `csi.storage.k8s.io/fstype` parameter is optional. The values that are allowed are _ext4_ or _xfs_. The default value is _ext4_. -- The `volume_name_prefix` parameter is optional. - -**Note:** For IBM DS8000 Family, the maximum prefix length is five characters. The maximum prefix length for other systems is 20 characters.

For storage systems that use Spectrum Virtualize, the `CSI_` prefix is added as default if not specified by the user. - - - kind: StorageClass - apiVersion: storage.k8s.io/v1 - metadata: - name: demo-storageclass - provisioner: block.csi.ibm.com - parameters: - SpaceEfficiency: deduplicated # Optional. - pool: demo-pool - - csi.storage.k8s.io/provisioner-secret-name: demo-secret - csi.storage.k8s.io/provisioner-secret-namespace: default - csi.storage.k8s.io/controller-publish-secret-name: demo-secret - csi.storage.k8s.io/controller-publish-secret-namespace: default - csi.storage.k8s.io/controller-expand-secret-name: demo-secret - csi.storage.k8s.io/controller-expand-secret-namespace: default - - csi.storage.k8s.io/fstype: xfs # Optional. Values ext4\xfs. The default is ext4. - volume_name_prefix: demoPVC # Optional. - allowVolumeExpansion: true - - -Apply the storage class. - - ``` - kubectl apply -f demo-storageclass.yaml - ``` - -The `storageclass.storage.k8s.io/demo-storageclass created` message is emitted. - -
-
-
- -## Driver usage -### Creating PVC for volume with Filesystem - -Create a PVC yaml file, similar to the following demo-pvc-file-system.yaml file, with the size of 1 Gb. - -**Note:** `volumeMode` is an optional field. `Filesystem` is the default if the value is not added. - -
-kind: PersistentVolumeClaim
-apiVersion: v1
-metadata:
-  name: demo-pvc-file-system
-spec:
-  volumeMode: Filesystem  # Optional. The default is Filesystem.
-  accessModes:
-  - ReadWriteOnce
-  resources:
-    requests:
-      storage: 1Gi
-  storageClassName: demo-storageclass
-
- - -``` -kubectl apply -f .yaml -``` -The `persistentvolumeclaim/ created` message is emitted. - -### Creating a StatefulSet with file system volumes - -Create a StatefulSet yaml file, similar to the following demo-statefulset-file-system.yaml file. - -
-kind: StatefulSet
-apiVersion: apps/v1
-metadata:
-  name: demo-statefulset-file-system
-spec:
-  selector:
-    matchLabels:
-      app: demo-statefulset
-  serviceName: demo-statefulset
-  replicas: 1
-  template:
-    metadata:
-      labels:
-        app: demo-statefulset
-    spec:
-      containers:
-      - name: demo-container
-        image: registry.access.redhat.com/ubi8/ubi:latest
-        command: [ "/bin/sh", "-c", "--" ]
-        args: [ "while true; do sleep 30; done;" ]
-        volumeMounts:
-          - name: demo-volume-file-system
-            mountPath: "/data"
-      volumes:
-      - name: demo-volume-file-system
-        persistentVolumeClaim:
-          claimName: demo-pvc-file-system
-
- -``` -kubectl apply -f .yaml -``` - -The `statefulset.apps/ created` message is emitted. - -``` -kubectl get pod demo-statefulset-0 -NAME READY STATUS RESTARTS AGE -demo-statefulset-file-system-0 1/1 Running 0 43s -``` - -Review the mountpoint inside the pod: -``` -kubectl exec demo-statefulset-file-system-0 -- bash -c "df -h /data" -Filesystem Size Used Avail Use% Mounted on -/dev/mapper/mpathz 1014M 33M 982M 4% /data -``` - - -Delete StatefulSet and PVC - -```sh -$> kubectl delete statefulset/demo-statefulset-file-system -statefulset/demo-statefulset-file-system deleted - -$> kubectl get statefulset/demo-statefulset-file-system -No resources found. - -$> kubectl delete pvc/demo-pvc-file-system -persistentvolumeclaim/demo-pvc-file-system deleted - -$> kubectl get pv,pvc -No resources found. -``` - -
-
-
- -## Upgrading - -To manually upgrade the CSI (Container Storage Interface) driver from a previous version with GitHub, perform step 1 of the [installation procedure](#installing-the-driver) for the latest version. - -## Uninstalling the driver - -Use this information to uninstall the IBM® CSI (Container Storage Interface) operator and driver with GitHub. - -Perform the following steps in order to uninstall the CSI driver and operator. -1. Delete the IBMBlockCSI custom resource. - - ``` - kubectl -n delete -f csi.ibm.com_v1_ibmblockcsi_cr.yaml - ``` - -2. Delete the operator. - - ``` - kubectl -n delete -f ibm-block-csi-operator.yaml - ``` - - - -
-
-
+For compatibility, prerequisites, release notes, and other user information, see [IBM block storage CSI driver documentation](https://www.ibm.com/docs/en/stg-block-csi-driver). ## Licensing diff --git a/common/config.yaml b/common/config.yaml index df313fcc8..448edec09 100644 --- a/common/config.yaml +++ b/common/config.yaml @@ -1,6 +1,6 @@ identity: name: block.csi.ibm.com - version: 1.6.0 + version: 1.7.0 capabilities: Service: [CONTROLLER_SERVICE, VOLUME_ACCESSIBILITY_CONSTRAINTS] VolumeExpansion: ONLINE diff --git a/controller/array_action/array_action_types.py b/controller/array_action/array_action_types.py index 42dac2ee4..4a91cd1a9 100644 --- a/controller/array_action/array_action_types.py +++ b/controller/array_action/array_action_types.py @@ -1,23 +1,30 @@ +from dataclasses import dataclass + + +@dataclass() class Volume: - def __init__(self, vol_size_bytes, vol_id, vol_name, array_address, pool, copy_source_id, array_type): - self.capacity_bytes = vol_size_bytes - self.id = vol_id - self.name = vol_name - self.array_address = array_address - self.pool = pool - self.copy_source_id = copy_source_id - self.array_type = array_type + capacity_bytes: int + id: str + internal_id: str + name: str + array_address: str + pool: str + copy_source_id: str + array_type: str + space_efficiency: str = None + default_space_efficiency: str = None +@dataclass() class Snapshot: - def __init__(self, capacity_bytes, snapshot_id, snapshot_name, array_address, volume_id, is_ready, array_type): - self.capacity_bytes = capacity_bytes - self.id = snapshot_id - self.name = snapshot_name - self.array_address = array_address - self.source_volume_id = volume_id - self.is_ready = is_ready - self.array_type = array_type + capacity_bytes: int + id: str + internal_id: str + name: str + array_address: str + source_volume_id: str + is_ready: bool + array_type: str class Host: @@ -26,3 +33,13 @@ def __init__(self, host_id, host_name, iscsi_names, wwns): self.name = host_name self.iscsi_names 
= iscsi_names self.wwns = wwns + + +class Replication: + def __init__(self, name, volume_internal_id, other_volume_internal_id, copy_type, is_ready, is_primary=None): + self.name = name + self.volume_internal_id = volume_internal_id + self.other_volume_internal_id = other_volume_internal_id + self.copy_type = copy_type + self.is_ready = is_ready + self.is_primary = is_primary diff --git a/controller/array_action/array_mediator_abstract.py b/controller/array_action/array_mediator_abstract.py index 234b6a1e6..8c78914cb 100644 --- a/controller/array_action/array_mediator_abstract.py +++ b/controller/array_action/array_mediator_abstract.py @@ -26,7 +26,7 @@ def map_volume_by_initiators(self, vol_id, initiators): if FC_CONNECTIVITY_TYPE == connectivity_type: array_initiators = self.get_array_fc_wwns(host_name) elif ISCSI_CONNECTIVITY_TYPE == connectivity_type: - array_initiators = self.get_iscsi_targets_by_iqn() + array_initiators = self.get_iscsi_targets_by_iqn(host_name) else: raise UnsupportedConnectivityTypeError(connectivity_type) diff --git a/controller/array_action/array_mediator_ds8k.py b/controller/array_action/array_mediator_ds8k.py index ca18bf173..1b4e0c3b2 100644 --- a/controller/array_action/array_mediator_ds8k.py +++ b/controller/array_action/array_mediator_ds8k.py @@ -29,6 +29,7 @@ NO_TOKEN_IS_SPECIFIED = 'BE7A001A' HOST_DOES_NOT_EXIST = 'BE7A0016' MAPPING_DOES_NOT_EXIST = 'BE7A001F' +ERROR_CODE_MAP_VOLUME_NOT_ENOUGH_EXTENTS = 'BE74121B' ERROR_CODE_VOLUME_NOT_FOUND_FOR_MAPPING = 'BE586015' ERROR_CODE_ALREADY_FLASHCOPY = '000000AE' ERROR_CODE_VOLUME_NOT_FOUND_OR_ALREADY_PART_OF_CS_RELATIONSHIP = '00000013' @@ -64,10 +65,11 @@ def scsi_id_to_volume_id(scsi_id): return scsi_id[-4:] -def try_convert_first_arg(converter, args): +def try_convert_first_args(converter, args, args_amount): if args: - converted = converter(args[0]) - return (converted,) + args[1:] + args_to_convert = args[:args_amount] + converted = map(converter, args_to_convert) + return 
tuple(converted) + args[args_amount:] return () @@ -79,10 +81,13 @@ def is_snapshot(api_volume): return False -@decorator -def convert_scsi_id_to_array_id(mediator_method, self, *args): - args = try_convert_first_arg(scsi_id_to_volume_id, args) - return mediator_method(self, *args) +def convert_scsi_ids_to_array_ids(args_amount=1): + @decorator + def convert_first_args_of_method(mediator_method, self, *args): + args = try_convert_first_args(scsi_id_to_volume_id, args, args_amount) + return mediator_method(self, *args) + + return convert_first_args_of_method def get_flashcopy_as_target_if_exists(api_volume): @@ -101,6 +106,14 @@ def get_array_space_efficiency(space_efficiency): return ARRAY_SPACE_EFFICIENCY_NONE +def _get_parameter_space_efficiency(array_space_efficiency): + if array_space_efficiency == ARRAY_SPACE_EFFICIENCY_THIN: + return config.SPACE_EFFICIENCY_THIN + if array_space_efficiency == ARRAY_SPACE_EFFICIENCY_NONE: + return config.SPACE_EFFICIENCY_NONE + raise array_errors.SpaceEfficiencyNotSupported(array_space_efficiency) + + class DS8KArrayMediator(ArrayMediatorAbstract): SUPPORTED_FROM_VERSION = '7.5.1' @@ -162,21 +175,13 @@ def _connect(self): raise array_errors.UnsupportedStorageVersionError( self.version, self.SUPPORTED_FROM_VERSION ) - except (exceptions.ClientError, exceptions.Unauthorized) as e: - # BE7A002D=Authentication has failed because the user name and - # password combination that you have entered is not valid. 
- if ERROR_CODE_INVALID_CREDENTIALS or KNOWN_ERROR_CODE_INVALID_CREDENTIALS in str( - e.message).upper(): + except exceptions.ClientException as ex: + error_message = str(ex.message).upper() + if ERROR_CODE_INVALID_CREDENTIALS in error_message or KNOWN_ERROR_CODE_INVALID_CREDENTIALS in error_message: raise array_errors.CredentialsError(self.service_address) - raise ConnectionError() - except exceptions.ClientException as e: logger.error( - 'Failed to connect to DS8K array {}, reason is {}'.format( - self.service_address, - e.details - ) - ) - raise ConnectionError() + 'Failed to connect to DS8K array {}, reason is {}'.format(self.service_address, ex.details)) + raise ex def disconnect(self): pass @@ -215,27 +220,30 @@ def _get_copy_source_id(self, api_volume): return copy_source_id def _generate_volume_response(self, api_volume): - + space_efficiency = _get_parameter_space_efficiency(api_volume.tp) return Volume( - vol_size_bytes=int(api_volume.cap), - vol_id=self._generate_volume_scsi_identifier(volume_id=api_volume.id), - vol_name=api_volume.name, + capacity_bytes=int(api_volume.cap), + id=self._generate_volume_scsi_identifier(volume_id=api_volume.id), + internal_id=api_volume.id, + name=api_volume.name, array_address=self.service_address, copy_source_id=self._get_copy_source_id(api_volume=api_volume), pool=api_volume.pool, - array_type=self.array_type + array_type=self.array_type, + space_efficiency=space_efficiency, + default_space_efficiency=config.SPACE_EFFICIENCY_NONE ) - def _create_api_volume(self, name, size_in_bytes, space_efficiency, pool_id): + def _create_api_volume(self, name, size_in_bytes, array_space_efficiency, pool_id): logger.info("Creating volume with name: {}, size: {}, in pool: {}, with parameters: {}".format( - name, size_in_bytes, pool_id, space_efficiency)) + name, size_in_bytes, pool_id, array_space_efficiency)) try: cli_kwargs = {} cli_kwargs.update({ 'name': name, 'capacity_in_bytes': size_in_bytes, 'pool_id': pool_id, - 'tp': 
get_array_space_efficiency(space_efficiency), + 'tp': array_space_efficiency, }) logger.debug( @@ -255,20 +263,12 @@ def _create_api_volume(self, name, size_in_bytes, space_efficiency, pool_id): logger.info("finished creating volume {}".format(name)) return self.client.get_volume(api_volume.id) - except (exceptions.NotFound, exceptions.InternalServerError) as ex: - if ERROR_CODE_RESOURCE_NOT_EXISTS or INCORRECT_ID in str(ex.message).upper(): + except exceptions.ClientException as ex: + error_message = str(ex.message).upper() + if ERROR_CODE_RESOURCE_NOT_EXISTS in error_message or INCORRECT_ID in error_message: raise array_errors.PoolDoesNotExist(pool_id, self.identifier) - logger.error( - "Failed to create volume {} on array {}, reason is: {}".format( - name, - self.identifier, - ex.details - ) - ) - raise array_errors.VolumeCreationError(name) - except (exceptions.ClientError, exceptions.ClientException) as ex: - if ERROR_CODE_CREATE_VOLUME_NOT_ENOUGH_EXTENTS in str(ex.message).upper(): - raise array_errors.NotEnoughSpaceInPool(id_or_name=pool_id) + if ERROR_CODE_CREATE_VOLUME_NOT_ENOUGH_EXTENTS in error_message: + raise array_errors.NotEnoughSpaceInPool(pool_id) logger.error( "Failed to create volume {} on array {}, reason is: {}".format( name, @@ -279,7 +279,8 @@ def _create_api_volume(self, name, size_in_bytes, space_efficiency, pool_id): raise array_errors.VolumeCreationError(name) def create_volume(self, volume_name, size_in_bytes, space_efficiency, pool): - api_volume = self._create_api_volume(volume_name, size_in_bytes, space_efficiency, pool) + array_space_efficiency = get_array_space_efficiency(space_efficiency) + api_volume = self._create_api_volume(volume_name, size_in_bytes, array_space_efficiency, pool) return self._generate_volume_response(api_volume) def _extend_volume(self, api_volume, new_size_in_bytes): @@ -288,23 +289,24 @@ def _extend_volume(self, api_volume, new_size_in_bytes): new_size_in_bytes=new_size_in_bytes) except exceptions.NotFound: 
raise array_errors.ObjectNotFoundError(api_volume.id) - except (exceptions.ClientError, exceptions.ClientException) as ex: + except exceptions.ClientException as ex: if ERROR_CODE_EXPAND_VOLUME_NOT_ENOUGH_EXTENTS in str(ex.message).upper(): raise array_errors.NotEnoughSpaceInPool(api_volume.pool) + raise ex - def copy_to_existing_volume_from_source(self, name, source_name, source_capacity_in_bytes, - minimum_volume_size_in_bytes, pool=None): + @convert_scsi_ids_to_array_ids(args_amount=2) + def copy_to_existing_volume_from_source(self, volume_id, source_id, source_capacity_in_bytes, + minimum_volume_size_in_bytes): logger.debug( "Copy source {0} data to volume {1}. source capacity {2}. Minimal requested volume capacity {3}".format( - name, source_name, source_capacity_in_bytes, + source_id, volume_id, source_capacity_in_bytes, minimum_volume_size_in_bytes)) - api_new_volume = self._get_api_volume_by_name(name, pool_id=pool) - api_source_object = self._get_api_volume_by_name(source_name, pool_id=pool) if minimum_volume_size_in_bytes < source_capacity_in_bytes: - self._extend_volume(api_volume=api_new_volume, + new_api_volume = self._get_api_volume_by_id(volume_id) + self._extend_volume(api_volume=new_api_volume, new_size_in_bytes=source_capacity_in_bytes) options = [FLASHCOPY_PERSISTENT_OPTION] - self._create_flashcopy(source_volume_id=api_source_object.id, target_volume_id=api_new_volume.id, + self._create_flashcopy(source_volume_id=source_id, target_volume_id=volume_id, options=options) def _delete_volume(self, volume_id, not_exist_err=True): @@ -355,7 +357,7 @@ def _delete_object(self, object_id, object_is_snapshot=False): self._delete_flashcopy(flashcopy_id=flashcopy_as_target.id) self._delete_volume(object_id) - @convert_scsi_id_to_array_id + @convert_scsi_ids_to_array_ids() def delete_volume(self, volume_id): logger.info("Deleting volume with id : {0}".format(volume_id)) self._delete_object(volume_id) @@ -369,7 +371,7 @@ def get_volume(self, name, pool=None): 
return self._generate_volume_response(api_volume) raise array_errors.ObjectNotFoundError(name) - @convert_scsi_id_to_array_id + @convert_scsi_ids_to_array_ids() def expand_volume(self, volume_id, required_bytes): logger.info("Expanding volume with id : {0} to {1} bytes".format(volume_id, required_bytes)) api_volume = self._get_api_volume_by_id(volume_id) @@ -379,7 +381,7 @@ def expand_volume(self, volume_id, required_bytes): self._extend_volume(api_volume=api_volume, new_size_in_bytes=required_bytes) logger.info("Finished Expanding volume {0}.".format(volume_id)) - @convert_scsi_id_to_array_id + @convert_scsi_ids_to_array_ids() def get_volume_mappings(self, volume_id): logger.debug("Getting volume mappings for volume {}".format(volume_id)) try: @@ -398,7 +400,7 @@ def get_volume_mappings(self, volume_id): ) raise ex - @convert_scsi_id_to_array_id + @convert_scsi_ids_to_array_ids() def map_volume(self, volume_id, host_name): logger.debug("Mapping volume {} to host {}".format(volume_id, host_name)) try: @@ -409,12 +411,13 @@ def map_volume(self, volume_id, host_name): except exceptions.NotFound: raise array_errors.HostNotFoundError(host_name) except exceptions.ClientException as ex: - # [BE586015] addLunMappings Volume group operation failure: volume does not exist. 
+ if ERROR_CODE_MAP_VOLUME_NOT_ENOUGH_EXTENTS in str(ex.message).upper(): + raise array_errors.NoAvailableLunError(volume_id) if ERROR_CODE_VOLUME_NOT_FOUND_FOR_MAPPING in str(ex.message).upper(): raise array_errors.ObjectNotFoundError(volume_id) raise array_errors.MappingError(volume_id, host_name, ex.details) - @convert_scsi_id_to_array_id + @convert_scsi_ids_to_array_ids() def unmap_volume(self, volume_id, host_name): logger.debug("Unmapping volume {} from host {}".format(volume_id, host_name)) try: @@ -432,12 +435,11 @@ def unmap_volume(self, volume_id, host_name): logger.debug("Successfully unmapped volume from host with lun {}.".format(lunid)) else: raise array_errors.ObjectNotFoundError(volume_id) - except exceptions.NotFound as ex: + except exceptions.ClientException as ex: if HOST_DOES_NOT_EXIST in str(ex.message).upper(): raise array_errors.HostNotFoundError(host_name) if MAPPING_DOES_NOT_EXIST in str(ex.message).upper(): raise array_errors.VolumeAlreadyUnmappedError(volume_id) - except exceptions.ClientException as ex: raise array_errors.UnmappingError(volume_id, host_name, ex.details) def _get_api_volume_from_volumes(self, volume_candidates, volume_name): @@ -460,8 +462,9 @@ def _get_api_volume_by_name(self, volume_name, pool_id): try: volume_candidates = [] volume_candidates.extend(self.client.get_volumes_by_pool(pool_id)) - except (exceptions.NotFound, exceptions.InternalServerError) as ex: - if ERROR_CODE_RESOURCE_NOT_EXISTS or INCORRECT_ID in str(ex.message).upper(): + except exceptions.ClientException as ex: + error_message = str(ex.message).upper() + if ERROR_CODE_RESOURCE_NOT_EXISTS in error_message or INCORRECT_ID in error_message: raise array_errors.PoolDoesNotExist(pool_id, self.identifier) raise ex @@ -478,19 +481,20 @@ def _get_api_volume_by_id(self, volume_id, not_exist_err=True): except (exceptions.ClientError, exceptions.InternalServerError) as ex: if INCORRECT_ID in str(ex.message).upper(): raise array_errors.IllegalObjectID(volume_id) + 
return None def _get_flashcopy_process(self, flashcopy_id, not_exist_err=True): logger.info("Getting flashcopy {}".format(flashcopy_id)) try: return self.client.get_flashcopies(flashcopy_id) - except exceptions.NotFound as ex: + except exceptions.ClientException as ex: if ERROR_CODE_RESOURCE_NOT_EXISTS in str(ex.message).upper(): logger.info("{} not found".format(flashcopy_id)) if not_exist_err: raise ex - except Exception as ex: - logger.exception(ex) - raise ex + else: + raise ex + return None def _get_api_snapshot(self, snapshot_name, pool_id=None): logger.debug("Get snapshot : {} in pool: {}".format(snapshot_name, pool_id)) @@ -505,7 +509,7 @@ def _get_api_snapshot(self, snapshot_name, pool_id=None): self.service_address) return api_snapshot - @convert_scsi_id_to_array_id + @convert_scsi_ids_to_array_ids() def get_snapshot(self, volume_id, snapshot_name, pool=None): if not pool: source_api_volume = self._get_api_volume_by_id(volume_id) @@ -515,15 +519,18 @@ def get_snapshot(self, volume_id, snapshot_name, pool=None): return None return self._generate_snapshot_response_with_verification(api_snapshot) - def _create_similar_volume(self, target_volume_name, source_api_volume, pool_id): + def _create_similar_volume(self, target_volume_name, source_api_volume, space_efficiency, pool): logger.info( "creating target api volume '{0}' from source volume '{1}'".format(target_volume_name, source_api_volume.name)) - space_efficiency = source_api_volume.tp + if space_efficiency: + array_space_efficiency = get_array_space_efficiency(space_efficiency) + else: + array_space_efficiency = source_api_volume.tp size_in_bytes = int(source_api_volume.cap) - if not pool_id: - pool_id = source_api_volume.pool - return self._create_api_volume(target_volume_name, size_in_bytes, space_efficiency, pool_id) + if not pool: + pool = source_api_volume.pool + return self._create_api_volume(target_volume_name, size_in_bytes, array_space_efficiency, pool) def _create_flashcopy(self, 
source_volume_id, target_volume_id, options): logger.info( @@ -534,14 +541,11 @@ def _create_flashcopy(self, source_volume_id, target_volume_id, options): api_flashcopy = self.client.create_flashcopy(source_volume_id=source_volume_id, target_volume_id=target_volume_id, options=options) - except (exceptions.ClientError, exceptions.ClientException) as ex: + except exceptions.ClientException as ex: if ERROR_CODE_ALREADY_FLASHCOPY in str(ex.message).upper(): - raise array_errors.SnapshotAlreadyExists(target_volume_id, - self.service_address) - if ERROR_CODE_VOLUME_NOT_FOUND_OR_ALREADY_PART_OF_CS_RELATIONSHIP in str( - ex.message).upper(): - raise array_errors.ObjectNotFoundError('{} or {}'.format(source_volume_id, - target_volume_id)) + raise array_errors.SnapshotAlreadyExists(target_volume_id, self.service_address) + if ERROR_CODE_VOLUME_NOT_FOUND_OR_ALREADY_PART_OF_CS_RELATIONSHIP in str(ex.message).upper(): + raise array_errors.ObjectNotFoundError('{} or {}'.format(source_volume_id, target_volume_id)) raise ex flashcopy_state = self.get_flashcopy_state(api_flashcopy.id) if not flashcopy_state == FLASHCOPY_STATE_VALID: @@ -554,8 +558,8 @@ def _create_flashcopy(self, source_volume_id, target_volume_id, options): def _delete_target_volume_if_exist(self, target_volume_id): self._delete_volume(target_volume_id, not_exist_err=False) - def _create_snapshot(self, target_volume_name, source_api_volume, pool_id): - target_api_volume = self._create_similar_volume(target_volume_name, source_api_volume, pool_id) + def _create_snapshot(self, target_volume_name, source_api_volume, space_efficiency, pool): + target_api_volume = self._create_similar_volume(target_volume_name, source_api_volume, space_efficiency, pool) options = [FLASHCOPY_NO_BACKGROUND_COPY_OPTION, FLASHCOPY_PERSISTENT_OPTION] try: return self._create_flashcopy(source_api_volume.id, target_api_volume.id, options) @@ -570,7 +574,7 @@ def _generate_snapshot_response_with_verification(self, api_object): raise 
array_errors.ExpectedSnapshotButFoundVolumeError(api_object.name, self.service_address) return self._generate_snapshot_response(api_object, flashcopy_as_target.sourcevolume) - @convert_scsi_id_to_array_id + @convert_scsi_ids_to_array_ids() def get_object_by_id(self, object_id, object_type): api_object = self._get_api_volume_by_id(object_id, not_exist_err=False) if not api_object: @@ -579,13 +583,13 @@ def get_object_by_id(self, object_id, object_type): return self._generate_snapshot_response_with_verification(api_object) return self._generate_volume_response(api_object) - @convert_scsi_id_to_array_id - def create_snapshot(self, volume_id, snapshot_name, pool=None): + @convert_scsi_ids_to_array_ids() + def create_snapshot(self, volume_id, snapshot_name, space_efficiency, pool): logger.info("creating snapshot '{0}' from volume '{1}'".format(snapshot_name, volume_id)) source_api_volume = self._get_api_volume_by_id(volume_id) if source_api_volume is None: raise array_errors.ObjectNotFoundError(volume_id) - target_api_volume = self._create_snapshot(snapshot_name, source_api_volume, pool) + target_api_volume = self._create_snapshot(snapshot_name, source_api_volume, space_efficiency, pool) logger.info("finished creating snapshot '{0}' from volume '{1}'".format(snapshot_name, volume_id)) return self._generate_snapshot_response(target_api_volume, volume_id) @@ -602,13 +606,13 @@ def _delete_flashcopy(self, flashcopy_id): ) raise ex - @convert_scsi_id_to_array_id + @convert_scsi_ids_to_array_ids() def delete_snapshot(self, snapshot_id): logger.info("Deleting snapshot with id : {0}".format(snapshot_id)) self._delete_object(snapshot_id, object_is_snapshot=True) logger.info("Finished snapshot deletion. 
id : {0}".format(snapshot_id)) - def get_iscsi_targets_by_iqn(self): + def get_iscsi_targets_by_iqn(self, host_name): return {} def get_array_fc_wwns(self, host_name=None): @@ -656,13 +660,29 @@ def validate_supported_space_efficiency(self, space_efficiency): def _generate_snapshot_response(self, api_snapshot, source_volume_id): return Snapshot(capacity_bytes=int(api_snapshot.cap), - snapshot_id=self._generate_volume_scsi_identifier(api_snapshot.id), - snapshot_name=api_snapshot.name, + id=self._generate_volume_scsi_identifier(api_snapshot.id), + internal_id=api_snapshot.id, + name=api_snapshot.name, array_address=self.service_address, - volume_id=self._generate_volume_scsi_identifier(source_volume_id), + source_volume_id=self._generate_volume_scsi_identifier(source_volume_id), is_ready=True, array_type=self.array_type) def get_flashcopy_state(self, flashcopy_id): flashcopy_process = self._get_flashcopy_process(flashcopy_id) return flashcopy_process.state + + def get_replication(self, volume_internal_id, other_volume_internal_id, other_system_id): + raise NotImplementedError + + def create_replication(self, volume_internal_id, other_volume_internal_id, other_system_id, copy_type): + raise NotImplementedError + + def delete_replication(self, replication_name): + raise NotImplementedError + + def promote_replication_volume(self, replication_name): + raise NotImplementedError + + def demote_replication_volume(self, replication_name): + raise NotImplementedError diff --git a/controller/array_action/array_mediator_interface.py b/controller/array_action/array_mediator_interface.py index 6af676968..c22d2010d 100644 --- a/controller/array_action/array_mediator_interface.py +++ b/controller/array_action/array_mediator_interface.py @@ -46,7 +46,7 @@ def create_volume(self, volume_name, size_in_bytes, space_efficiency, pool): Raises: VolumeAlreadyExists PoolDoesNotExist - PoolDoesNotMatchCapabilities + PoolDoesNotMatchSpaceEfficiency IllegalObjectName VolumeNameIsNotSupported 
PermissionDenied @@ -54,27 +54,25 @@ def create_volume(self, volume_name, size_in_bytes, space_efficiency, pool): raise NotImplementedError @abstractmethod - def copy_to_existing_volume_from_source(self, name, source_name, source_capacity_in_bytes, - minimum_volume_size_in_bytes, pool=None): + def copy_to_existing_volume_from_source(self, volume_id, source_id, source_capacity_in_bytes, + minimum_volume_size_in_bytes): """ This function should create a volume from source volume or snapshot in the storage system. Args: - name : name of the volume to be created in the storage system - source_name : name of source to create from + volume_id : id of the volume to be created in the storage system + source_id : id of source to create from source_capacity_in_bytes : capacity of source to create from minimum_volume_size_in_bytes : if source capacity is lower than this value volume will be increased to this value - pool : pool of the volume and source object to find them more efficiently. Returns: Volume Raises: ObjectNotFoundError - IllegalObjectName + IllegalObjectID PermissionDenied - PoolParameterIsMissing """ raise NotImplementedError @@ -230,13 +228,14 @@ def get_object_by_id(self, object_id, object_type): raise NotImplementedError @abstractmethod - def create_snapshot(self, volume_id, snapshot_name, pool=None): + def create_snapshot(self, volume_id, snapshot_name, space_efficiency, pool): """ This function should create a snapshot from volume in the storage system. 
Args: - volume_id : id of the volume to be created from - snapshot_name : name of the snapshot to be created in the storage system - pool : pool to create the snapshot in (if not given, pool taken from source volume) + volume_id : id of the volume to be created from + snapshot_name : name of the snapshot to be created in the storage system + space_efficiency : space efficiency (if empty/None, space efficiency taken from source volume) + pool : pool to create the snapshot in (if empty/None, pool taken from source volume) Returns: Snapshot Raises: @@ -267,12 +266,12 @@ def delete_snapshot(self, snapshot_id): raise NotImplementedError @abstractmethod - def get_iscsi_targets_by_iqn(self): + def get_iscsi_targets_by_iqn(self, host_name): """ This function will return a mapping of the storage array iscsi names to their iscsi target IPs Args: - None + host_name : used to filter relevant hosts Returns: ips_by_iqn : A dict mapping array-iqns to their list of IPs ({iqn1:[ip1, ip2], iqn2:[ip3, ip4, ...], ...}) @@ -280,6 +279,7 @@ def get_iscsi_targets_by_iqn(self): Raises: PermissionDeniedError NoIscsiTargetsFoundError + HostNotFoundError """ raise NotImplementedError @@ -343,6 +343,101 @@ def validate_supported_space_efficiency(self, space_efficiency): """ raise NotImplementedError + @abstractmethod + def get_replication(self, volume_internal_id, other_volume_internal_id, other_system_id): + """ + This function will return the volume replication relationship info + + Args: + volume_internal_id : internal id of the volume in the replication relationship + other_volume_internal_id : internal id of the other volume in the replication relationship + other_system_id : id of the other system of the replication relationship + + Returns: + Replication + + Raises: + ObjectNotFound + IllegalObjectName + PermissionDenied + """ + raise NotImplementedError + + @abstractmethod + def create_replication(self, volume_internal_id, other_volume_internal_id, other_system_id, copy_type): + """ + 
This function will create and activate a volume replication relationship + + Args: + volume_internal_id : internal id of the volume in the replication relationship + other_volume_internal_id : internal id of the other volume in the replication relationship + other_system_id : id of the other system of the replication relationship + copy_type : sync/async + + Returns: + None + + Raises: + ObjectNotFound + IllegalObjectName + PermissionDenied + """ + raise NotImplementedError + + @abstractmethod + def delete_replication(self, replication_name): + """ + This function will disable and delete a volume replication relationship + + Args: + replication_name : name of the replication relationship + + Returns: + None + + Raises: + ObjectNotFound + IllegalObjectName + PermissionDenied + """ + raise NotImplementedError + + @abstractmethod + def promote_replication_volume(self, replication_name): + """ + This function will promote the role of the volume in the connected system to be primary + + Args: + replication_name : name of the replication relationship + + Returns: + None + + Raises: + ObjectNotFound + IllegalObjectName + PermissionDenied + """ + raise NotImplementedError + + @abstractmethod + def demote_replication_volume(self, replication_name): + """ + This function will demote the role of the volume in the connected system to be secondary + + Args: + replication_name : name of the replication relationship + + Returns: + None + + Raises: + ObjectNotFound + IllegalObjectName + PermissionDenied + """ + raise NotImplementedError + @property @abstractmethod def identifier(self): diff --git a/controller/array_action/array_mediator_svc.py b/controller/array_action/array_mediator_svc.py index 93d4e5886..ef01f2577 100644 --- a/controller/array_action/array_mediator_svc.py +++ b/controller/array_action/array_mediator_svc.py @@ -9,7 +9,7 @@ import controller.array_action.config as config import controller.array_action.errors as array_errors import 
controller.controller_server.config as controller_config -from controller.array_action.array_action_types import Volume, Snapshot, Host +from controller.array_action.array_action_types import Volume, Snapshot, Host, Replication from controller.array_action.array_mediator_abstract import ArrayMediatorAbstract from controller.array_action.svc_cli_result_reader import SVCListResultsReader from controller.array_action.utils import classproperty, bytes_to_string @@ -32,8 +32,9 @@ OBJ_ALREADY_EXIST = 'CMMVC6035E' FCMAP_ALREADY_EXIST = 'CMMVC6466E' FCMAP_ALREADY_COPYING = 'CMMVC5907E' +FCMAP_ALREADY_IN_THE_STOPPED_STATE = 'CMMVC5912E' VOL_NOT_FOUND = 'CMMVC8957E' -POOL_NOT_MATCH_VOL_CAPABILITIES = 'CMMVC9292E' +POOL_NOT_MATCH_VOL_SPACE_EFFICIENCY = 'CMMVC9292E' NOT_REDUCTION_POOL = 'CMMVC9301E' NOT_ENOUGH_EXTENTS_IN_POOL_EXPAND = 'CMMVC5860E' NOT_ENOUGH_EXTENTS_IN_POOL_CREATE = 'CMMVC8710E' @@ -43,15 +44,21 @@ HOST_NAME_PARAM = 'name' HOST_ISCSI_NAMES_PARAM = 'iscsi_name' HOST_WWPNS_PARAM = 'WWPN' +HOST_PORTSET_ID = 'portset_id' HOSTS_LIST_ERR_MSG_MAX_LENGTH = 300 FCMAP_STATUS_DONE = 'idle_or_copied' +RCRELATIONSHIP_STATE_IDLE = 'idling' +RCRELATIONSHIP_STATE_READY = 'consistent_synchronized' YES = 'yes' ENDPOINT_TYPE_SOURCE = 'source' ENDPOINT_TYPE_TARGET = 'target' +ENDPOINT_TYPE_MASTER = 'master' +ENDPOINT_TYPE_AUX = 'aux' + def is_warning_message(ex): """ Return True if the exception message is warning """ @@ -86,6 +93,33 @@ def build_kwargs_from_parameters(space_efficiency, pool_name, volume_name, return cli_kwargs +def build_create_replication_kwargs(master_cli_volume_id, aux_cli_volume_id, other_system_id, copy_type): + cli_kwargs = { + 'master': master_cli_volume_id, + 'aux': aux_cli_volume_id, + 'cluster': other_system_id, + } + if copy_type == config.REPLICATION_COPY_TYPE_ASYNC: + cli_kwargs.update({'global': True}) + return cli_kwargs + + +def build_start_replication_kwargs(rcrelationship_id, primary_endpoint_type, force): + cli_kwargs = {'object_id': 
rcrelationship_id} + if primary_endpoint_type: + cli_kwargs.update({'primary': primary_endpoint_type}) + if force: + cli_kwargs.update({'force': True}) + return cli_kwargs + + +def build_stop_replication_kwargs(rcrelationship_id, add_access): + cli_kwargs = {'object_id': rcrelationship_id} + if add_access: + cli_kwargs.update({'access': True}) + return cli_kwargs + + def _get_cli_volume_space_efficiency(cli_volume): space_efficiency = config.SPACE_EFFICIENCY_THICK if cli_volume.se_copy == YES: @@ -172,6 +206,7 @@ def get_system_info(self): for cluster in self.client.svcinfo.lssystem(): if cluster['location'] == 'local': return cluster + return None @property def identifier(self): @@ -185,21 +220,27 @@ def is_active(self): def _generate_volume_response(self, cli_volume): source_volume_wwn = self._get_source_volume_wwn_if_exists(cli_volume) + space_efficiency = _get_cli_volume_space_efficiency(cli_volume) return Volume( - int(cli_volume.capacity), - cli_volume.vdisk_UID, - cli_volume.name, - self.endpoint, - cli_volume.mdisk_grp_name, - source_volume_wwn, - self.array_type) + capacity_bytes=int(cli_volume.capacity), + id=cli_volume.vdisk_UID, + internal_id=cli_volume.id, + name=cli_volume.name, + array_address=self.endpoint, + pool=cli_volume.mdisk_grp_name, + copy_source_id=source_volume_wwn, + array_type=self.array_type, + space_efficiency=space_efficiency, + default_space_efficiency=config.SPACE_EFFICIENCY_THICK + ) def _generate_snapshot_response(self, cli_snapshot, source_volume_id): return Snapshot(int(cli_snapshot.capacity), cli_snapshot.vdisk_UID, + cli_snapshot.id, cli_snapshot.name, self.endpoint, - volume_id=source_volume_id, + source_volume_id=source_volume_id, is_ready=True, array_type=self.array_type) @@ -226,11 +267,11 @@ def _get_cli_volume(self, volume_name, not_exist_err=True): logger.info("volume not found") if not_exist_err: raise array_errors.ObjectNotFoundError(volume_name) - if any(msg_id in ex.my_message for msg_id in (NON_ASCII_CHARS, 
VALUE_TOO_LONG)): + elif any(msg_id in ex.my_message for msg_id in (NON_ASCII_CHARS, VALUE_TOO_LONG)): raise array_errors.IllegalObjectName(ex.my_message) - except Exception as ex: - logger.exception(ex) - raise ex + else: + raise ex + return None def _get_cli_volume_if_exists(self, volume_name): cli_volume = self._get_cli_volume(volume_name, not_exist_err=False) @@ -371,26 +412,19 @@ def _create_cli_volume(self, name, size_in_bytes, space_efficiency, pool): return cli_volume except (svc_errors.CommandExecutionError, CLIFailureError) as ex: if not is_warning_message(ex.my_message): - logger.error(msg="Cannot create volume {0}, " - "Reason is: {1}".format(name, ex)) + logger.error(msg="Cannot create volume {0}, Reason is: {1}".format(name, ex)) if OBJ_ALREADY_EXIST in ex.my_message: - raise array_errors.VolumeAlreadyExists(name, - self.endpoint) + raise array_errors.VolumeAlreadyExists(name, self.endpoint) if NAME_NOT_EXIST_OR_MEET_RULES in ex.my_message: - raise array_errors.PoolDoesNotExist(pool, - self.endpoint) - if (POOL_NOT_MATCH_VOL_CAPABILITIES in ex.my_message - or NOT_REDUCTION_POOL in ex.my_message): - raise array_errors.PoolDoesNotMatchCapabilities( - pool, space_efficiency, ex) + raise array_errors.PoolDoesNotExist(pool, self.endpoint) + if POOL_NOT_MATCH_VOL_SPACE_EFFICIENCY in ex.my_message or NOT_REDUCTION_POOL in ex.my_message: + raise array_errors.PoolDoesNotMatchSpaceEfficiency(pool, space_efficiency, ex) if NOT_ENOUGH_EXTENTS_IN_POOL_CREATE in ex.my_message: raise array_errors.NotEnoughSpaceInPool(id_or_name=pool) if any(msg_id in ex.my_message for msg_id in (NON_ASCII_CHARS, INVALID_NAME, TOO_MANY_CHARS)): raise array_errors.IllegalObjectName(ex.my_message) raise ex - except Exception as ex: - logger.exception(ex) - raise ex + return None @retry(svc_errors.StorageArrayClientException, tries=5, delay=1) def _rollback_copy_to_target_volume(self, target_volume_name): @@ -407,9 +441,11 @@ def _copy_to_target_volume(self, target_volume_name, 
source_volume_name): self._rollback_copy_to_target_volume(target_volume_name) raise ex - def copy_to_existing_volume_from_source(self, name, source_name, source_capacity_in_bytes, - minimum_volume_size_in_bytes, pool=None): - self._copy_to_target_volume(name, source_name) + def copy_to_existing_volume_from_source(self, volume_id, source_id, source_capacity_in_bytes, + minimum_volume_size_in_bytes): + source_name = self._get_volume_name_by_wwn(source_id) + target_volume_name = self._get_volume_name_by_wwn(volume_id) + self._copy_to_target_volume(target_volume_name, source_name) def create_volume(self, name, size_in_bytes, space_efficiency, pool): cli_volume = self._create_cli_volume(name, size_in_bytes, space_efficiency, pool) @@ -426,9 +462,6 @@ def _delete_volume_by_name(self, volume_name, not_exist_err=True): if (OBJ_NOT_FOUND in ex.my_message or VOL_NOT_FOUND in ex.my_message) and not_exist_err: raise array_errors.ObjectNotFoundError(volume_name) raise ex - except Exception as ex: - logger.exception(ex) - raise ex def delete_volume(self, volume_id): logger.info("Deleting volume with id : {0}".format(volume_id)) @@ -449,12 +482,14 @@ def get_object_by_id(self, object_id, object_type): return None if object_type is controller_config.SNAPSHOT_TYPE_NAME: return self._generate_snapshot_response_with_verification(cli_object) - return self._generate_volume_response(cli_object) + cli_volume = self._get_cli_volume(cli_object.name) + return self._generate_volume_response(cli_volume) - def _create_similar_volume(self, source_cli_volume, target_volume_name, pool): + def _create_similar_volume(self, source_cli_volume, target_volume_name, space_efficiency, pool): logger.info("creating target cli volume '{0}' from source volume '{1}'".format(target_volume_name, source_cli_volume.name)) - space_efficiency = _get_cli_volume_space_efficiency(source_cli_volume) + if not space_efficiency: + space_efficiency = _get_cli_volume_space_efficiency(source_cli_volume) size_in_bytes = 
int(source_cli_volume.capacity) if not pool: pool = source_cli_volume.mdisk_grp_name @@ -497,7 +532,8 @@ def _delete_fcmap(self, fcmap_id, force): self.client.svctask.rmfcmap(object_id=fcmap_id, force=force) except (svc_errors.CommandExecutionError, CLIFailureError) as ex: if not is_warning_message(ex.my_message): - logger.warning("Failed to delete fcmap '{0}': {1}".format(fcmap_id, ex)) + logger.error("Failed to delete fcmap '{0}': {1}".format(fcmap_id, ex)) + raise ex def _stop_fcmap(self, fcmap_id): logger.info("stopping fcmap with id : {0}".format(fcmap_id)) @@ -505,21 +541,35 @@ def _stop_fcmap(self, fcmap_id): self.client.svctask.stopfcmap(object_id=fcmap_id) except (svc_errors.CommandExecutionError, CLIFailureError) as ex: if not is_warning_message(ex.my_message): - logger.warning("Failed to stop fcmap '{0}': {1}".format(fcmap_id, ex)) + if FCMAP_ALREADY_IN_THE_STOPPED_STATE in ex.my_message: + logger.info("fcmap '{0}' is already in the stopped state".format(fcmap_id)) + else: + logger.error("Failed to stop fcmap '{0}': {1}".format(fcmap_id, ex)) + raise ex - def _stop_and_delete_fcmap(self, fcmap_id): - self._stop_fcmap(fcmap_id) - self._delete_fcmap(fcmap_id, force=True) + def _safe_stop_and_delete_fcmap(self, fcmap): + if not self._is_in_remote_copy_relationship(fcmap): + self._stop_fcmap(fcmap.id) + self._delete_fcmap(fcmap.id, force=True) def _safe_delete_fcmaps(self, object_name, fcmaps): - unfinished_fcmaps = [fcmap for fcmap in fcmaps - if fcmap.status != FCMAP_STATUS_DONE or fcmap.copy_rate == "0"] - if unfinished_fcmaps: - raise array_errors.ObjectIsStillInUseError(id_or_name=object_name, - used_by=unfinished_fcmaps) + fcmaps_to_delete = [] + fcmaps_in_use = [] + for fcmap in fcmaps: + if not self._is_in_remote_copy_relationship(fcmap): + if fcmap.status != FCMAP_STATUS_DONE or fcmap.copy_rate == "0": + fcmaps_in_use.append(fcmap) + else: + fcmaps_to_delete.append(fcmap) + if fcmaps_in_use: + raise 
array_errors.ObjectIsStillInUseError(id_or_name=object_name, used_by=fcmaps_in_use) + for fcmap in fcmaps_to_delete: self._delete_fcmap(fcmap.id, force=False) + def _is_in_remote_copy_relationship(self, fcmap): + return fcmap.rc_controlled == YES + def _delete_object(self, cli_object, is_snapshot=False): object_name = cli_object.name fcmap_as_target = self._get_fcmap_as_target_if_exists(object_name) @@ -529,7 +579,7 @@ def _delete_object(self, cli_object, is_snapshot=False): if fcmaps_as_source: self._safe_delete_fcmaps(object_name, fcmaps_as_source) if fcmap_as_target: - self._stop_and_delete_fcmap(fcmap_as_target.id) + self._safe_stop_and_delete_fcmap(fcmap_as_target) self._delete_volume_by_name(object_name) def _delete_unstarted_fcmap_if_exists(self, target_volume_name): @@ -547,9 +597,9 @@ def _rollback_create_snapshot(self, target_volume_name): target_cli_volume = self._delete_unstarted_fcmap_if_exists(target_volume_name) self._delete_target_volume_if_exists(target_cli_volume) - def _create_snapshot(self, target_volume_name, source_cli_volume, pool): + def _create_snapshot(self, target_volume_name, source_cli_volume, space_efficiency, pool): try: - self._create_similar_volume(source_cli_volume, target_volume_name, pool) + self._create_similar_volume(source_cli_volume, target_volume_name, space_efficiency, pool) return self._create_and_start_fcmap(source_cli_volume.name, target_volume_name, is_copy=False) except (svc_errors.CommandExecutionError, CLIFailureError) as ex: logger.error("Failed to create snapshot '{0}': {1}".format(target_volume_name, ex)) @@ -557,11 +607,11 @@ def _create_snapshot(self, target_volume_name, source_cli_volume, pool): self._rollback_create_snapshot(target_volume_name) raise ex - def create_snapshot(self, volume_id, snapshot_name, pool=None): + def create_snapshot(self, volume_id, snapshot_name, space_efficiency, pool): logger.info("creating snapshot '{0}' from volume '{1}'".format(snapshot_name, volume_id)) source_volume_name = 
self._get_volume_name_by_wwn(volume_id) source_cli_volume = self._get_cli_volume(source_volume_name) - target_cli_volume = self._create_snapshot(snapshot_name, source_cli_volume, pool) + target_cli_volume = self._create_snapshot(snapshot_name, source_cli_volume, space_efficiency, pool) logger.info("finished creating snapshot '{0}' from volume '{1}'".format(snapshot_name, volume_id)) return self._generate_snapshot_response(target_cli_volume, source_cli_volume.vdisk_UID) @@ -726,9 +776,6 @@ def map_volume(self, volume_id, host_name): raise array_errors.LunAlreadyInUseError(lun, host_name) raise array_errors.MappingError(vol_name, host_name, ex) - except Exception as ex: - logger.exception(ex) - raise ex return str(lun) @@ -757,9 +804,6 @@ def unmap_volume(self, volume_id, host_name): volume_name) raise array_errors.UnmappingError(volume_name, host_name, ex) - except Exception as ex: - logger.exception(ex) - raise ex def _get_array_iqns_by_node_id(self): logger.debug("Getting array nodes id and iscsi name") @@ -767,14 +811,18 @@ def _get_array_iqns_by_node_id(self): nodes_list = self.client.svcinfo.lsnode() array_iqns_by_id = {node.id: node.iscsi_name for node in nodes_list if node.status.lower() == "online"} - except Exception as ex: - logger.exception(ex) - raise ex + except (svc_errors.CommandExecutionError, CLIFailureError) as ex: + if not is_warning_message(ex.my_message): + logger.error(ex) + raise ex logger.debug("Found iqns by node id: {}".format(array_iqns_by_id)) return array_iqns_by_id - def _list_ip_ports(self): + def _list_ip_ports(self, portset_id): try: + if portset_id: + filter_value = 'portset_id={}'.format(portset_id) + return self.client.svcinfo.lsip(filtervalue=filter_value) return self.client.svcinfo.lsportip(filtervalue='state=configured:failover=no') except (svc_errors.CommandExecutionError, CLIFailureError) as ex: logger.error("Get iscsi targets failed. 
Reason is: {}".format(ex)) @@ -784,9 +832,9 @@ def _list_ip_ports(self): def _create_ips_by_node_id_map(ports): ips_by_node_id = defaultdict(list) for port in ports: - if port.IP_address: + if port.get('IP_address'): ips_by_node_id[port.node_id].append(port.IP_address) - if port.IP_address_6: + if port.get('IP_address_6'): ipv6 = port.IP_address_6.join('[]') ips_by_node_id[port.node_id].append(ipv6) return dict(ips_by_node_id) @@ -799,14 +847,15 @@ def _unify_ips_by_iqn(iqns_by_node_id, ips_by_node_id): ips_by_iqn[iqn].extend(ips) return dict(ips_by_iqn) - def _get_iscsi_targets_by_node_id(self): - ports = self._list_ip_ports() + def _get_iscsi_targets_by_node_id(self, host_name): + portset_id = self._get_host_portset_id(host_name) + ports = self._list_ip_ports(portset_id) return self._create_ips_by_node_id_map(ports) - def get_iscsi_targets_by_iqn(self): + def get_iscsi_targets_by_iqn(self, host_name): logger.debug("Getting iscsi targets by iqn") iqns_by_node_id = self._get_array_iqns_by_node_id() - ips_by_node_id = self._get_iscsi_targets_by_node_id() + ips_by_node_id = self._get_iscsi_targets_by_node_id(host_name) ips_by_iqn = self._unify_ips_by_iqn(iqns_by_node_id, ips_by_node_id) if ips_by_iqn and any(ips_by_iqn.values()): @@ -822,7 +871,7 @@ def get_array_fc_wwns(self, host_name): fc_wwns = self.client.svcinfo.lsfabric(host=host_name) for wwn in fc_wwns: state = wwn.get('state', '') - if state == 'active' or state == 'inactive': + if state in ('active', 'inactive'): fc_port_wwns.append(wwn.get('local_wwpn', '')) logger.debug("Getting fc wwns : {}".format(fc_port_wwns)) return fc_port_wwns @@ -830,3 +879,211 @@ def get_array_fc_wwns(self, host_name): logger.error(msg="Failed to get array fc wwn. 
Reason " "is: {0}".format(ex)) raise ex + + def _get_cli_host_by_name(self, host_name): + filter_value = 'name={}'.format(host_name) + cli_host = self.client.svcinfo.lshost(filtervalue=filter_value).as_single_element + if not cli_host: + raise array_errors.HostNotFoundError(host_name) + return cli_host + + def _get_host_portset_id(self, host_name): + cli_host = self._get_cli_host_by_name(host_name) + return cli_host.get(HOST_PORTSET_ID) + + def _get_replication_endpoint_type(self, rcrelationship): + if self._identifier == rcrelationship.master_cluster_id: + return ENDPOINT_TYPE_MASTER + return ENDPOINT_TYPE_AUX + + @staticmethod + def _get_other_endpoint_type(endpoint_type): + if endpoint_type == ENDPOINT_TYPE_MASTER: + return ENDPOINT_TYPE_AUX + return ENDPOINT_TYPE_MASTER + + def _get_replication_other_endpoint_type(self, rcrelationship): + endpoint_type = self._get_replication_endpoint_type(rcrelationship) + return self._get_other_endpoint_type(endpoint_type) + + @staticmethod + def _is_replication_idle(rcrelationship): + return rcrelationship.state == RCRELATIONSHIP_STATE_IDLE + + @staticmethod + def _is_replication_disconnected(rcrelationship): + return 'disconnected' in rcrelationship.state + + @staticmethod + def _is_replication_ready(rcrelationship): + return rcrelationship.state == RCRELATIONSHIP_STATE_READY + + def _is_replication_endpoint_primary(self, rcrelationship, endpoint_type=None): + if not endpoint_type: + endpoint_type = self._get_replication_endpoint_type(rcrelationship) + if rcrelationship.primary: + return rcrelationship.primary == endpoint_type + return None + + @staticmethod + def _get_replication_copy_type(rcrelationship): + if rcrelationship.copy_type == 'global': + return config.REPLICATION_COPY_TYPE_ASYNC + return config.REPLICATION_COPY_TYPE_SYNC + + def _generate_replication_response(self, rcrelationship, volume_internal_id, other_volume_internal_id): + copy_type = self._get_replication_copy_type(rcrelationship) + is_ready = 
self._is_replication_ready(rcrelationship) + is_primary = self._is_replication_endpoint_primary(rcrelationship) + return Replication(name=rcrelationship.name, + volume_internal_id=volume_internal_id, + other_volume_internal_id=other_volume_internal_id, + copy_type=copy_type, + is_ready=is_ready, + is_primary=is_primary) + + def _get_lsrcrelationship(self, filter_value): + return self.client.svcinfo.lsrcrelationship(filtervalue=filter_value) + + def _get_rcrelationship_by_name(self, replication_name, not_exist_error=True): + filter_value = 'RC_rel_name={0}'.format(replication_name) + rcrelationship = self._get_lsrcrelationship(filter_value).as_single_element + if not rcrelationship and not_exist_error: + raise array_errors.ObjectNotFoundError(replication_name) + return rcrelationship + + def _get_rcrelationships(self, cli_volume_id, other_cli_volume_id, other_system_id, as_master): + endpoint_type = ENDPOINT_TYPE_AUX + other_endpoint_type = ENDPOINT_TYPE_MASTER + if as_master: + endpoint_type = ENDPOINT_TYPE_MASTER + other_endpoint_type = ENDPOINT_TYPE_AUX + filter_value = '{END}_vdisk_id={VDISK_ID}:' \ + '{OTHER_END}_vdisk_id={OTHER_VDISK_ID}:' \ + '{OTHER_END}_cluster_id={OTHER_CLUSTER_ID}'.format(END=endpoint_type, VDISK_ID=cli_volume_id, + OTHER_END=other_endpoint_type, + OTHER_VDISK_ID=other_cli_volume_id, + OTHER_CLUSTER_ID=other_system_id) + return self._get_lsrcrelationship(filter_value).as_list + + def _get_rcrelationship(self, cli_volume_id, other_cli_volume_id, other_system_id): + rcrelationships = self._get_rcrelationships(cli_volume_id, other_cli_volume_id, + other_system_id, as_master=True) + rcrelationships.extend(self._get_rcrelationships(cli_volume_id, other_cli_volume_id, + other_system_id, as_master=False)) + if len(rcrelationships) != 1: + logger.warning('found {0} rcrelationships for volume id {1} ' + 'with volume id {2} of system {3}'.format(len(rcrelationships), + cli_volume_id, + other_cli_volume_id, + other_system_id)) + return None + return 
rcrelationships[0] + + def get_replication(self, volume_internal_id, other_volume_internal_id, other_system_id): + rcrelationship = self._get_rcrelationship(volume_internal_id, other_volume_internal_id, other_system_id) + if not rcrelationship: + return None + logger.info("found rcrelationship: {}".format(rcrelationship)) + return self._generate_replication_response(rcrelationship, volume_internal_id, other_volume_internal_id) + + def _create_rcrelationship(self, master_cli_volume_id, aux_cli_volume_id, other_system_id, copy_type): + logger.info("creating remote copy relationship for master volume id: {0} " + "and auxiliary volume id: {1} with system {2} using {3} copy type".format(master_cli_volume_id, + aux_cli_volume_id, + other_system_id, + copy_type)) + kwargs = build_create_replication_kwargs(master_cli_volume_id, aux_cli_volume_id, other_system_id, copy_type) + try: + svc_response = self.client.svctask.mkrcrelationship(**kwargs) + message = str(svc_response.response[0]) + id_start, id_end = message.find('[') + 1, message.find(']') + raw_id = message[id_start:id_end] + return int(raw_id) + except (svc_errors.CommandExecutionError, CLIFailureError) as ex: + if not is_warning_message(ex.my_message): + logger.error("failed to create rcrelationship for volume id {0} " + "with volume id {1} of system {2}: {3}".format(master_cli_volume_id, + aux_cli_volume_id, + other_system_id, + ex)) + raise ex + return None + + def _start_rcrelationship(self, rcrelationship_id, primary_endpoint_type=None, force=False): + logger.info("starting remote copy relationship with id: {} primary: {} force: {}".format(rcrelationship_id, + primary_endpoint_type, + force)) + try: + kwargs = build_start_replication_kwargs(rcrelationship_id, primary_endpoint_type, force) + self.client.svctask.startrcrelationship(**kwargs) + except (svc_errors.CommandExecutionError, CLIFailureError) as ex: + if not is_warning_message(ex.my_message): + logger.warning("failed to start rcrelationship '{}': 
{}".format(rcrelationship_id, ex)) + + def create_replication(self, volume_internal_id, other_volume_internal_id, other_system_id, copy_type): + rc_id = self._create_rcrelationship(volume_internal_id, other_volume_internal_id, other_system_id, copy_type) + self._start_rcrelationship(rc_id) + + def _stop_rcrelationship(self, rcrelationship_id, add_access_to_secondary=False): + logger.info("stopping remote copy relationship with id: {}. access: {}".format(rcrelationship_id, + add_access_to_secondary)) + kwargs = build_stop_replication_kwargs(rcrelationship_id, add_access_to_secondary) + try: + self.client.svctask.stoprcrelationship(**kwargs) + except (svc_errors.CommandExecutionError, CLIFailureError) as ex: + if not is_warning_message(ex.my_message): + logger.warning("failed to stop rcrelationship '{0}': {1}".format(rcrelationship_id, ex)) + + def _delete_rcrelationship(self, rcrelationship_id): + logger.info("deleting remote copy relationship with id: {0}".format(rcrelationship_id)) + try: + self.client.svctask.rmrcrelationship(object_id=rcrelationship_id) + except (svc_errors.CommandExecutionError, CLIFailureError) as ex: + if not is_warning_message(ex.my_message): + logger.warning("failed to delete rcrelationship '{0}': {1}".format(rcrelationship_id, ex)) + + def delete_replication(self, replication_name): + rcrelationship = self._get_rcrelationship_by_name(replication_name, not_exist_error=False) + if not rcrelationship: + logger.info("could not find replication with name {}".format(replication_name)) + return + self._stop_rcrelationship(rcrelationship.id) + self._delete_rcrelationship(rcrelationship.id) + + def _promote_replication_endpoint(self, endpoint_type, replication_name): + logger.info("making '{}' primary for remote copy relationship {}".format(endpoint_type, replication_name)) + try: + self.client.svctask.switchrcrelationship(primary=endpoint_type, object_id=replication_name) + except (svc_errors.CommandExecutionError, CLIFailureError) as ex: + if not 
is_warning_message(ex.my_message): + logger.error("failed to make '{}' primary for rcrelationship {}: {}".format(endpoint_type, + replication_name, + ex.my_message)) + raise + logger.info("succeeded making '{}' primary for remote copy relationship {}".format(endpoint_type, + replication_name)) + + def _ensure_endpoint_is_primary(self, rcrelationship, endpoint_type): + if self._is_replication_endpoint_primary(rcrelationship, endpoint_type): + logger.info("'{}' is already primary for rcrelationship {}. " + "skipping the switch".format(endpoint_type, + rcrelationship.name)) + return + if self._is_replication_idle(rcrelationship): + other_endpoint_type = self._get_other_endpoint_type(endpoint_type) + self._start_rcrelationship(rcrelationship.id, primary_endpoint_type=other_endpoint_type, force=True) + self._promote_replication_endpoint(endpoint_type, rcrelationship.name) + + def promote_replication_volume(self, replication_name): + rcrelationship = self._get_rcrelationship_by_name(replication_name) + if self._is_replication_disconnected(rcrelationship): + self._stop_rcrelationship(rcrelationship.id, add_access_to_secondary=True) + return + endpoint_type = self._get_replication_endpoint_type(rcrelationship) + self._ensure_endpoint_is_primary(rcrelationship, endpoint_type) + + def demote_replication_volume(self, replication_name): + rcrelationship = self._get_rcrelationship_by_name(replication_name) + endpoint_type_to_promote = self._get_replication_other_endpoint_type(rcrelationship) + self._ensure_endpoint_is_primary(rcrelationship, endpoint_type_to_promote) diff --git a/controller/array_action/array_mediator_xiv.py b/controller/array_action/array_mediator_xiv.py index 8c88a85e4..6864ab1c3 100644 --- a/controller/array_action/array_mediator_xiv.py +++ b/controller/array_action/array_mediator_xiv.py @@ -19,6 +19,7 @@ LUN_IS_ALREADY_IN_USE_ERROR = "LUN is already in use" UNDEFINED_MAPPING_ERROR = "The requested mapping is not defined" NO_ALLOCATION_SPACE_ERROR = "No 
space to allocate to the volume" +NOT_AVAILABLE = "Not Available" class XIVArrayMediator(ArrayMediatorAbstract): @@ -114,35 +115,54 @@ def is_active(self): def _convert_size_blocks_to_bytes(self, size_in_blocks): return int(size_in_blocks) * self.BLOCK_SIZE_IN_BYTES + @staticmethod + def _is_gen3(cli_object): + return not hasattr(cli_object, "copy_master_wwn") + + def _get_snapshot_source_wwn(self, cli_snapshot): + if self._is_gen3(cli_snapshot): + source_name = cli_snapshot.master_name + cli_source = self._get_cli_object_by_name(source_name) + return cli_source.wwn + return cli_snapshot.copy_master_wwn + + def _get_volume_source_wwn(self, cli_volume): + if self._is_gen3(cli_volume) or cli_volume.copy_master_wwn == NOT_AVAILABLE: + return None + return cli_volume.copy_master_wwn + def _generate_volume_response(self, cli_volume): - # vol_copy_type and copy_master_wwn were added in a9k. In xiv they didn't exist - is_copy = hasattr(cli_volume, "vol_copy_type") and cli_volume.vol_copy_type == "Copy" - copy_src_object_wwn = cli_volume.copy_master_wwn if is_copy else None + source_object_wwn = self._get_volume_source_wwn(cli_volume) return Volume(self._convert_size_blocks_to_bytes(cli_volume.capacity), cli_volume.wwn, + cli_volume.id, cli_volume.name, self.endpoint, cli_volume.pool_name, - copy_src_object_wwn, + source_object_wwn, self.array_type) def _generate_snapshot_response(self, cli_snapshot): return Snapshot(self._convert_size_blocks_to_bytes(cli_snapshot.capacity), cli_snapshot.wwn, + cli_snapshot.id, cli_snapshot.name, self.endpoint, - volume_id=cli_snapshot.copy_master_wwn, + source_volume_id=self._get_snapshot_source_wwn(cli_snapshot), is_ready=True, array_type=self.array_type) - def get_volume(self, volume_name, pool=None): - logger.debug("Get volume : {}".format(volume_name)) + def _get_cli_object_by_name(self, volume_name): try: - cli_volume = self.client.cmd.vol_list(vol=volume_name).as_single_element + return 
self.client.cmd.vol_list(vol=volume_name).as_single_element except xcli_errors.IllegalNameForObjectError as ex: logger.exception(ex) raise array_errors.IllegalObjectName(ex.status) + def get_volume(self, volume_name, pool=None): + logger.debug("Get volume : {}".format(volume_name)) + cli_volume = self._get_cli_object_by_name(volume_name) + logger.debug("cli volume returned : {}".format(cli_volume)) if not cli_volume: raise array_errors.ObjectNotFoundError(volume_name) @@ -212,13 +232,17 @@ def create_volume(self, name, size_in_bytes, space_efficiency, pool): logger.exception(ex) if NO_ALLOCATION_SPACE_ERROR in ex.status: raise array_errors.NotEnoughSpaceInPool(id_or_name=pool) + raise ex - def copy_to_existing_volume_from_source(self, name, source_name, source_capacity_in_bytes, - minimum_volume_size_in_bytes, pool=None): + def copy_to_existing_volume_from_source(self, volume_id, source_id, source_capacity_in_bytes, + minimum_volume_size_in_bytes): logger.debug( "Copy source {0} data to volume {1}. source capacity {2}. 
Minimal requested volume capacity {3}".format( - name, source_name, source_capacity_in_bytes, minimum_volume_size_in_bytes)) + source_id, volume_id, source_capacity_in_bytes, minimum_volume_size_in_bytes)) try: + name = self._get_object_name_by_wwn(volume_id=volume_id) + source_name = self._get_object_name_by_wwn(volume_id=source_id) + logger.debug("Formatting volume {0}".format(name)) self.client.cmd.vol_format(vol=name) logger.debug("Copying source {0} data to volume {1}.".format(source_name, name)) @@ -233,13 +257,13 @@ def copy_to_existing_volume_from_source(self, name, source_name, source_capacity raise array_errors.IllegalObjectName(ex.status) except xcli_errors.SourceVolumeBadNameError as ex: logger.exception(ex) - raise array_errors.ObjectNotFoundError(source_name) + raise array_errors.ObjectNotFoundError(source_id) except (xcli_errors.VolumeBadNameError, xcli_errors.TargetVolumeBadNameError) as ex: logger.exception(ex) - raise array_errors.ObjectNotFoundError(name) + raise array_errors.ObjectNotFoundError(volume_id) except xcli_errors.OperationForbiddenForUserCategoryError as ex: logger.exception(ex) - raise array_errors.PermissionDeniedError("create volume : {0}".format(name)) + raise array_errors.PermissionDeniedError("copy to {0} from source: {1}".format(volume_id, source_id)) def _get_cli_object_by_wwn(self, volume_id, not_exist_err=False): try: @@ -302,7 +326,7 @@ def get_object_by_id(self, object_id, object_type): return self._generate_snapshot_response(cli_object) return self._generate_volume_response(cli_object) - def create_snapshot(self, volume_id, snapshot_name, pool=None): + def create_snapshot(self, volume_id, snapshot_name, space_efficiency, pool): logger.info("creating snapshot {0} from volume {1}".format(snapshot_name, volume_id)) source_cli_volume = self._get_cli_object_by_wwn(volume_id) if pool and pool != source_cli_volume.pool_name: @@ -385,7 +409,7 @@ def _get_next_available_lun(self, host_name): logger.exception(ex) raise 
array_errors.HostNotFoundError(host_name) - luns_in_use = set([int(host_mapping.lun) for host_mapping in host_mapping_list]) + luns_in_use = set(int(host_mapping.lun) for host_mapping in host_mapping_list) logger.debug("luns in use : {0}".format(luns_in_use)) # try to use random lun number just in case there are many calls at the same time to reduce re-tries @@ -462,7 +486,7 @@ def _get_array_iqn(self): config_get_list = self.client.cmd.config_get().as_list return next(c.value for c in config_get_list if c.name == "iscsi_name") - def get_iscsi_targets_by_iqn(self): + def get_iscsi_targets_by_iqn(self, host_name): array_iqn = self._get_array_iqn() iscsi_targets = self._get_iscsi_targets() return {array_iqn: iscsi_targets} @@ -470,3 +494,18 @@ def get_iscsi_targets_by_iqn(self): def get_array_fc_wwns(self, host_name): fc_wwns_objects = self.client.cmd.fc_port_list() return [port.wwpn for port in fc_wwns_objects if port.port_state == 'Online' and port.role == 'Target'] + + def get_replication(self, volume_internal_id, other_volume_internal_id, other_system_id): + raise NotImplementedError + + def create_replication(self, volume_internal_id, other_volume_internal_id, other_system_id, copy_type): + raise NotImplementedError + + def delete_replication(self, replication_name): + raise NotImplementedError + + def promote_replication_volume(self, replication_name): + raise NotImplementedError + + def demote_replication_volume(self, replication_name): + raise NotImplementedError diff --git a/controller/array_action/config.py b/controller/array_action/config.py index ab36bcf0c..d41f1ba89 100644 --- a/controller/array_action/config.py +++ b/controller/array_action/config.py @@ -5,6 +5,9 @@ SPACE_EFFICIENCY_DEDUPLICATED = 'deduplicated' SPACE_EFFICIENCY_THICK = 'thick' SPACE_EFFICIENCY_NONE = 'none' +REPLICATION_COPY_TYPE_SYNC = "sync" +REPLICATION_COPY_TYPE_ASYNC = "async" +REPLICATION_DEFAULT_COPY_TYPE = REPLICATION_COPY_TYPE_SYNC # volume context CONTEXT_POOL = "pool" diff 
--git a/controller/array_action/errors.py b/controller/array_action/errors.py index 1024224e6..f1850b6b9 100644 --- a/controller/array_action/errors.py +++ b/controller/array_action/errors.py @@ -78,11 +78,11 @@ def __init__(self, msg): self.message = "{0}".format(msg) -class PoolDoesNotMatchCapabilities(InvalidArgumentError): +class PoolDoesNotMatchSpaceEfficiency(InvalidArgumentError): - def __init__(self, pool, capabilities, error): - self.message = messages.PoolDoesNotMatchCapabilities_message.format(pool, capabilities, - error) + def __init__(self, pool, space_efficiency, error): + self.message = messages.PoolDoesNotMatchSpaceEfficiency_message.format(pool, space_efficiency, + error) class SpaceEfficiencyNotSupported(InvalidArgumentError): diff --git a/controller/array_action/messages.py b/controller/array_action/messages.py index 792b13e60..8f8bc2797 100644 --- a/controller/array_action/messages.py +++ b/controller/array_action/messages.py @@ -9,7 +9,7 @@ VolumeNameBelongsToSnapshotError_message = "Volume not found. Snapshot with the same id exists. \ Name : {0} , array : {1}" -PoolDoesNotMatchCapabilities_message = "Pool : {0} does not match the following capabilities : {1} . error : {2}" +PoolDoesNotMatchSpaceEfficiency_message = "Pool : {0} does not match the following space efficiency : {1} . 
error : {2}" SpaceEfficiencyNotSupported_message = "space efficiency is not supported : {0} " diff --git a/controller/common/node_info.py b/controller/common/node_info.py index 41089b2e9..70e328c5f 100644 --- a/controller/common/node_info.py +++ b/controller/common/node_info.py @@ -27,7 +27,7 @@ def __init__(self, iscsi_iqn, fc_wwns): """ self.iscsi_iqn = iscsi_iqn self.fc_wwns = fc_wwns - self._fc_wwns_lowercase_set = set([wwn.lower() for wwn in fc_wwns]) + self._fc_wwns_lowercase_set = set(wwn.lower() for wwn in fc_wwns) self._iscsi_iqn_lowercase = iscsi_iqn.lower() def is_array_wwns_match(self, host_wwns): diff --git a/controller/controller_server/addons_server.py b/controller/controller_server/addons_server.py new file mode 100644 index 000000000..d58b29eab --- /dev/null +++ b/controller/controller_server/addons_server.py @@ -0,0 +1,161 @@ +import grpc + +from controller.array_action import errors as array_errors +from controller.array_action.config import REPLICATION_DEFAULT_COPY_TYPE +from controller.array_action.storage_agent import get_agent +from controller.common.csi_logger import get_stdout_logger +from controller.common.utils import set_current_thread_name +from controller.controller_server import config, utils +from controller.controller_server.exception_handler import handle_common_exceptions, build_error_response +from controller.csi_general import replication_pb2 as pb2 +from controller.csi_general import replication_pb2_grpc as pb2_grpc + +logger = get_stdout_logger() + + +class ReplicationControllerServicer(pb2_grpc.ControllerServicer): + @handle_common_exceptions(pb2.EnableVolumeReplicationResponse) + def EnableVolumeReplication(self, request, context): + set_current_thread_name(request.volume_id) + logger.info("EnableVolumeReplication") + utils.validate_addons_request(request) + + volume_id_info = utils.get_volume_id_info(request.volume_id) + volume_id = volume_id_info.object_id + volume_internal_id = volume_id_info.internal_id + + 
other_volume_id_info = utils.get_volume_id_info(request.replication_id) + other_volume_internal_id = other_volume_id_info.internal_id + + other_system_id = request.parameters.get(config.PARAMETERS_SYSTEM_ID) + copy_type = request.parameters.get(config.PARAMETERS_COPY_TYPE, REPLICATION_DEFAULT_COPY_TYPE) + + connection_info = utils.get_array_connection_info_from_secrets(request.secrets) + with get_agent(connection_info, volume_id_info.array_type).get_mediator() as mediator: + volume = mediator.get_object_by_id(volume_id, config.VOLUME_TYPE_NAME) + if not volume: + raise array_errors.ObjectNotFoundError(volume_id) + replication = mediator.get_replication(volume_internal_id, other_volume_internal_id, other_system_id) + if replication: + if replication.copy_type != copy_type: + message = "replication already exists " \ + "but has copy type of {} and not {}".format(replication.copy_type, copy_type) + return build_error_response(message, context, grpc.StatusCode.ALREADY_EXISTS, + pb2.EnableVolumeReplicationResponse) + logger.info("idempotent case. 
replication already exists " + "for volume {} with system: {}".format(volume.name, + other_system_id)) + return pb2.EnableVolumeReplicationResponse() + + logger.info("creating replication for volume {} with system: {}".format(volume.name, + other_system_id)) + mediator.create_replication(volume_internal_id, other_volume_internal_id, other_system_id, copy_type) + + return pb2.EnableVolumeReplicationResponse() + + @handle_common_exceptions(pb2.DisableVolumeReplicationResponse) + def DisableVolumeReplication(self, request, context): + set_current_thread_name(request.volume_id) + logger.info("DisableVolumeReplication") + utils.validate_addons_request(request) + + volume_id_info = utils.get_volume_id_info(request.volume_id) + volume_internal_id = volume_id_info.internal_id + + other_volume_id_info = utils.get_volume_id_info(request.replication_id) + other_volume_internal_id = other_volume_id_info.internal_id + + other_system_id = request.parameters.get(config.PARAMETERS_SYSTEM_ID) + + connection_info = utils.get_array_connection_info_from_secrets(request.secrets) + with get_agent(connection_info, volume_id_info.array_type).get_mediator() as mediator: + replication = mediator.get_replication(volume_internal_id, other_volume_internal_id, other_system_id) + if replication: + logger.info("deleting replication {} with system {}".format(replication.name, + other_system_id)) + mediator.delete_replication(replication.name) + else: + logger.info("idempotent case. replication is already deleted with system {}".format(other_system_id)) + + return pb2.DisableVolumeReplicationResponse() + + @staticmethod + def _ensure_volume_role_for_replication(mediator, replication, is_to_promote): + if is_to_promote: + if replication.is_primary: + logger.info("idempotent case. 
volume is already primary") + else: + logger.info("promoting volume for replication {}".format(replication.name)) + mediator.promote_replication_volume(replication.name) + else: + if replication.is_primary or replication.is_primary is None: + logger.info("demoting volume for replication {}".format(replication.name)) + mediator.demote_replication_volume(replication.name) + else: + logger.info("idempotent case. volume is already secondary") + + def _ensure_volume_role(self, request, context, is_to_promote, response_type): + set_current_thread_name(request.volume_id) + method_name = "PromoteVolume" if is_to_promote else "DemoteVolume" + logger.info(method_name) + utils.validate_addons_request(request) + + volume_id_info = utils.get_volume_id_info(request.volume_id) + volume_internal_id = volume_id_info.internal_id + + other_volume_id_info = utils.get_volume_id_info(request.replication_id) + other_volume_internal_id = other_volume_id_info.internal_id + + other_system_id = request.parameters.get(config.PARAMETERS_SYSTEM_ID) + + connection_info = utils.get_array_connection_info_from_secrets(request.secrets) + with get_agent(connection_info, volume_id_info.array_type).get_mediator() as mediator: + replication = mediator.get_replication(volume_internal_id, other_volume_internal_id, other_system_id) + if not replication: + message = "could not find replication for volume internal id: {} " \ + "with volume internal id: {} of system: {}".format(volume_internal_id, + other_volume_internal_id, + other_system_id) + return build_error_response(message, context, grpc.StatusCode.FAILED_PRECONDITION, response_type) + logger.info("found replication {} on system {}".format(replication.name, mediator.identifier)) + + self._ensure_volume_role_for_replication(mediator, replication, is_to_promote) + + logger.info("finished {}".format(method_name)) + return response_type() + + @handle_common_exceptions(pb2.PromoteVolumeResponse) + def PromoteVolume(self, request, context): + return 
self._ensure_volume_role(request, context, is_to_promote=True, response_type=pb2.PromoteVolumeResponse) + + @handle_common_exceptions(pb2.DemoteVolumeResponse) + def DemoteVolume(self, request, context): + return self._ensure_volume_role(request, context, is_to_promote=False, response_type=pb2.DemoteVolumeResponse) + + @handle_common_exceptions(pb2.ResyncVolumeResponse) + def ResyncVolume(self, request, context): + set_current_thread_name(request.volume_id) + logger.info("ResyncVolume") + utils.validate_addons_request(request) + + volume_id_info = utils.get_volume_id_info(request.volume_id) + volume_internal_id = volume_id_info.internal_id + + other_volume_id_info = utils.get_volume_id_info(request.replication_id) + other_volume_internal_id = other_volume_id_info.internal_id + + other_system_id = request.parameters.get(config.PARAMETERS_SYSTEM_ID) + + connection_info = utils.get_array_connection_info_from_secrets(request.secrets) + with get_agent(connection_info, volume_id_info.array_type).get_mediator() as mediator: + replication = mediator.get_replication(volume_internal_id, other_volume_internal_id, other_system_id) + if not replication: + message = "could not find replication for volume internal id: {} " \ + "with volume internal id: {} of system: {}".format(volume_internal_id, + other_volume_internal_id, + other_system_id) + return build_error_response(message, context, grpc.StatusCode.FAILED_PRECONDITION, + pb2.ResyncVolumeResponse) + + logger.info("is replication {} ready: {}".format(replication.name, replication.is_ready)) + return pb2.ResyncVolumeResponse(ready=replication.is_ready) diff --git a/controller/controller_server/config.py b/controller/controller_server/config.py index a7006acda..b85697722 100644 --- a/controller/controller_server/config.py +++ b/controller/controller_server/config.py @@ -24,14 +24,22 @@ - NUMBER_OF_DELIMITERS_IN_VOLUME_ID SECRET_VALIDATION_REGEX = '^[a-zA-Z0-9][a-zA-Z0-9-_.]*[a-zA-Z0-9]$' +VOLUME_CONTEXT_VOLUME_NAME = 
"volume_name" +VOLUME_CONTEXT_ARRAY_ADDRESS = "array_address" +VOLUME_CONTEXT_POOL = "pool_name" +VOLUME_CONTEXT_STORAGE_TYPE = "storage_type" + PARAMETERS_POOL = "pool" -PARAMETERS_BY_SYSTEM = "by_system_id" +PARAMETERS_BY_SYSTEM = "by_management_id" PARAMETERS_SPACE_EFFICIENCY = "SpaceEfficiency" PARAMETERS_VOLUME_NAME_PREFIX = "volume_name_prefix" PARAMETERS_SNAPSHOT_NAME_PREFIX = "snapshot_name_prefix" +PARAMETERS_SYSTEM_ID = "system_id" +PARAMETERS_COPY_TYPE = "copy_type" PARAMETERS_CAPACITY_DELIMITER = "=" PARAMETERS_CAPABILITIES_DELIMITER = "=" -PARAMETERS_OBJECT_ID_DELIMITER = ":" +PARAMETERS_OBJECT_ID_INFO_DELIMITER = ":" +PARAMETERS_OBJECT_IDS_DELIMITER = ";" PARAMETERS_NODE_ID_DELIMITER = ";" PARAMETERS_FC_WWN_DELIMITER = ":" PARAMETERS_TOPOLOGY_DELIMITER = "/" diff --git a/controller/controller_server/controller_server_manager.py b/controller/controller_server/controller_server_manager.py new file mode 100644 index 000000000..4565aae53 --- /dev/null +++ b/controller/controller_server/controller_server_manager.py @@ -0,0 +1,48 @@ +import time +from concurrent import futures + +import grpc + +from controller.common import settings +from controller.common.csi_logger import get_stdout_logger +from controller.controller_server.csi_controller_server import CSIControllerServicer +from controller.controller_server.addons_server import ReplicationControllerServicer +from controller.csi_general import csi_pb2_grpc +from controller.csi_general import replication_pb2_grpc + + +logger = get_stdout_logger() + + +class ControllerServerManager: + def __init__(self, array_endpoint): + self.endpoint = array_endpoint + self.csi_servicer = CSIControllerServicer() + self.replication_servicer = ReplicationControllerServicer() + + def start_server(self): + controller_server = grpc.server(futures.ThreadPoolExecutor(max_workers=settings.CSI_CONTROLLER_SERVER_WORKERS)) + + csi_pb2_grpc.add_ControllerServicer_to_server(self.csi_servicer, controller_server) + 
csi_pb2_grpc.add_IdentityServicer_to_server(self.csi_servicer, controller_server) + replication_pb2_grpc.add_ControllerServicer_to_server(self.replication_servicer, controller_server) + + # bind the server to the port defined above + # controller_server.add_insecure_port('[::]:{}'.format(self.server_port)) + # controller_server.add_insecure_port('unix://{}'.format(self.server_port)) + controller_server.add_insecure_port(self.endpoint) + + logger.info("Controller version: {}".format(self.csi_servicer.get_identity_config("version"))) + + # start the server + logger.debug("Listening for connections on endpoint address: {}".format(self.endpoint)) + + controller_server.start() + logger.debug('Controller Server running ...') + + try: + while True: + time.sleep(60 * 60 * 60) + except KeyboardInterrupt: + controller_server.stop(0) + logger.debug('Controller Server Stopped ...') diff --git a/controller/controller_server/controller_types.py b/controller/controller_server/controller_types.py index 0fafdef6a..010821b92 100644 --- a/controller/controller_server/controller_types.py +++ b/controller/controller_server/controller_types.py @@ -13,6 +13,7 @@ class ArrayConnectionInfo: class ObjectIdInfo: array_type: str system_id: str + internal_id: str object_id: str diff --git a/controller/controller_server/csi_controller_server.py b/controller/controller_server/csi_controller_server.py index 4caff5f25..086df758b 100755 --- a/controller/controller_server/csi_controller_server.py +++ b/controller/controller_server/csi_controller_server.py @@ -1,7 +1,4 @@ import os.path -import time -from concurrent import futures -from argparse import ArgumentParser import grpc import yaml @@ -13,27 +10,25 @@ from controller.array_action import messages from controller.array_action.storage_agent import get_agent, detect_array_type from controller.common import settings -from controller.common.csi_logger import get_stdout_logger, set_log_level +from controller.common.csi_logger import 
get_stdout_logger from controller.common.node_info import NodeIdInfo from controller.common.utils import set_current_thread_name -from controller.controller_server.errors import ObjectIdError, ValidationException -from controller.controller_server.exception_handler import handle_common_exceptions, handle_exception from controller.controller_server import messages as controller_messages +from controller.controller_server.errors import ObjectIdError, ValidationException +from controller.controller_server.exception_handler import handle_common_exceptions, handle_exception, \ + build_error_response from controller.csi_general import csi_pb2 from controller.csi_general import csi_pb2_grpc logger = get_stdout_logger() -class ControllerServicer(csi_pb2_grpc.ControllerServicer): +class CSIControllerServicer(csi_pb2_grpc.ControllerServicer): """ gRPC server for Digestor Service """ - def __init__(self, array_endpoint): - - self.endpoint = array_endpoint - + def __init__(self): my_path = os.path.abspath(os.path.dirname(__file__)) path = os.path.join(my_path, "../../common/config.yaml") @@ -47,10 +42,8 @@ def CreateVolume(self, request, context): try: utils.validate_create_volume_request(request) except ObjectIdError as ex: - logger.exception(ex) - context.set_details(ex.message) - context.set_code(grpc.StatusCode.NOT_FOUND) - return csi_pb2.CreateVolumeResponse() + return handle_exception(ex, context, grpc.StatusCode.NOT_FOUND, + csi_pb2.CreateVolumeResponse) logger.debug("volume name : {}".format(request.name)) @@ -84,10 +77,8 @@ def CreateVolume(self, request, context): if required_bytes > max_size: message = messages.SizeOutOfRangeError_message.format(required_bytes, max_size) - logger.error(message) - context.set_details(message) - context.set_code(grpc.StatusCode.OUT_OF_RANGE) - return csi_pb2.CreateVolumeResponse() + return build_error_response(message, context, grpc.StatusCode.OUT_OF_RANGE, + csi_pb2.CreateVolumeResponse) if required_bytes == 0: required_bytes = 
min_size @@ -109,9 +100,9 @@ def CreateVolume(self, request, context): logger.debug("volume found : {}".format(volume)) if not source_id and volume.capacity_bytes != request.capacity_range.required_bytes: - context.set_details("Volume was already created with different size.") - context.set_code(grpc.StatusCode.ALREADY_EXISTS) - return csi_pb2.CreateVolumeResponse() + message = "Volume was already created with different size." + return build_error_response(message, context, grpc.StatusCode.ALREADY_EXISTS, + csi_pb2.CreateVolumeResponse) copy_source_res = self._handle_existing_volume_source(volume, source_id, source_type, array_connection_info.system_id, @@ -122,30 +113,29 @@ def CreateVolume(self, request, context): if source_id: self._copy_to_existing_volume_from_source(volume, source_id, source_type, required_bytes, - array_mediator, pool) + array_mediator) volume.copy_source_id = source_id res = utils.generate_csi_create_volume_response(volume, array_connection_info.system_id, source_type) logger.info("finished create volume") return res except array_errors.InvalidArgumentError as ex: - handle_exception(ex, context, grpc.StatusCode.INVALID_ARGUMENT, csi_pb2.CreateVolumeResponse) + return handle_exception(ex, context, grpc.StatusCode.INVALID_ARGUMENT, csi_pb2.CreateVolumeResponse) except array_errors.VolumeAlreadyExists as ex: - handle_exception(ex, context, grpc.StatusCode.ALREADY_EXISTS, csi_pb2.CreateVolumeResponse) + return handle_exception(ex, context, grpc.StatusCode.ALREADY_EXISTS, csi_pb2.CreateVolumeResponse) def _copy_to_existing_volume_from_source(self, volume, source_id, source_type, - minimum_volume_size, array_mediator, pool): - volume_name = volume.name + minimum_volume_size, array_mediator): + volume_id = volume.id try: source_object = array_mediator.get_object_by_id(source_id, source_type) if not source_object: self._rollback_create_volume_from_source(array_mediator, volume.id) raise array_errors.ObjectNotFoundError(source_id) - source_name = 
source_object.name source_capacity = source_object.capacity_bytes - logger.debug("Copy {0} {1} data to volume {2}.".format(source_type, source_id, volume_name)) - array_mediator.copy_to_existing_volume_from_source(volume_name, source_name, - source_capacity, minimum_volume_size, pool) + logger.debug("Copy {0} {1} data to volume {2}.".format(source_type, source_id, volume_id)) + array_mediator.copy_to_existing_volume_from_source(volume_id, source_id, + source_capacity, minimum_volume_size) logger.debug("Copy volume from {0} finished".format(source_type)) except array_errors.ObjectNotFoundError as ex: logger.error("Volume not found while copying {0} data to volume".format(source_type)) @@ -193,74 +183,51 @@ def _handle_volume_exists_with_same_source(self, context, source_id, source_type def _handle_volume_exists_with_different_source(self, context, source_id, source_type, volume_name): logger.debug( "Volume {0} exists but it is not a copy of {1} {2}.".format(volume_name, source_type, source_id)) - context.set_details("Volume already exists but it was created from a different source.") - context.set_code(grpc.StatusCode.ALREADY_EXISTS) - return csi_pb2.CreateVolumeResponse() + message = "Volume already exists but it was created from a different source." + return build_error_response(message, context, grpc.StatusCode.ALREADY_EXISTS, csi_pb2.CreateVolumeResponse) + @handle_common_exceptions(csi_pb2.DeleteVolumeResponse) def DeleteVolume(self, request, context): set_current_thread_name(request.volume_id) logger.info("DeleteVolume") secrets = request.secrets + utils.validate_delete_volume_request(request) try: - utils.validate_delete_volume_request(request) - - try: - volume_id_info = utils.get_volume_id_info(request.volume_id) - except ObjectIdError as ex: - logger.warning("volume id is invalid. 
error : {}".format(ex)) - return csi_pb2.DeleteVolumeResponse() - system_id = volume_id_info.system_id - array_type = volume_id_info.array_type - volume_id = volume_id_info.object_id - array_connection_info = utils.get_array_connection_info_from_secrets(secrets, system_id=system_id) - - with get_agent(array_connection_info, array_type).get_mediator() as array_mediator: - logger.debug(array_mediator) - - try: - - logger.debug("Deleting volume {0}".format(volume_id)) - array_mediator.delete_volume(volume_id) + volume_id_info = utils.get_volume_id_info(request.volume_id) + except ObjectIdError as ex: + logger.warning("volume id is invalid. error : {}".format(ex)) + return csi_pb2.DeleteVolumeResponse() - except array_errors.ObjectNotFoundError as ex: - logger.debug("volume was not found during deletion: {0}".format(ex)) + system_id = volume_id_info.system_id + array_type = volume_id_info.array_type + volume_id = volume_id_info.object_id + array_connection_info = utils.get_array_connection_info_from_secrets(secrets, system_id=system_id) - except array_errors.PermissionDeniedError as ex: - context.set_code(grpc.StatusCode.PERMISSION_DENIED) - context.set_details(ex.message) - return csi_pb2.DeleteVolumeResponse() + with get_agent(array_connection_info, array_type).get_mediator() as array_mediator: + logger.debug(array_mediator) - except array_errors.ObjectIsStillInUseError as ex: - logger.info("could not delete volume while in use: {0}".format(ex)) - context.set_code(grpc.StatusCode.FAILED_PRECONDITION) - context.set_details(ex.message) - return csi_pb2.DeleteVolumeResponse() - - except (ValidationException, array_errors.IllegalObjectID) as ex: - logger.exception(ex) - context.set_details(ex.message) - context.set_code(grpc.StatusCode.INVALID_ARGUMENT) - return csi_pb2.DeleteVolumeResponse() + try: + logger.debug("Deleting volume {0}".format(volume_id)) + array_mediator.delete_volume(volume_id) - except Exception as ex: - logger.debug("an internal exception occurred") - 
logger.exception(ex) - context.set_code(grpc.StatusCode.INTERNAL) - context.set_details('an internal exception occurred : {}'.format(ex)) - return csi_pb2.DeleteVolumeResponse() + except array_errors.ObjectNotFoundError as ex: + logger.debug("volume was not found during deletion: {0}".format(ex)) + except array_errors.PermissionDeniedError as ex: + return handle_exception(ex, context, grpc.StatusCode.PERMISSION_DENIED, + csi_pb2.DeleteVolumeResponse) logger.debug("generating delete volume response") res = csi_pb2.DeleteVolumeResponse() logger.info("finished DeleteVolume") return res + @handle_common_exceptions(csi_pb2.ControllerPublishVolumeResponse) def ControllerPublishVolume(self, request, context): set_current_thread_name(request.volume_id) logger.info("ControllerPublishVolume") + utils.validate_publish_volume_request(request) try: - utils.validate_publish_volume_request(request) - volume_id_info = utils.get_volume_id_info(request.volume_id) system_id = volume_id_info.system_id array_type = volume_id_info.array_type @@ -283,55 +250,23 @@ def ControllerPublishVolume(self, request, context): return res except array_errors.VolumeMappedToMultipleHostsError as ex: - logger.exception(ex) - context.set_details(ex.message) - context.set_code(grpc.StatusCode.FAILED_PRECONDITION) - return csi_pb2.ControllerPublishVolumeResponse() - - except array_errors.PermissionDeniedError as ex: - context.set_code(grpc.StatusCode.PERMISSION_DENIED) - context.set_details(ex.message) - return csi_pb2.ControllerPublishVolumeResponse() - + return handle_exception(ex, context, grpc.StatusCode.FAILED_PRECONDITION, + csi_pb2.ControllerPublishVolumeResponse) except (array_errors.LunAlreadyInUseError, array_errors.NoAvailableLunError) as ex: - logger.exception(ex) - context.set_details(ex.message) - context.set_code(grpc.StatusCode.RESOURCE_EXHAUSTED) - return csi_pb2.ControllerPublishVolumeResponse() - - except (array_errors.HostNotFoundError, array_errors.ObjectNotFoundError, - 
array_errors.NoIscsiTargetsFoundError, ObjectIdError) as ex: - logger.exception(ex) - context.set_details(ex.message) - context.set_code(grpc.StatusCode.NOT_FOUND) - return csi_pb2.ControllerPublishVolumeResponse() - - except (ValidationException, array_errors.IllegalObjectID, - array_errors.UnsupportedConnectivityTypeError) as ex: - logger.exception(ex) - context.set_details(ex.message) - context.set_code(grpc.StatusCode.INVALID_ARGUMENT) - return csi_pb2.ControllerPublishVolumeResponse() - - except Exception as ex: - logger.debug("an internal exception occurred") - logger.exception(ex) - context.set_code(grpc.StatusCode.INTERNAL) - context.set_details('an internal exception occurred : {}'.format(ex)) - return csi_pb2.ControllerPublishVolumeResponse() - + return handle_exception(ex, context, grpc.StatusCode.RESOURCE_EXHAUSTED, + csi_pb2.ControllerPublishVolumeResponse) + except (array_errors.NoIscsiTargetsFoundError, ObjectIdError) as ex: + return handle_exception(ex, context, grpc.StatusCode.NOT_FOUND, csi_pb2.ControllerPublishVolumeResponse) + except array_errors.UnsupportedConnectivityTypeError as ex: + return handle_exception(ex, context, grpc.StatusCode.INVALID_ARGUMENT, + csi_pb2.ControllerPublishVolumeResponse) + + @handle_common_exceptions(csi_pb2.ControllerUnpublishVolumeResponse) def ControllerUnpublishVolume(self, request, context): set_current_thread_name(request.volume_id) logger.info("ControllerUnpublishVolume") + utils.validate_unpublish_volume_request(request) try: - try: - utils.validate_unpublish_volume_request(request) - except ValidationException as ex: - logger.exception(ex) - context.set_details(ex.message) - context.set_code(grpc.StatusCode.INVALID_ARGUMENT) - return csi_pb2.ControllerUnpublishVolumeResponse() - volume_id_info = utils.get_volume_id_info(request.volume_id) system_id = volume_id_info.system_id array_type = volume_id_info.array_type @@ -349,57 +284,65 @@ def ControllerUnpublishVolume(self, request, context): 
logger.info("finished ControllerUnpublishVolume") return csi_pb2.ControllerUnpublishVolumeResponse() - + except ObjectIdError as ex: + return handle_exception(ex, context, grpc.StatusCode.INVALID_ARGUMENT, + array_errors.VolumeAlreadyUnmappedError) except array_errors.VolumeAlreadyUnmappedError: logger.debug("Idempotent case. volume is already unmapped.") return csi_pb2.ControllerUnpublishVolumeResponse() - - except array_errors.ObjectNotFoundError as ex: + except array_errors.ObjectNotFoundError: logger.debug("Idempotent case. volume is already deleted.") return csi_pb2.ControllerUnpublishVolumeResponse() - except array_errors.PermissionDeniedError as ex: - context.set_code(grpc.StatusCode.PERMISSION_DENIED) - context.set_details(ex.message) - return csi_pb2.ControllerUnpublishVolumeResponse() + @handle_common_exceptions(csi_pb2.ValidateVolumeCapabilitiesResponse) + def ValidateVolumeCapabilities(self, request, context): + logger.info("ValidateVolumeCapabilities") + try: + utils.validate_validate_volume_capabilities_request(request) - except array_errors.HostNotFoundError as ex: - logger.exception(ex) - context.set_details(ex.message) - context.set_code(grpc.StatusCode.NOT_FOUND) - return csi_pb2.ControllerUnpublishVolumeResponse() + volume_id_info = utils.get_volume_id_info(request.volume_id) + system_id = volume_id_info.system_id + array_type = volume_id_info.array_type + volume_id = volume_id_info.object_id - except Exception as ex: - logger.debug("an internal exception occurred") - logger.exception(ex) - context.set_code(grpc.StatusCode.INTERNAL) - context.set_details('an internal exception occurred : {}'.format(ex)) - return csi_pb2.ControllerUnpublishVolumeResponse() + array_connection_info = utils.get_array_connection_info_from_secrets(request.secrets, + system_id=system_id) - def ValidateVolumeCapabilities(self, request, context): - logger.info("ValidateVolumeCapabilities") - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - logger.info("finished 
ValidateVolumeCapabilities") - return csi_pb2.ValidateVolumeCapabilitiesResponse() + with get_agent(array_connection_info, array_type).get_mediator() as array_mediator: + + volume = array_mediator.get_object_by_id(object_id=volume_id, object_type=config.VOLUME_TYPE_NAME) + + if not volume: + raise array_errors.ObjectNotFoundError(volume_id) + logger.debug("volume found : {}".format(volume)) + + if request.volume_context: + utils.validate_volume_context_match_volume(request.volume_context, volume) + if request.parameters: + utils.validate_parameters_match_volume(request.parameters, volume) + + logger.info("finished ValidateVolumeCapabilities") + return utils.generate_csi_validate_volume_capabilities_response(request.volume_context, + request.volume_capabilities, + request.parameters) + except ObjectIdError as ex: + return handle_exception(ex, context, grpc.StatusCode.NOT_FOUND, + csi_pb2.CreateSnapshotResponse) + except array_errors.SpaceEfficiencyNotSupported as ex: + return handle_exception(ex, context, grpc.StatusCode.INVALID_ARGUMENT, + csi_pb2.CreateSnapshotResponse) + + @handle_common_exceptions(csi_pb2.ListVolumesResponse) def ListVolumes(self, request, context): logger.info("ListVolumes") - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - logger.info("finished ListVolumes") - return csi_pb2.ListVolumesResponse() + raise NotImplementedError() + @handle_common_exceptions(csi_pb2.CreateSnapshotResponse) def CreateSnapshot(self, request, context): set_current_thread_name(request.name) logger.info("Create snapshot") - try: - utils.validate_create_snapshot_request(request) - except ValidationException as ex: - logger.error("failed request validation") - logger.exception(ex) - context.set_details(ex.message) - context.set_code(grpc.StatusCode.INVALID_ARGUMENT) - return csi_pb2.CreateSnapshotResponse() - + utils.validate_create_snapshot_request(request) source_volume_id = request.source_volume_id logger.info("Snapshot base name : {}. 
Source volume id : {}".format(request.name, source_volume_id)) secrets = request.secrets @@ -412,6 +355,7 @@ def CreateSnapshot(self, request, context): snapshot_parameters = utils.get_snapshot_parameters(parameters=request.parameters, system_id=array_connection_info.system_id) pool = snapshot_parameters.pool + space_efficiency = snapshot_parameters.space_efficiency with get_agent(array_connection_info, array_type).get_mediator() as array_mediator: logger.debug(array_mediator) snapshot_final_name = self._get_snapshot_final_name(snapshot_parameters, request.name, array_mediator) @@ -425,58 +369,46 @@ def CreateSnapshot(self, request, context): if snapshot: if snapshot.source_volume_id != volume_id: - context.set_details( - messages.SnapshotWrongVolumeError_message.format(snapshot_final_name, - snapshot.source_volume_id, - volume_id)) - context.set_code(grpc.StatusCode.ALREADY_EXISTS) - return csi_pb2.CreateSnapshotResponse() + message = messages.SnapshotWrongVolumeError_message.format(snapshot_final_name, + snapshot.source_volume_id, + volume_id) + return build_error_response(message, context, grpc.StatusCode.ALREADY_EXISTS, + csi_pb2.CreateSnapshotResponse) else: logger.debug( "Snapshot doesn't exist. 
Creating a new snapshot {0} from volume {1}".format( snapshot_final_name, volume_id)) - snapshot = array_mediator.create_snapshot(volume_id, snapshot_final_name, pool) + array_mediator.validate_supported_space_efficiency(space_efficiency) + snapshot = array_mediator.create_snapshot(volume_id, snapshot_final_name, space_efficiency, pool) logger.debug("generating create snapshot response") - res = utils.generate_csi_create_snapshot_response(snapshot, source_volume_id) + res = utils.generate_csi_create_snapshot_response(snapshot, system_id, source_volume_id) logger.info("finished create snapshot") return res - except (array_errors.IllegalObjectName, array_errors.IllegalObjectID, - array_errors.SnapshotSourcePoolMismatch) as ex: - context.set_details(ex.message) - context.set_code(grpc.StatusCode.INVALID_ARGUMENT) - return csi_pb2.CreateSnapshotResponse() - except array_errors.ObjectNotFoundError as ex: - context.set_code(grpc.StatusCode.NOT_FOUND) - context.set_details(ex.message) - return csi_pb2.CreateSnapshotResponse() - except array_errors.PermissionDeniedError as ex: - context.set_code(grpc.StatusCode.PERMISSION_DENIED) - context.set_details(ex.message) - return csi_pb2.CreateSnapshotResponse() + except (ObjectIdError, array_errors.SnapshotSourcePoolMismatch, array_errors.SpaceEfficiencyNotSupported) as ex: + return handle_exception(ex, context, grpc.StatusCode.INVALID_ARGUMENT, + csi_pb2.CreateSnapshotResponse) except array_errors.SnapshotAlreadyExists as ex: - context.set_details(ex.message) - context.set_code(grpc.StatusCode.ALREADY_EXISTS) - return csi_pb2.CreateSnapshotResponse() - except Exception as ex: - logger.error("an internal exception occurred") - logger.exception(ex) - context.set_code(grpc.StatusCode.INTERNAL) - context.set_details('an internal exception occurred : {}'.format(ex)) - return csi_pb2.CreateSnapshotResponse() + return handle_exception(ex, context, grpc.StatusCode.ALREADY_EXISTS, + csi_pb2.CreateSnapshotResponse) + except 
array_errors.NotEnoughSpaceInPool as ex: + return handle_exception(ex, context, grpc.StatusCode.RESOURCE_EXHAUSTED, + csi_pb2.CreateSnapshotResponse) + @handle_common_exceptions(csi_pb2.DeleteSnapshotResponse) def DeleteSnapshot(self, request, context): set_current_thread_name(request.snapshot_id) logger.info("Delete snapshot") secrets = request.secrets + utils.validate_delete_snapshot_request(request) try: - utils.validate_delete_snapshot_request(request) try: snapshot_id_info = utils.get_snapshot_id_info(request.snapshot_id) except ObjectIdError as ex: logger.warning("Snapshot id is invalid. error : {}".format(ex)) return csi_pb2.DeleteSnapshotResponse() + system_id = snapshot_id_info.system_id array_type = snapshot_id_info.array_type snapshot_id = snapshot_id_info.object_id @@ -485,7 +417,6 @@ def DeleteSnapshot(self, request, context): logger.debug(array_mediator) try: array_mediator.delete_snapshot(snapshot_id) - except array_errors.ObjectNotFoundError as ex: logger.debug("Snapshot was not found during deletion: {0}".format(ex)) @@ -493,52 +424,29 @@ def DeleteSnapshot(self, request, context): logger.debug("snapshot was not found during deletion: {0}".format(ex.message)) context.set_code(grpc.StatusCode.OK) return csi_pb2.DeleteSnapshotResponse() - except array_errors.ObjectIsStillInUseError as ex: - logger.info("could not delete snapshot while in use: {0}".format(ex.message)) - context.set_code(grpc.StatusCode.FAILED_PRECONDITION) - context.set_details(ex.message) - return csi_pb2.DeleteSnapshotResponse() - except array_errors.PermissionDeniedError as ex: - context.set_code(grpc.StatusCode.PERMISSION_DENIED) - context.set_details(ex.message) - return csi_pb2.DeleteSnapshotResponse() - except (ValidationException, array_errors.IllegalObjectID) as ex: - logger.exception(ex) - context.set_details(ex.message) - context.set_code(grpc.StatusCode.INVALID_ARGUMENT) - return csi_pb2.DeleteSnapshotResponse() - except Exception as ex: - logger.debug("an internal 
exception occurred") - logger.exception(ex) - context.set_code(grpc.StatusCode.INTERNAL) - context.set_details('an internal exception occurred : {}'.format(ex)) - return csi_pb2.DeleteSnapshotResponse() logger.debug("generating delete snapshot response") res = csi_pb2.DeleteSnapshotResponse() logger.info("finished DeleteSnapshot") return res + @handle_common_exceptions(csi_pb2.GetCapacityResponse) def GetCapacity(self, request, context): logger.info("GetCapacity") - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - logger.info("finished GetCapacity") - return csi_pb2.GetCapacityResponse() + raise NotImplementedError() + @handle_common_exceptions(csi_pb2.ControllerExpandVolumeResponse) def ControllerExpandVolume(self, request, context): set_current_thread_name(request.volume_id) logger.info("ControllerExpandVolume") secrets = request.secrets - + utils.validate_expand_volume_request(request) + try: + volume_id_info = utils.get_volume_id_info(request.volume_id) + except ObjectIdError as ex: + return handle_exception(ex, context, grpc.StatusCode.INVALID_ARGUMENT, + csi_pb2.ControllerExpandVolumeResponse) try: - utils.validate_expand_volume_request(request) - - try: - volume_id_info = utils.get_volume_id_info(request.volume_id) - except ObjectIdError as ex: - logger.warning("volume id is invalid. 
error : {}".format(ex)) - context.set_code(grpc.StatusCode.INVALID_ARGUMENT) - return csi_pb2.ControllerExpandVolumeResponse() system_id = volume_id_info.system_id array_type = volume_id_info.array_type volume_id = volume_id_info.object_id @@ -560,10 +468,8 @@ def ControllerExpandVolume(self, request, context): if required_bytes > max_size: message = messages.SizeOutOfRangeError_message.format(required_bytes, max_size) - logger.error(message) - context.set_code(grpc.StatusCode.OUT_OF_RANGE) - context.set_details(message) - return csi_pb2.ControllerExpandVolumeResponse() + return build_error_response(message, context, grpc.StatusCode.OUT_OF_RANGE, + csi_pb2.ControllerExpandVolumeResponse) logger.debug("expanding volume {0}".format(volume_id)) array_mediator.expand_volume( @@ -578,35 +484,9 @@ def ControllerExpandVolume(self, request, context): logger.info("finished expanding volume") return res - except array_errors.PermissionDeniedError as ex: - context.set_code(grpc.StatusCode.PERMISSION_DENIED) - context.set_details(ex.message) - return csi_pb2.ControllerExpandVolumeResponse() - - except array_errors.ObjectNotFoundError as ex: - logger.info("Volume not found: {0}".format(ex)) - context.set_code(grpc.StatusCode.NOT_FOUND) - context.set_details(ex.message) - return csi_pb2.ControllerExpandVolumeResponse() - - except (ValidationException, array_errors.IllegalObjectID) as ex: - logger.exception(ex) - context.set_details(ex.message) - context.set_code(grpc.StatusCode.INVALID_ARGUMENT) - return csi_pb2.ControllerExpandVolumeResponse() - except array_errors.NotEnoughSpaceInPool as ex: - logger.exception(ex) - context.set_details(ex.message) - context.set_code(grpc.StatusCode.RESOURCE_EXHAUSTED) - return csi_pb2.ControllerExpandVolumeResponse() - - except Exception as ex: - logger.debug("an internal exception occurred") - logger.exception(ex) - context.set_code(grpc.StatusCode.INTERNAL) - context.set_details('an internal exception occurred : {}'.format(ex)) - return 
csi_pb2.ControllerExpandVolumeResponse() + return handle_exception(ex, context, grpc.StatusCode.RESOURCE_EXHAUSTED, + csi_pb2.ControllerExpandVolumeResponse) def ControllerGetCapabilities(self, request, context): logger.info("ControllerGetCapabilities") @@ -627,25 +507,18 @@ def ControllerGetCapabilities(self, request, context): logger.info("finished ControllerGetCapabilities") return res - def __get_identity_config(self, attribute_name): + def get_identity_config(self, attribute_name): return self.cfg['identity'][attribute_name] + @handle_common_exceptions(csi_pb2.GetPluginInfoResponse) def GetPluginInfo(self, _, context): logger.info("GetPluginInfo") - try: - name = self.__get_identity_config("name") - version = self.__get_identity_config("version") - except Exception as ex: - logger.exception(ex) - context.set_code(grpc.StatusCode.INTERNAL) - context.set_details('an error occurred while trying to get plugin name or version') - return csi_pb2.GetPluginInfoResponse() + name = self.get_identity_config("name") + version = self.get_identity_config("version") if not name or not version: - logger.error("plugin name or version cannot be empty") - context.set_code(grpc.StatusCode.INTERNAL) - context.set_details("plugin name or version cannot be empty") - return csi_pb2.GetPluginInfoResponse() + message = "plugin name or version cannot be empty" + return build_error_response(message, context, grpc.StatusCode.INTERNAL, csi_pb2.GetPluginInfoResponse) logger.info("finished GetPluginInfo") return csi_pb2.GetPluginInfoResponse(name=name, vendor_version=version) @@ -688,7 +561,7 @@ def GetPluginCapabilities(self, _, __): logger.info("GetPluginCapabilities") service_type = csi_pb2.PluginCapability.Service.Type volume_expansion_type = csi_pb2.PluginCapability.VolumeExpansion.Type - capabilities = self.__get_identity_config("capabilities") + capabilities = self.get_identity_config("capabilities") capability_list = [] service_capabilities = capabilities.get('Service') 
volume_expansion_capability = capabilities.get('VolumeExpansion') @@ -711,32 +584,6 @@ def Probe(self, _, context): context.set_code(grpc.StatusCode.OK) return csi_pb2.ProbeResponse() - def start_server(self): - controller_server = grpc.server(futures.ThreadPoolExecutor(max_workers=settings.CSI_CONTROLLER_SERVER_WORKERS)) - - csi_pb2_grpc.add_ControllerServicer_to_server(self, controller_server) - csi_pb2_grpc.add_IdentityServicer_to_server(self, controller_server) - - # bind the server to the port defined above - # controller_server.add_insecure_port('[::]:{}'.format(self.server_port)) - # controller_server.add_insecure_port('unix://{}'.format(self.server_port)) - controller_server.add_insecure_port(self.endpoint) - - logger.info("Controller version: {}".format(self.__get_identity_config("version"))) - - # start the server - logger.debug("Listening for connections on endpoint address: {}".format(self.endpoint)) - - controller_server.start() - logger.debug('Controller Server running ...') - - try: - while True: - time.sleep(60 * 60 * 60) - except KeyboardInterrupt: - controller_server.stop(0) - logger.debug('Controller Server Stopped ...') - def _get_source_type_and_id(self, request): source = request.volume_content_source object_id = None @@ -754,19 +601,3 @@ def _get_source_type_and_id(self, request): object_id_info = utils.get_object_id_info(source_id, source_type) object_id = object_id_info.object_id return source_type, object_id - - -def main(): - parser = ArgumentParser() - parser.add_argument("-e", "--csi-endpoint", dest="endpoint", help="grpc endpoint") - parser.add_argument("-l", "--loglevel", dest="loglevel", help="log level") - arguments = parser.parse_args() - - set_log_level(arguments.loglevel) - - controller_servicer = ControllerServicer(arguments.endpoint) - controller_servicer.start_server() - - -if __name__ == '__main__': - main() diff --git a/controller/controller_server/exception_handler.py b/controller/controller_server/exception_handler.py 
index 4baf97b07..47ba9e054 100644 --- a/controller/controller_server/exception_handler.py +++ b/controller/controller_server/exception_handler.py @@ -9,29 +9,41 @@ logger = get_stdout_logger() status_codes_by_exception = { + NotImplementedError: grpc.StatusCode.UNIMPLEMENTED, ValidationException: grpc.StatusCode.INVALID_ARGUMENT, + array_errors.IllegalObjectID: grpc.StatusCode.INVALID_ARGUMENT, array_errors.IllegalObjectName: grpc.StatusCode.INVALID_ARGUMENT, array_errors.PoolParameterIsMissing: grpc.StatusCode.INVALID_ARGUMENT, array_errors.ObjectNotFoundError: grpc.StatusCode.NOT_FOUND, + array_errors.HostNotFoundError: grpc.StatusCode.NOT_FOUND, array_errors.PermissionDeniedError: grpc.StatusCode.PERMISSION_DENIED, - array_errors.NotEnoughSpaceInPool: grpc.StatusCode.RESOURCE_EXHAUSTED + array_errors.ObjectIsStillInUseError: grpc.StatusCode.FAILED_PRECONDITION } -def handle_exception(ex, context, status_code, response_type): - logger.exception(ex) - context.set_details(str(ex)) +def _build_non_ok_response(message, context, status_code, response_type): + context.set_details(message) context.set_code(status_code) return response_type() +def build_error_response(message, context, status_code, response_type): + logger.error(message) + return _build_non_ok_response(message, context, status_code, response_type) + + +def handle_exception(ex, context, status_code, response_type): + logger.exception(ex) + return _build_non_ok_response(str(ex), context, status_code, response_type) + + def handle_common_exceptions(response_type): @decorator def handle_common_exceptions_with_response(controller_method, servicer, request, context): try: return controller_method(servicer, request, context) except Exception as ex: - return handle_exception(ex, context, status_codes_by_exception.get(type(ex), grpc.StatusCode.INTERNAL), - response_type) + status_code = status_codes_by_exception.get(type(ex), grpc.StatusCode.INTERNAL) + return handle_exception(ex, context, status_code, 
response_type) return handle_common_exceptions_with_response diff --git a/controller/controller_server/main.py b/controller/controller_server/main.py new file mode 100644 index 000000000..41f06b395 --- /dev/null +++ b/controller/controller_server/main.py @@ -0,0 +1,20 @@ +from argparse import ArgumentParser + +from controller.common.csi_logger import set_log_level +from controller.controller_server.controller_server_manager import ControllerServerManager + + +def main(): + parser = ArgumentParser() + parser.add_argument("-e", "--csi-endpoint", dest="endpoint", help="grpc endpoint") + parser.add_argument("-l", "--loglevel", dest="loglevel", help="log level") + arguments = parser.parse_args() + + set_log_level(arguments.loglevel) + + server_manager = ControllerServerManager(arguments.endpoint) + server_manager.start_server() + + +if __name__ == '__main__': + main() diff --git a/controller/controller_server/messages.py b/controller/controller_server/messages.py index f08ffa740..d9f67d933 100644 --- a/controller/controller_server/messages.py +++ b/controller/controller_server/messages.py @@ -10,20 +10,28 @@ secret_missing_topologies_message = "secret is missing topologies" invalid_system_id_message = "got an invalid system id: {}, validation regex: {}" invalid_json_parameter_message = "got an invalid json parameter: {}, error: {}." 
+invalid_replication_copy_type_message = "got an invalid copy type: {}" secret_missing_message = 'secret is missing' capabilities_not_set_message = "capabilities were not set" unsupported_fs_type_message = "unsupported fs_type : {}" +unsupported_mount_flags_message = "mount_flags is unsupported" unsupported_volume_access_type_message = "unsupported volume access type" unsupported_access_mode_message = "unsupported access mode : {}" name_should_not_be_empty_message = 'name should not be empty' -id_should_not_be_empty_message = 'id should not be empty' +volume_id_should_not_be_empty_message = 'volume id should not be empty' +snapshot_id_should_not_be_empty_message = 'snapshot id should not be empty' size_should_not_be_negative_message = 'size should not be negative' no_capacity_range_message = 'no capacity range set' pool_is_missing_message = 'pool parameter is missing.' pool_should_not_be_empty_message = 'pool should not be empty' -params_are_missing_message = 'parameters are missing' volume_id_wrong_format_message = 'volume id has wrong format' readonly_not_supported_message = 'readonly parameter is not supported' volume_source_id_is_missing = 'volume source {0} id is missing' snapshot_src_volume_id_is_missing = 'snapshot source volume id is missing' parameter_length_is_too_long = '{} parameter: {} is too long, max length is: {}' +volume_cloning_not_supported_message = 'volume cloning is not supported' +volume_context_not_match_volume_message = 'volume context: {0} does not match existing volume context: {1}' +space_efficiency_not_match_volume_message = 'space efficiency: {0}' \ + ' does not match existing volume space efficiency: {1}' +pool_not_match_volume_message = 'pool name: {0} does not match existing volume pool name: {1}' +prefix_not_match_volume_message = 'prefix: {0} does not match existing volume name: {1}' diff --git a/controller/controller_server/test_settings.py b/controller/controller_server/test_settings.py index 0c06761b2..7eac2bbc7 100644 --- 
a/controller/controller_server/test_settings.py +++ b/controller/controller_server/test_settings.py @@ -2,8 +2,15 @@ password = "temp" array = "arr" pool = "pool1" +space_efficiency = "thin" volume_name = "volume" +volume_wwn = "volume_wwn" +object_internal_id = "object_internal_id" +other_object_internal_id = "other_object_internal_id" snapshot_name = "snapshot" snapshot_volume_wwn = "12345678" snapshot_volume_name = "snapshot_volume" clone_volume_name = "clone_volume" +replication_name = "replication_name" +system_id = "system_id" +copy_type = "async" diff --git a/controller/controller_server/utils.py b/controller/controller_server/utils.py index 04894b407..7d54a0e2c 100644 --- a/controller/controller_server/utils.py +++ b/controller/controller_server/utils.py @@ -1,6 +1,7 @@ import json import re from hashlib import sha256 +from operator import eq import base58 from google.protobuf.timestamp_pb2 import Timestamp @@ -8,8 +9,10 @@ import controller.array_action.errors as array_errors import controller.controller_server.config as config import controller.controller_server.messages as messages -from controller.array_action.config import FC_CONNECTIVITY_TYPE, ISCSI_CONNECTIVITY_TYPE +from controller.array_action.config import FC_CONNECTIVITY_TYPE, ISCSI_CONNECTIVITY_TYPE, \ + REPLICATION_COPY_TYPE_SYNC, REPLICATION_COPY_TYPE_ASYNC from controller.common.csi_logger import get_stdout_logger +from controller.common.settings import NAME_PREFIX_SEPARATOR from controller.controller_server.controller_types import ArrayConnectionInfo, ObjectIdInfo, ObjectParameters from controller.controller_server.errors import ObjectIdError, ValidationException from controller.csi_general import csi_pb2 @@ -42,6 +45,7 @@ def get_volume_topologies(request): topologies = accessibility_requirements.preferred[0].segments logger.info("Chosen volume topologies: {}".format(topologies)) return topologies + return None def _get_system_info_for_topologies(secrets_config, node_topologies): @@ -79,7 
+83,7 @@ def get_array_connection_info_from_secrets(secrets, topologies=None, system_id=N return _get_array_connection_info_from_system_info(system_info, system_id) -def get_volume_parameters(parameters, system_id): +def get_volume_parameters(parameters, system_id=None): return get_object_parameters(parameters, config.PARAMETERS_VOLUME_NAME_PREFIX, system_id) @@ -106,14 +110,15 @@ def get_volume_id(new_volume, system_id): return _get_object_id(new_volume, system_id) -def get_snapshot_id(new_snapshot): - return _get_object_id(new_snapshot) +def get_snapshot_id(new_snapshot, system_id): + return _get_object_id(new_snapshot, system_id) -def _get_object_id(obj, system_id=None): +def _get_object_id(obj, system_id): + object_ids_value = config.PARAMETERS_OBJECT_IDS_DELIMITER.join((obj.internal_id, obj.id)) if system_id: - return config.PARAMETERS_OBJECT_ID_DELIMITER.join((obj.array_type, system_id, obj.id)) - return config.PARAMETERS_OBJECT_ID_DELIMITER.join((obj.array_type, obj.id)) + return config.PARAMETERS_OBJECT_ID_INFO_DELIMITER.join((obj.array_type, system_id, object_ids_value)) + return config.PARAMETERS_OBJECT_ID_INFO_DELIMITER.join((obj.array_type, object_ids_value)) def _is_system_id_valid(system_id): @@ -169,10 +174,12 @@ def validate_secrets(secrets): def validate_csi_volume_capability(cap): - logger.debug("validating csi volume capability : {0}".format(cap)) + logger.debug("validating csi volume capability") if cap.HasField(config.VOLUME_CAPABILITIES_FIELD_ACCESS_TYPE_MOUNT): if cap.mount.fs_type and (cap.mount.fs_type not in config.SUPPORTED_FS_TYPES): raise ValidationException(messages.unsupported_fs_type_message.format(cap.mount.fs_type)) + if cap.mount.mount_flags: + raise ValidationException(messages.unsupported_mount_flags_message) elif not cap.HasField(config.VOLUME_CAPABILITIES_FIELD_ACCESS_TYPE_BLOCK): # should never get here since the value can be only mount (for fs volume) or block (for raw block) @@ -186,9 +193,9 @@ def 
validate_csi_volume_capability(cap): logger.debug("csi volume capabilities validation finished.") -def validate_csi_volume_capabilties(capabilities): - logger.debug("validating csi volume capabilities: {}".format(capabilities)) - if len(capabilities) == 0: +def validate_csi_volume_capabilities(capabilities): + logger.debug("validating csi volume capabilities") + if not capabilities: raise ValidationException(messages.capabilities_not_set_message) for cap in capabilities: @@ -211,10 +218,29 @@ def _validate_source_info(source, source_type): source_object = getattr(source, source_type) logger.info("Source {0} specified: {1}".format(source_type, source_object)) source_object_id = getattr(source_object, config.VOLUME_SOURCE_ID_FIELDS[source_type]) - if not source_object_id: - raise ValidationException(messages.volume_source_id_is_missing.format(source_type)) - if config.PARAMETERS_OBJECT_ID_DELIMITER not in source_object_id: - raise ObjectIdError(source_type, source_object_id) + message = messages.volume_source_id_is_missing.format(source_type) + _validate_object_id(source_object_id, object_type=source_type, message=message) + + +def _validate_pool_parameter(parameters): + logger.debug("validating pool parameter") + if config.PARAMETERS_POOL in parameters: + if not parameters[config.PARAMETERS_POOL]: + raise ValidationException(messages.pool_should_not_be_empty_message) + elif not parameters.get(config.PARAMETERS_BY_SYSTEM): + raise ValidationException(messages.pool_is_missing_message) + + +def _validate_object_id(object_id, object_type=config.VOLUME_TYPE_NAME, + message=messages.volume_id_should_not_be_empty_message): + logger.debug("validating volume id") + if not object_id: + raise ValidationException(message) + if config.PARAMETERS_OBJECT_ID_INFO_DELIMITER not in object_id: + raise ObjectIdError(object_type, object_id) + if len(object_id.split(config.PARAMETERS_OBJECT_ID_INFO_DELIMITER)) not in {config.MINIMUM_VOLUME_ID_PARTS, + config.MAXIMUM_VOLUME_ID_PARTS}: + 
raise ValidationException(messages.volume_id_wrong_format_message) def validate_create_volume_request(request): @@ -232,19 +258,14 @@ def validate_create_volume_request(request): else: raise ValidationException(messages.no_capacity_range_message) - validate_csi_volume_capabilties(request.volume_capabilities) + validate_csi_volume_capabilities(request.volume_capabilities) validate_secrets(request.secrets) - logger.debug("validating storage class parameters") if request.parameters: - if config.PARAMETERS_POOL in request.parameters: - if not request.parameters[config.PARAMETERS_POOL]: - raise ValidationException(messages.pool_should_not_be_empty_message) - elif not request.parameters.get(config.PARAMETERS_BY_SYSTEM): - raise ValidationException(messages.pool_is_missing_message) + _validate_pool_parameter(request.parameters) else: - raise ValidationException(messages.params_are_missing_message) + raise ValidationException(messages.pool_is_missing_message) logger.debug("validating volume copy source") validate_create_volume_source(request) @@ -257,7 +278,9 @@ def validate_create_snapshot_request(request): logger.debug("validating snapshot name") if not request.name: raise ValidationException(messages.name_should_not_be_empty_message) + validate_secrets(request.secrets) + logger.debug("validating source volume id") if not request.source_volume_id: raise ValidationException(messages.snapshot_src_volume_id_is_missing) @@ -267,16 +290,41 @@ def validate_create_snapshot_request(request): def validate_delete_snapshot_request(request): logger.debug("validating delete snapshot request") if not request.snapshot_id: - raise ValidationException(messages.name_should_not_be_empty_message) + raise ValidationException(messages.snapshot_id_should_not_be_empty_message) + validate_secrets(request.secrets) + logger.debug("request validation finished.") +def validate_validate_volume_capabilities_request(request): + logger.debug("validating validate_volume_capabilities request") + + 
_validate_object_id(request.volume_id) + + if request.parameters: + _validate_pool_parameter(request.parameters) + + validate_csi_volume_capabilities(request.volume_capabilities) + + validate_secrets(request.secrets) + + +def validate_volume_context_match_volume(volume_context, volume): + logger.debug("validate volume_context is matching volume") + context_from_existing_volume = _get_context_from_volume(volume) + + if volume_context != context_from_existing_volume: + raise ValidationException( + messages.volume_context_not_match_volume_message.format(volume_context, context_from_existing_volume)) + logger.debug("volume_context validation finished.") + + def validate_expand_volume_request(request): logger.debug("validating expand volume request") if not request.volume_id: - raise ValidationException(messages.id_should_not_be_empty_message) + raise ValidationException(messages.volume_id_should_not_be_empty_message) logger.debug("validating volume capacity") if request.capacity_range: @@ -291,15 +339,10 @@ def validate_expand_volume_request(request): def generate_csi_create_volume_response(new_volume, system_id=None, source_type=None): - logger.debug("creating volume response for volume : {0}".format(new_volume)) - - volume_context = {"volume_name": new_volume.name, - "array_address": ",".join( - new_volume.array_address if isinstance(new_volume.array_address, list) else [ - new_volume.array_address]), - "pool_name": new_volume.pool, - "storage_type": new_volume.array_type - } + logger.debug("creating create volume response for volume : {0}".format(new_volume)) + + volume_context = _get_context_from_volume(new_volume) + content_source = None if new_volume.copy_source_id: if source_type == config.SNAPSHOT_TYPE_NAME: @@ -319,12 +362,12 @@ def generate_csi_create_volume_response(new_volume, system_id=None, source_type= return res -def generate_csi_create_snapshot_response(new_snapshot, source_volume_id): - logger.debug("creating snapshot response for snapshot : 
{0}".format(new_snapshot)) +def generate_csi_create_snapshot_response(new_snapshot, system_id, source_volume_id): + logger.debug("creating create snapshot response for snapshot : {0}".format(new_snapshot)) res = csi_pb2.CreateSnapshotResponse(snapshot=csi_pb2.Snapshot( size_bytes=new_snapshot.capacity_bytes, - snapshot_id=get_snapshot_id(new_snapshot), + snapshot_id=get_snapshot_id(new_snapshot, system_id), source_volume_id=source_volume_id, creation_time=get_current_timestamp(), ready_to_use=new_snapshot.is_ready)) @@ -344,6 +387,36 @@ def generate_csi_expand_volume_response(capacity_bytes, node_expansion_required= return res +def _get_supported_capability(volume_capability): + access_mode = csi_pb2.VolumeCapability.AccessMode(mode=volume_capability.access_mode.mode) + + if volume_capability.HasField(config.VOLUME_CAPABILITIES_FIELD_ACCESS_TYPE_MOUNT): + return csi_pb2.VolumeCapability( + mount=csi_pb2.VolumeCapability.MountVolume(fs_type=volume_capability.mount.fs_type), + access_mode=access_mode) + + return csi_pb2.VolumeCapability( + mount=csi_pb2.VolumeCapability.BlockVolume(), + access_mode=access_mode) + + +def generate_csi_validate_volume_capabilities_response(volume_context, volume_capabilities, parameters): + logger.debug("creating validate volume capabilities response") + + capabilities = [] + for capability in volume_capabilities: + supported_capability = _get_supported_capability(volume_capability=capability) + capabilities.append(supported_capability) + + res = csi_pb2.ValidateVolumeCapabilitiesResponse(confirmed=csi_pb2.ValidateVolumeCapabilitiesResponse.Confirmed( + volume_context=volume_context, + volume_capabilities=capabilities, + parameters=parameters)) + + logger.debug("finished creating validate volume capabilities response") + return res + + def validate_delete_volume_request(request): logger.debug("validating delete volume request") @@ -377,18 +450,34 @@ def get_snapshot_id_info(snapshot_id): return get_object_id_info(snapshot_id, 
config.SNAPSHOT_TYPE_NAME) +def _get_context_from_volume(volume): + return {config.VOLUME_CONTEXT_VOLUME_NAME: volume.name, + config.VOLUME_CONTEXT_ARRAY_ADDRESS: ",".join( + volume.array_address if isinstance(volume.array_address, list) else [volume.array_address]), + config.VOLUME_CONTEXT_POOL: volume.pool, + config.VOLUME_CONTEXT_STORAGE_TYPE: volume.array_type + } + + def get_object_id_info(full_object_id, object_type): logger.debug("getting {0} info for id : {1}".format(object_type, full_object_id)) - splitted_object_id = full_object_id.split(config.PARAMETERS_OBJECT_ID_DELIMITER) - system_id = None + splitted_object_id = full_object_id.split(config.PARAMETERS_OBJECT_ID_INFO_DELIMITER) + system_id, wwn, internal_id = None, None, None if len(splitted_object_id) == 2: array_type, object_id = splitted_object_id elif len(splitted_object_id) == 3: array_type, system_id, object_id = splitted_object_id else: raise ObjectIdError(object_type, full_object_id) + splitted_id = object_id.split(config.PARAMETERS_OBJECT_IDS_DELIMITER) + if len(splitted_id) == 1: + wwn = splitted_id[0] + elif len(splitted_id) == 2: + internal_id, wwn = splitted_id + else: + raise ObjectIdError(object_type, full_object_id) logger.debug("volume id : {0}, array type :{1}".format(object_id, array_type)) - return ObjectIdInfo(array_type=array_type, system_id=system_id, object_id=object_id) + return ObjectIdInfo(array_type=array_type, system_id=system_id, internal_id=internal_id, object_id=wwn) def get_node_id_info(node_id): @@ -406,15 +495,16 @@ def get_node_id_info(node_id): return hostname, fc_wwns, iscsi_iqn -def choose_connectivity_type(connecitvity_types): +def choose_connectivity_type(connectivity_types): # If connectivity type support FC and iSCSI at the same time, chose FC - logger.debug("choosing connectivity type for connectivity types : {0}".format(connecitvity_types)) - if FC_CONNECTIVITY_TYPE in connecitvity_types: + logger.debug("choosing connectivity type for connectivity types : 
{0}".format(connectivity_types)) + if FC_CONNECTIVITY_TYPE in connectivity_types: logger.debug("connectivity type is : {0}".format(FC_CONNECTIVITY_TYPE)) return FC_CONNECTIVITY_TYPE - if ISCSI_CONNECTIVITY_TYPE in connecitvity_types: + if ISCSI_CONNECTIVITY_TYPE in connectivity_types: logger.debug("connectivity type is : {0}".format(ISCSI_CONNECTIVITY_TYPE)) return ISCSI_CONNECTIVITY_TYPE + return None def generate_csi_publish_volume_response(lun, connectivity_type, config, array_initiators): @@ -448,16 +538,31 @@ def generate_csi_publish_volume_response(lun, connectivity_type, config, array_i def validate_unpublish_volume_request(request): logger.debug("validating unpublish volume request") - logger.debug("validating volume id") - if len(request.volume_id.split(config.PARAMETERS_OBJECT_ID_DELIMITER)) not in {config.MINIMUM_VOLUME_ID_PARTS, - config.MAXIMUM_VOLUME_ID_PARTS}: - raise ValidationException(messages.volume_id_wrong_format_message) + _validate_object_id(request.volume_id) validate_secrets(request.secrets) logger.debug("unpublish volume request validation finished.") +def validate_addons_request(request): + logger.debug("validating addons request") + + logger.debug("validating volume id") + if request.volume_id == "" or request.replication_id == "": + raise ValidationException(messages.volume_id_should_not_be_empty_message) + + logger.debug("validating copy type") + if config.PARAMETERS_COPY_TYPE in request.parameters: + copy_type = request.parameters.get(config.PARAMETERS_COPY_TYPE) + if copy_type not in (REPLICATION_COPY_TYPE_SYNC, REPLICATION_COPY_TYPE_ASYNC): + raise ValidationException(messages.invalid_replication_copy_type_message.format(copy_type)) + + validate_secrets(request.secrets) + + logger.debug("addons request validation finished") + + def get_current_timestamp(): res = Timestamp() res.GetCurrentTime() @@ -466,3 +571,28 @@ def get_current_timestamp(): def hash_string(string): return 
base58.b58encode(sha256(string.encode()).digest()).decode() + + +def _validate_parameter_match_volume(parameter_value, value_from_volume, error_message_format, cmp=eq): + if parameter_value and not cmp(parameter_value, value_from_volume): + raise ValidationException(error_message_format.format(parameter_value, value_from_volume)) + + +def validate_parameters_match_volume(parameters, volume): + logger.debug("validating space efficiency parameter matches volume's") + space_efficiency = parameters.get(config.PARAMETERS_SPACE_EFFICIENCY) + if space_efficiency: + space_efficiency = space_efficiency.lower() + else: + space_efficiency = volume.default_space_efficiency + _validate_parameter_match_volume(space_efficiency, volume.space_efficiency, + messages.space_efficiency_not_match_volume_message) + + logger.debug("validating pool parameter matches volume's") + pool = parameters.get(config.PARAMETERS_POOL) + _validate_parameter_match_volume(pool, volume.pool, messages.pool_not_match_volume_message) + + logger.debug("validating prefix parameter matches volume's") + prefix = parameters.get(config.PARAMETERS_VOLUME_NAME_PREFIX) + _validate_parameter_match_volume(prefix, volume.name, messages.prefix_not_match_volume_message, + lambda pref, name: name.startswith(pref + NAME_PREFIX_SEPARATOR)) diff --git a/controller/csi_general/replication_pb2.py b/controller/csi_general/replication_pb2.py new file mode 100644 index 000000000..f80adb059 --- /dev/null +++ b/controller/csi_general/replication_pb2.py @@ -0,0 +1,1092 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: controller/csi_general/replication.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from google.protobuf import descriptor_pb2 as google_dot_protobuf_dot_descriptor__pb2 + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='controller/csi_general/replication.proto', + package='replication', + syntax='proto3', + serialized_options=_b('Z\r.;replication'), + serialized_pb=_b('\n(controller/csi_general/replication.proto\x12\x0breplication\x1a google/protobuf/descriptor.proto\"\xd4\x02\n\x1e\x45nableVolumeReplicationRequest\x12\x11\n\tvolume_id\x18\x01 \x01(\t\x12\x1b\n\x0ereplication_id\x18\x04 \x01(\tB\x03\xe0\x44\x01\x12O\n\nparameters\x18\x02 \x03(\x0b\x32;.replication.EnableVolumeReplicationRequest.ParametersEntry\x12N\n\x07secrets\x18\x03 \x03(\x0b\x32\x38.replication.EnableVolumeReplicationRequest.SecretsEntryB\x03\xd8\x44\x01\x1a\x31\n\x0fParametersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a.\n\x0cSecretsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"!\n\x1f\x45nableVolumeReplicationResponse\"\xd7\x02\n\x1f\x44isableVolumeReplicationRequest\x12\x11\n\tvolume_id\x18\x01 \x01(\t\x12\x1b\n\x0ereplication_id\x18\x04 \x01(\tB\x03\xe0\x44\x01\x12P\n\nparameters\x18\x02 \x03(\x0b\x32<.replication.DisableVolumeReplicationRequest.ParametersEntry\x12O\n\x07secrets\x18\x03 \x03(\x0b\x32\x39.replication.DisableVolumeReplicationRequest.SecretsEntryB\x03\xd8\x44\x01\x1a\x31\n\x0fParametersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a.\n\x0cSecretsEntry\x12\x0b\n\x03key\x18\x01 
\x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\"\n DisableVolumeReplicationResponse\"\xc5\x02\n\x14PromoteVolumeRequest\x12\x11\n\tvolume_id\x18\x01 \x01(\t\x12\x1b\n\x0ereplication_id\x18\x05 \x01(\tB\x03\xe0\x44\x01\x12\r\n\x05\x66orce\x18\x02 \x01(\x08\x12\x45\n\nparameters\x18\x03 \x03(\x0b\x32\x31.replication.PromoteVolumeRequest.ParametersEntry\x12\x44\n\x07secrets\x18\x04 \x03(\x0b\x32..replication.PromoteVolumeRequest.SecretsEntryB\x03\xd8\x44\x01\x1a\x31\n\x0fParametersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a.\n\x0cSecretsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\x17\n\x15PromoteVolumeResponse\"\xc2\x02\n\x13\x44\x65moteVolumeRequest\x12\x11\n\tvolume_id\x18\x01 \x01(\t\x12\x1b\n\x0ereplication_id\x18\x05 \x01(\tB\x03\xe0\x44\x01\x12\r\n\x05\x66orce\x18\x02 \x01(\x08\x12\x44\n\nparameters\x18\x03 \x03(\x0b\x32\x30.replication.DemoteVolumeRequest.ParametersEntry\x12\x43\n\x07secrets\x18\x04 \x03(\x0b\x32-.replication.DemoteVolumeRequest.SecretsEntryB\x03\xd8\x44\x01\x1a\x31\n\x0fParametersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a.\n\x0cSecretsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\x16\n\x14\x44\x65moteVolumeResponse\"\xc2\x02\n\x13ResyncVolumeRequest\x12\x11\n\tvolume_id\x18\x01 \x01(\t\x12\x1b\n\x0ereplication_id\x18\x05 \x01(\tB\x03\xe0\x44\x01\x12\r\n\x05\x66orce\x18\x02 \x01(\x08\x12\x44\n\nparameters\x18\x03 \x03(\x0b\x32\x30.replication.ResyncVolumeRequest.ParametersEntry\x12\x43\n\x07secrets\x18\x04 \x03(\x0b\x32-.replication.ResyncVolumeRequest.SecretsEntryB\x03\xd8\x44\x01\x1a\x31\n\x0fParametersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a.\n\x0cSecretsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"%\n\x14ResyncVolumeResponse\x12\r\n\x05ready\x18\x01 
\x01(\x08\x32\x87\x04\n\nController\x12v\n\x17\x45nableVolumeReplication\x12+.replication.EnableVolumeReplicationRequest\x1a,.replication.EnableVolumeReplicationResponse\"\x00\x12y\n\x18\x44isableVolumeReplication\x12,.replication.DisableVolumeReplicationRequest\x1a-.replication.DisableVolumeReplicationResponse\"\x00\x12X\n\rPromoteVolume\x12!.replication.PromoteVolumeRequest\x1a\".replication.PromoteVolumeResponse\"\x00\x12U\n\x0c\x44\x65moteVolume\x12 .replication.DemoteVolumeRequest\x1a!.replication.DemoteVolumeResponse\"\x00\x12U\n\x0cResyncVolume\x12 .replication.ResyncVolumeRequest\x1a!.replication.ResyncVolumeResponse\"\x00::\n\x12replication_secret\x12\x1d.google.protobuf.FieldOptions\x18\xcb\x08 \x01(\x08:3\n\x0b\x61lpha_field\x12\x1d.google.protobuf.FieldOptions\x18\xcc\x08 \x01(\x08\x42\x0fZ\r.;replicationb\x06proto3') + , + dependencies=[google_dot_protobuf_dot_descriptor__pb2.DESCRIPTOR,]) + + +REPLICATION_SECRET_FIELD_NUMBER = 1099 +replication_secret = _descriptor.FieldDescriptor( + name='replication_secret', full_name='replication.replication_secret', index=0, + number=1099, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=True, extension_scope=None, + serialized_options=None, file=DESCRIPTOR) +ALPHA_FIELD_FIELD_NUMBER = 1100 +alpha_field = _descriptor.FieldDescriptor( + name='alpha_field', full_name='replication.alpha_field', index=1, + number=1100, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=True, extension_scope=None, + serialized_options=None, file=DESCRIPTOR) + + +_ENABLEVOLUMEREPLICATIONREQUEST_PARAMETERSENTRY = _descriptor.Descriptor( + name='ParametersEntry', + full_name='replication.EnableVolumeReplicationRequest.ParametersEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + 
name='key', full_name='replication.EnableVolumeReplicationRequest.ParametersEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='value', full_name='replication.EnableVolumeReplicationRequest.ParametersEntry.value', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=_b('8\001'), + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=335, + serialized_end=384, +) + +_ENABLEVOLUMEREPLICATIONREQUEST_SECRETSENTRY = _descriptor.Descriptor( + name='SecretsEntry', + full_name='replication.EnableVolumeReplicationRequest.SecretsEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='replication.EnableVolumeReplicationRequest.SecretsEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='value', full_name='replication.EnableVolumeReplicationRequest.SecretsEntry.value', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + 
nested_types=[], + enum_types=[ + ], + serialized_options=_b('8\001'), + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=386, + serialized_end=432, +) + +_ENABLEVOLUMEREPLICATIONREQUEST = _descriptor.Descriptor( + name='EnableVolumeReplicationRequest', + full_name='replication.EnableVolumeReplicationRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='volume_id', full_name='replication.EnableVolumeReplicationRequest.volume_id', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='replication_id', full_name='replication.EnableVolumeReplicationRequest.replication_id', index=1, + number=4, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=_b('\340D\001'), file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='parameters', full_name='replication.EnableVolumeReplicationRequest.parameters', index=2, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='secrets', full_name='replication.EnableVolumeReplicationRequest.secrets', index=3, + number=3, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=_b('\330D\001'), file=DESCRIPTOR), + ], + extensions=[ + ], + 
nested_types=[_ENABLEVOLUMEREPLICATIONREQUEST_PARAMETERSENTRY, _ENABLEVOLUMEREPLICATIONREQUEST_SECRETSENTRY, ], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=92, + serialized_end=432, +) + + +_ENABLEVOLUMEREPLICATIONRESPONSE = _descriptor.Descriptor( + name='EnableVolumeReplicationResponse', + full_name='replication.EnableVolumeReplicationResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=434, + serialized_end=467, +) + + +_DISABLEVOLUMEREPLICATIONREQUEST_PARAMETERSENTRY = _descriptor.Descriptor( + name='ParametersEntry', + full_name='replication.DisableVolumeReplicationRequest.ParametersEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='replication.DisableVolumeReplicationRequest.ParametersEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='value', full_name='replication.DisableVolumeReplicationRequest.ParametersEntry.value', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=_b('8\001'), + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=335, + serialized_end=384, +) 
+ +_DISABLEVOLUMEREPLICATIONREQUEST_SECRETSENTRY = _descriptor.Descriptor( + name='SecretsEntry', + full_name='replication.DisableVolumeReplicationRequest.SecretsEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='replication.DisableVolumeReplicationRequest.SecretsEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='value', full_name='replication.DisableVolumeReplicationRequest.SecretsEntry.value', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=_b('8\001'), + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=386, + serialized_end=432, +) + +_DISABLEVOLUMEREPLICATIONREQUEST = _descriptor.Descriptor( + name='DisableVolumeReplicationRequest', + full_name='replication.DisableVolumeReplicationRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='volume_id', full_name='replication.DisableVolumeReplicationRequest.volume_id', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='replication_id', full_name='replication.DisableVolumeReplicationRequest.replication_id', index=1, + 
number=4, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=_b('\340D\001'), file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='parameters', full_name='replication.DisableVolumeReplicationRequest.parameters', index=2, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='secrets', full_name='replication.DisableVolumeReplicationRequest.secrets', index=3, + number=3, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=_b('\330D\001'), file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[_DISABLEVOLUMEREPLICATIONREQUEST_PARAMETERSENTRY, _DISABLEVOLUMEREPLICATIONREQUEST_SECRETSENTRY, ], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=470, + serialized_end=813, +) + + +_DISABLEVOLUMEREPLICATIONRESPONSE = _descriptor.Descriptor( + name='DisableVolumeReplicationResponse', + full_name='replication.DisableVolumeReplicationResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=815, + serialized_end=849, +) + + +_PROMOTEVOLUMEREQUEST_PARAMETERSENTRY = _descriptor.Descriptor( + name='ParametersEntry', + full_name='replication.PromoteVolumeRequest.ParametersEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + 
fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='replication.PromoteVolumeRequest.ParametersEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='value', full_name='replication.PromoteVolumeRequest.ParametersEntry.value', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=_b('8\001'), + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=335, + serialized_end=384, +) + +_PROMOTEVOLUMEREQUEST_SECRETSENTRY = _descriptor.Descriptor( + name='SecretsEntry', + full_name='replication.PromoteVolumeRequest.SecretsEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='replication.PromoteVolumeRequest.SecretsEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='value', full_name='replication.PromoteVolumeRequest.SecretsEntry.value', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + 
enum_types=[ + ], + serialized_options=_b('8\001'), + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=386, + serialized_end=432, +) + +_PROMOTEVOLUMEREQUEST = _descriptor.Descriptor( + name='PromoteVolumeRequest', + full_name='replication.PromoteVolumeRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='volume_id', full_name='replication.PromoteVolumeRequest.volume_id', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='replication_id', full_name='replication.PromoteVolumeRequest.replication_id', index=1, + number=5, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=_b('\340D\001'), file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='force', full_name='replication.PromoteVolumeRequest.force', index=2, + number=2, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='parameters', full_name='replication.PromoteVolumeRequest.parameters', index=3, + number=3, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='secrets', full_name='replication.PromoteVolumeRequest.secrets', index=4, + number=4, type=11, cpp_type=10, label=3, + 
has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=_b('\330D\001'), file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[_PROMOTEVOLUMEREQUEST_PARAMETERSENTRY, _PROMOTEVOLUMEREQUEST_SECRETSENTRY, ], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=852, + serialized_end=1177, +) + + +_PROMOTEVOLUMERESPONSE = _descriptor.Descriptor( + name='PromoteVolumeResponse', + full_name='replication.PromoteVolumeResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1179, + serialized_end=1202, +) + + +_DEMOTEVOLUMEREQUEST_PARAMETERSENTRY = _descriptor.Descriptor( + name='ParametersEntry', + full_name='replication.DemoteVolumeRequest.ParametersEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='replication.DemoteVolumeRequest.ParametersEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='value', full_name='replication.DemoteVolumeRequest.ParametersEntry.value', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + 
serialized_options=_b('8\001'), + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=335, + serialized_end=384, +) + +_DEMOTEVOLUMEREQUEST_SECRETSENTRY = _descriptor.Descriptor( + name='SecretsEntry', + full_name='replication.DemoteVolumeRequest.SecretsEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='replication.DemoteVolumeRequest.SecretsEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='value', full_name='replication.DemoteVolumeRequest.SecretsEntry.value', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=_b('8\001'), + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=386, + serialized_end=432, +) + +_DEMOTEVOLUMEREQUEST = _descriptor.Descriptor( + name='DemoteVolumeRequest', + full_name='replication.DemoteVolumeRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='volume_id', full_name='replication.DemoteVolumeRequest.volume_id', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='replication_id', 
full_name='replication.DemoteVolumeRequest.replication_id', index=1, + number=5, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=_b('\340D\001'), file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='force', full_name='replication.DemoteVolumeRequest.force', index=2, + number=2, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='parameters', full_name='replication.DemoteVolumeRequest.parameters', index=3, + number=3, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='secrets', full_name='replication.DemoteVolumeRequest.secrets', index=4, + number=4, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=_b('\330D\001'), file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[_DEMOTEVOLUMEREQUEST_PARAMETERSENTRY, _DEMOTEVOLUMEREQUEST_SECRETSENTRY, ], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1205, + serialized_end=1527, +) + + +_DEMOTEVOLUMERESPONSE = _descriptor.Descriptor( + name='DemoteVolumeResponse', + full_name='replication.DemoteVolumeResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + 
is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1529, + serialized_end=1551, +) + + +_RESYNCVOLUMEREQUEST_PARAMETERSENTRY = _descriptor.Descriptor( + name='ParametersEntry', + full_name='replication.ResyncVolumeRequest.ParametersEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='replication.ResyncVolumeRequest.ParametersEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='value', full_name='replication.ResyncVolumeRequest.ParametersEntry.value', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=_b('8\001'), + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=335, + serialized_end=384, +) + +_RESYNCVOLUMEREQUEST_SECRETSENTRY = _descriptor.Descriptor( + name='SecretsEntry', + full_name='replication.ResyncVolumeRequest.SecretsEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='replication.ResyncVolumeRequest.SecretsEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='value', 
full_name='replication.ResyncVolumeRequest.SecretsEntry.value', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=_b('8\001'), + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=386, + serialized_end=432, +) + +_RESYNCVOLUMEREQUEST = _descriptor.Descriptor( + name='ResyncVolumeRequest', + full_name='replication.ResyncVolumeRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='volume_id', full_name='replication.ResyncVolumeRequest.volume_id', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='replication_id', full_name='replication.ResyncVolumeRequest.replication_id', index=1, + number=5, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=_b('\340D\001'), file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='force', full_name='replication.ResyncVolumeRequest.force', index=2, + number=2, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='parameters', full_name='replication.ResyncVolumeRequest.parameters', index=3, + number=3, type=11, 
cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='secrets', full_name='replication.ResyncVolumeRequest.secrets', index=4, + number=4, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=_b('\330D\001'), file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[_RESYNCVOLUMEREQUEST_PARAMETERSENTRY, _RESYNCVOLUMEREQUEST_SECRETSENTRY, ], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1554, + serialized_end=1876, +) + + +_RESYNCVOLUMERESPONSE = _descriptor.Descriptor( + name='ResyncVolumeResponse', + full_name='replication.ResyncVolumeResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='ready', full_name='replication.ResyncVolumeResponse.ready', index=0, + number=1, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1878, + serialized_end=1915, +) + +_ENABLEVOLUMEREPLICATIONREQUEST_PARAMETERSENTRY.containing_type = _ENABLEVOLUMEREPLICATIONREQUEST +_ENABLEVOLUMEREPLICATIONREQUEST_SECRETSENTRY.containing_type = _ENABLEVOLUMEREPLICATIONREQUEST +_ENABLEVOLUMEREPLICATIONREQUEST.fields_by_name['parameters'].message_type = _ENABLEVOLUMEREPLICATIONREQUEST_PARAMETERSENTRY 
+_ENABLEVOLUMEREPLICATIONREQUEST.fields_by_name['secrets'].message_type = _ENABLEVOLUMEREPLICATIONREQUEST_SECRETSENTRY +_DISABLEVOLUMEREPLICATIONREQUEST_PARAMETERSENTRY.containing_type = _DISABLEVOLUMEREPLICATIONREQUEST +_DISABLEVOLUMEREPLICATIONREQUEST_SECRETSENTRY.containing_type = _DISABLEVOLUMEREPLICATIONREQUEST +_DISABLEVOLUMEREPLICATIONREQUEST.fields_by_name['parameters'].message_type = _DISABLEVOLUMEREPLICATIONREQUEST_PARAMETERSENTRY +_DISABLEVOLUMEREPLICATIONREQUEST.fields_by_name['secrets'].message_type = _DISABLEVOLUMEREPLICATIONREQUEST_SECRETSENTRY +_PROMOTEVOLUMEREQUEST_PARAMETERSENTRY.containing_type = _PROMOTEVOLUMEREQUEST +_PROMOTEVOLUMEREQUEST_SECRETSENTRY.containing_type = _PROMOTEVOLUMEREQUEST +_PROMOTEVOLUMEREQUEST.fields_by_name['parameters'].message_type = _PROMOTEVOLUMEREQUEST_PARAMETERSENTRY +_PROMOTEVOLUMEREQUEST.fields_by_name['secrets'].message_type = _PROMOTEVOLUMEREQUEST_SECRETSENTRY +_DEMOTEVOLUMEREQUEST_PARAMETERSENTRY.containing_type = _DEMOTEVOLUMEREQUEST +_DEMOTEVOLUMEREQUEST_SECRETSENTRY.containing_type = _DEMOTEVOLUMEREQUEST +_DEMOTEVOLUMEREQUEST.fields_by_name['parameters'].message_type = _DEMOTEVOLUMEREQUEST_PARAMETERSENTRY +_DEMOTEVOLUMEREQUEST.fields_by_name['secrets'].message_type = _DEMOTEVOLUMEREQUEST_SECRETSENTRY +_RESYNCVOLUMEREQUEST_PARAMETERSENTRY.containing_type = _RESYNCVOLUMEREQUEST +_RESYNCVOLUMEREQUEST_SECRETSENTRY.containing_type = _RESYNCVOLUMEREQUEST +_RESYNCVOLUMEREQUEST.fields_by_name['parameters'].message_type = _RESYNCVOLUMEREQUEST_PARAMETERSENTRY +_RESYNCVOLUMEREQUEST.fields_by_name['secrets'].message_type = _RESYNCVOLUMEREQUEST_SECRETSENTRY +DESCRIPTOR.message_types_by_name['EnableVolumeReplicationRequest'] = _ENABLEVOLUMEREPLICATIONREQUEST +DESCRIPTOR.message_types_by_name['EnableVolumeReplicationResponse'] = _ENABLEVOLUMEREPLICATIONRESPONSE +DESCRIPTOR.message_types_by_name['DisableVolumeReplicationRequest'] = _DISABLEVOLUMEREPLICATIONREQUEST 
+DESCRIPTOR.message_types_by_name['DisableVolumeReplicationResponse'] = _DISABLEVOLUMEREPLICATIONRESPONSE +DESCRIPTOR.message_types_by_name['PromoteVolumeRequest'] = _PROMOTEVOLUMEREQUEST +DESCRIPTOR.message_types_by_name['PromoteVolumeResponse'] = _PROMOTEVOLUMERESPONSE +DESCRIPTOR.message_types_by_name['DemoteVolumeRequest'] = _DEMOTEVOLUMEREQUEST +DESCRIPTOR.message_types_by_name['DemoteVolumeResponse'] = _DEMOTEVOLUMERESPONSE +DESCRIPTOR.message_types_by_name['ResyncVolumeRequest'] = _RESYNCVOLUMEREQUEST +DESCRIPTOR.message_types_by_name['ResyncVolumeResponse'] = _RESYNCVOLUMERESPONSE +DESCRIPTOR.extensions_by_name['replication_secret'] = replication_secret +DESCRIPTOR.extensions_by_name['alpha_field'] = alpha_field +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +EnableVolumeReplicationRequest = _reflection.GeneratedProtocolMessageType('EnableVolumeReplicationRequest', (_message.Message,), dict( + + ParametersEntry = _reflection.GeneratedProtocolMessageType('ParametersEntry', (_message.Message,), dict( + DESCRIPTOR = _ENABLEVOLUMEREPLICATIONREQUEST_PARAMETERSENTRY, + __module__ = 'controller.csi_general.replication_pb2' + # @@protoc_insertion_point(class_scope:replication.EnableVolumeReplicationRequest.ParametersEntry) + )) + , + + SecretsEntry = _reflection.GeneratedProtocolMessageType('SecretsEntry', (_message.Message,), dict( + DESCRIPTOR = _ENABLEVOLUMEREPLICATIONREQUEST_SECRETSENTRY, + __module__ = 'controller.csi_general.replication_pb2' + # @@protoc_insertion_point(class_scope:replication.EnableVolumeReplicationRequest.SecretsEntry) + )) + , + DESCRIPTOR = _ENABLEVOLUMEREPLICATIONREQUEST, + __module__ = 'controller.csi_general.replication_pb2' + # @@protoc_insertion_point(class_scope:replication.EnableVolumeReplicationRequest) + )) +_sym_db.RegisterMessage(EnableVolumeReplicationRequest) +_sym_db.RegisterMessage(EnableVolumeReplicationRequest.ParametersEntry) +_sym_db.RegisterMessage(EnableVolumeReplicationRequest.SecretsEntry) + 
+EnableVolumeReplicationResponse = _reflection.GeneratedProtocolMessageType('EnableVolumeReplicationResponse', (_message.Message,), dict( + DESCRIPTOR = _ENABLEVOLUMEREPLICATIONRESPONSE, + __module__ = 'controller.csi_general.replication_pb2' + # @@protoc_insertion_point(class_scope:replication.EnableVolumeReplicationResponse) + )) +_sym_db.RegisterMessage(EnableVolumeReplicationResponse) + +DisableVolumeReplicationRequest = _reflection.GeneratedProtocolMessageType('DisableVolumeReplicationRequest', (_message.Message,), dict( + + ParametersEntry = _reflection.GeneratedProtocolMessageType('ParametersEntry', (_message.Message,), dict( + DESCRIPTOR = _DISABLEVOLUMEREPLICATIONREQUEST_PARAMETERSENTRY, + __module__ = 'controller.csi_general.replication_pb2' + # @@protoc_insertion_point(class_scope:replication.DisableVolumeReplicationRequest.ParametersEntry) + )) + , + + SecretsEntry = _reflection.GeneratedProtocolMessageType('SecretsEntry', (_message.Message,), dict( + DESCRIPTOR = _DISABLEVOLUMEREPLICATIONREQUEST_SECRETSENTRY, + __module__ = 'controller.csi_general.replication_pb2' + # @@protoc_insertion_point(class_scope:replication.DisableVolumeReplicationRequest.SecretsEntry) + )) + , + DESCRIPTOR = _DISABLEVOLUMEREPLICATIONREQUEST, + __module__ = 'controller.csi_general.replication_pb2' + # @@protoc_insertion_point(class_scope:replication.DisableVolumeReplicationRequest) + )) +_sym_db.RegisterMessage(DisableVolumeReplicationRequest) +_sym_db.RegisterMessage(DisableVolumeReplicationRequest.ParametersEntry) +_sym_db.RegisterMessage(DisableVolumeReplicationRequest.SecretsEntry) + +DisableVolumeReplicationResponse = _reflection.GeneratedProtocolMessageType('DisableVolumeReplicationResponse', (_message.Message,), dict( + DESCRIPTOR = _DISABLEVOLUMEREPLICATIONRESPONSE, + __module__ = 'controller.csi_general.replication_pb2' + # @@protoc_insertion_point(class_scope:replication.DisableVolumeReplicationResponse) + )) +_sym_db.RegisterMessage(DisableVolumeReplicationResponse) 
+ +PromoteVolumeRequest = _reflection.GeneratedProtocolMessageType('PromoteVolumeRequest', (_message.Message,), dict( + + ParametersEntry = _reflection.GeneratedProtocolMessageType('ParametersEntry', (_message.Message,), dict( + DESCRIPTOR = _PROMOTEVOLUMEREQUEST_PARAMETERSENTRY, + __module__ = 'controller.csi_general.replication_pb2' + # @@protoc_insertion_point(class_scope:replication.PromoteVolumeRequest.ParametersEntry) + )) + , + + SecretsEntry = _reflection.GeneratedProtocolMessageType('SecretsEntry', (_message.Message,), dict( + DESCRIPTOR = _PROMOTEVOLUMEREQUEST_SECRETSENTRY, + __module__ = 'controller.csi_general.replication_pb2' + # @@protoc_insertion_point(class_scope:replication.PromoteVolumeRequest.SecretsEntry) + )) + , + DESCRIPTOR = _PROMOTEVOLUMEREQUEST, + __module__ = 'controller.csi_general.replication_pb2' + # @@protoc_insertion_point(class_scope:replication.PromoteVolumeRequest) + )) +_sym_db.RegisterMessage(PromoteVolumeRequest) +_sym_db.RegisterMessage(PromoteVolumeRequest.ParametersEntry) +_sym_db.RegisterMessage(PromoteVolumeRequest.SecretsEntry) + +PromoteVolumeResponse = _reflection.GeneratedProtocolMessageType('PromoteVolumeResponse', (_message.Message,), dict( + DESCRIPTOR = _PROMOTEVOLUMERESPONSE, + __module__ = 'controller.csi_general.replication_pb2' + # @@protoc_insertion_point(class_scope:replication.PromoteVolumeResponse) + )) +_sym_db.RegisterMessage(PromoteVolumeResponse) + +DemoteVolumeRequest = _reflection.GeneratedProtocolMessageType('DemoteVolumeRequest', (_message.Message,), dict( + + ParametersEntry = _reflection.GeneratedProtocolMessageType('ParametersEntry', (_message.Message,), dict( + DESCRIPTOR = _DEMOTEVOLUMEREQUEST_PARAMETERSENTRY, + __module__ = 'controller.csi_general.replication_pb2' + # @@protoc_insertion_point(class_scope:replication.DemoteVolumeRequest.ParametersEntry) + )) + , + + SecretsEntry = _reflection.GeneratedProtocolMessageType('SecretsEntry', (_message.Message,), dict( + DESCRIPTOR = 
_DEMOTEVOLUMEREQUEST_SECRETSENTRY, + __module__ = 'controller.csi_general.replication_pb2' + # @@protoc_insertion_point(class_scope:replication.DemoteVolumeRequest.SecretsEntry) + )) + , + DESCRIPTOR = _DEMOTEVOLUMEREQUEST, + __module__ = 'controller.csi_general.replication_pb2' + # @@protoc_insertion_point(class_scope:replication.DemoteVolumeRequest) + )) +_sym_db.RegisterMessage(DemoteVolumeRequest) +_sym_db.RegisterMessage(DemoteVolumeRequest.ParametersEntry) +_sym_db.RegisterMessage(DemoteVolumeRequest.SecretsEntry) + +DemoteVolumeResponse = _reflection.GeneratedProtocolMessageType('DemoteVolumeResponse', (_message.Message,), dict( + DESCRIPTOR = _DEMOTEVOLUMERESPONSE, + __module__ = 'controller.csi_general.replication_pb2' + # @@protoc_insertion_point(class_scope:replication.DemoteVolumeResponse) + )) +_sym_db.RegisterMessage(DemoteVolumeResponse) + +ResyncVolumeRequest = _reflection.GeneratedProtocolMessageType('ResyncVolumeRequest', (_message.Message,), dict( + + ParametersEntry = _reflection.GeneratedProtocolMessageType('ParametersEntry', (_message.Message,), dict( + DESCRIPTOR = _RESYNCVOLUMEREQUEST_PARAMETERSENTRY, + __module__ = 'controller.csi_general.replication_pb2' + # @@protoc_insertion_point(class_scope:replication.ResyncVolumeRequest.ParametersEntry) + )) + , + + SecretsEntry = _reflection.GeneratedProtocolMessageType('SecretsEntry', (_message.Message,), dict( + DESCRIPTOR = _RESYNCVOLUMEREQUEST_SECRETSENTRY, + __module__ = 'controller.csi_general.replication_pb2' + # @@protoc_insertion_point(class_scope:replication.ResyncVolumeRequest.SecretsEntry) + )) + , + DESCRIPTOR = _RESYNCVOLUMEREQUEST, + __module__ = 'controller.csi_general.replication_pb2' + # @@protoc_insertion_point(class_scope:replication.ResyncVolumeRequest) + )) +_sym_db.RegisterMessage(ResyncVolumeRequest) +_sym_db.RegisterMessage(ResyncVolumeRequest.ParametersEntry) +_sym_db.RegisterMessage(ResyncVolumeRequest.SecretsEntry) + +ResyncVolumeResponse = 
_reflection.GeneratedProtocolMessageType('ResyncVolumeResponse', (_message.Message,), dict( + DESCRIPTOR = _RESYNCVOLUMERESPONSE, + __module__ = 'controller.csi_general.replication_pb2' + # @@protoc_insertion_point(class_scope:replication.ResyncVolumeResponse) + )) +_sym_db.RegisterMessage(ResyncVolumeResponse) + +google_dot_protobuf_dot_descriptor__pb2.FieldOptions.RegisterExtension(replication_secret) +google_dot_protobuf_dot_descriptor__pb2.FieldOptions.RegisterExtension(alpha_field) + +DESCRIPTOR._options = None +_ENABLEVOLUMEREPLICATIONREQUEST_PARAMETERSENTRY._options = None +_ENABLEVOLUMEREPLICATIONREQUEST_SECRETSENTRY._options = None +_ENABLEVOLUMEREPLICATIONREQUEST.fields_by_name['replication_id']._options = None +_ENABLEVOLUMEREPLICATIONREQUEST.fields_by_name['secrets']._options = None +_DISABLEVOLUMEREPLICATIONREQUEST_PARAMETERSENTRY._options = None +_DISABLEVOLUMEREPLICATIONREQUEST_SECRETSENTRY._options = None +_DISABLEVOLUMEREPLICATIONREQUEST.fields_by_name['replication_id']._options = None +_DISABLEVOLUMEREPLICATIONREQUEST.fields_by_name['secrets']._options = None +_PROMOTEVOLUMEREQUEST_PARAMETERSENTRY._options = None +_PROMOTEVOLUMEREQUEST_SECRETSENTRY._options = None +_PROMOTEVOLUMEREQUEST.fields_by_name['replication_id']._options = None +_PROMOTEVOLUMEREQUEST.fields_by_name['secrets']._options = None +_DEMOTEVOLUMEREQUEST_PARAMETERSENTRY._options = None +_DEMOTEVOLUMEREQUEST_SECRETSENTRY._options = None +_DEMOTEVOLUMEREQUEST.fields_by_name['replication_id']._options = None +_DEMOTEVOLUMEREQUEST.fields_by_name['secrets']._options = None +_RESYNCVOLUMEREQUEST_PARAMETERSENTRY._options = None +_RESYNCVOLUMEREQUEST_SECRETSENTRY._options = None +_RESYNCVOLUMEREQUEST.fields_by_name['replication_id']._options = None +_RESYNCVOLUMEREQUEST.fields_by_name['secrets']._options = None + +_CONTROLLER = _descriptor.ServiceDescriptor( + name='Controller', + full_name='replication.Controller', + file=DESCRIPTOR, + index=0, + serialized_options=None, + 
serialized_start=1918, + serialized_end=2437, + methods=[ + _descriptor.MethodDescriptor( + name='EnableVolumeReplication', + full_name='replication.Controller.EnableVolumeReplication', + index=0, + containing_service=None, + input_type=_ENABLEVOLUMEREPLICATIONREQUEST, + output_type=_ENABLEVOLUMEREPLICATIONRESPONSE, + serialized_options=None, + ), + _descriptor.MethodDescriptor( + name='DisableVolumeReplication', + full_name='replication.Controller.DisableVolumeReplication', + index=1, + containing_service=None, + input_type=_DISABLEVOLUMEREPLICATIONREQUEST, + output_type=_DISABLEVOLUMEREPLICATIONRESPONSE, + serialized_options=None, + ), + _descriptor.MethodDescriptor( + name='PromoteVolume', + full_name='replication.Controller.PromoteVolume', + index=2, + containing_service=None, + input_type=_PROMOTEVOLUMEREQUEST, + output_type=_PROMOTEVOLUMERESPONSE, + serialized_options=None, + ), + _descriptor.MethodDescriptor( + name='DemoteVolume', + full_name='replication.Controller.DemoteVolume', + index=3, + containing_service=None, + input_type=_DEMOTEVOLUMEREQUEST, + output_type=_DEMOTEVOLUMERESPONSE, + serialized_options=None, + ), + _descriptor.MethodDescriptor( + name='ResyncVolume', + full_name='replication.Controller.ResyncVolume', + index=4, + containing_service=None, + input_type=_RESYNCVOLUMEREQUEST, + output_type=_RESYNCVOLUMERESPONSE, + serialized_options=None, + ), +]) +_sym_db.RegisterServiceDescriptor(_CONTROLLER) + +DESCRIPTOR.services_by_name['Controller'] = _CONTROLLER + +# @@protoc_insertion_point(module_scope) diff --git a/controller/csi_general/replication_pb2_grpc.py b/controller/csi_general/replication_pb2_grpc.py new file mode 100644 index 000000000..982ed2200 --- /dev/null +++ b/controller/csi_general/replication_pb2_grpc.py @@ -0,0 +1,116 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! 
+import grpc + +from controller.csi_general import replication_pb2 as controller_dot_csi__general_dot_replication__pb2 + + +class ControllerStub(object): + """Controller holds the RPC Methods for replication and all the methods it + exposes should be idempotent. + """ + + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. + """ + self.EnableVolumeReplication = channel.unary_unary( + '/replication.Controller/EnableVolumeReplication', + request_serializer=controller_dot_csi__general_dot_replication__pb2.EnableVolumeReplicationRequest.SerializeToString, + response_deserializer=controller_dot_csi__general_dot_replication__pb2.EnableVolumeReplicationResponse.FromString, + ) + self.DisableVolumeReplication = channel.unary_unary( + '/replication.Controller/DisableVolumeReplication', + request_serializer=controller_dot_csi__general_dot_replication__pb2.DisableVolumeReplicationRequest.SerializeToString, + response_deserializer=controller_dot_csi__general_dot_replication__pb2.DisableVolumeReplicationResponse.FromString, + ) + self.PromoteVolume = channel.unary_unary( + '/replication.Controller/PromoteVolume', + request_serializer=controller_dot_csi__general_dot_replication__pb2.PromoteVolumeRequest.SerializeToString, + response_deserializer=controller_dot_csi__general_dot_replication__pb2.PromoteVolumeResponse.FromString, + ) + self.DemoteVolume = channel.unary_unary( + '/replication.Controller/DemoteVolume', + request_serializer=controller_dot_csi__general_dot_replication__pb2.DemoteVolumeRequest.SerializeToString, + response_deserializer=controller_dot_csi__general_dot_replication__pb2.DemoteVolumeResponse.FromString, + ) + self.ResyncVolume = channel.unary_unary( + '/replication.Controller/ResyncVolume', + request_serializer=controller_dot_csi__general_dot_replication__pb2.ResyncVolumeRequest.SerializeToString, + response_deserializer=controller_dot_csi__general_dot_replication__pb2.ResyncVolumeResponse.FromString, + ) + + +class 
ControllerServicer(object): + """Controller holds the RPC Methods for replication and all the methods it + exposes should be idempotent. + """ + + def EnableVolumeReplication(self, request, context): + """EnableVolumeReplication RPC call to enable the volume replication. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def DisableVolumeReplication(self, request, context): + """DisableVolumeReplication RPC call to disable the volume replication. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def PromoteVolume(self, request, context): + """PromoteVolume RPC call to promote the volume. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def DemoteVolume(self, request, context): + """DemoteVolume RPC call to demote the volume. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ResyncVolume(self, request, context): + """ResyncVolume RPC call to resync the volume. 
+ """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + +def add_ControllerServicer_to_server(servicer, server): + rpc_method_handlers = { + 'EnableVolumeReplication': grpc.unary_unary_rpc_method_handler( + servicer.EnableVolumeReplication, + request_deserializer=controller_dot_csi__general_dot_replication__pb2.EnableVolumeReplicationRequest.FromString, + response_serializer=controller_dot_csi__general_dot_replication__pb2.EnableVolumeReplicationResponse.SerializeToString, + ), + 'DisableVolumeReplication': grpc.unary_unary_rpc_method_handler( + servicer.DisableVolumeReplication, + request_deserializer=controller_dot_csi__general_dot_replication__pb2.DisableVolumeReplicationRequest.FromString, + response_serializer=controller_dot_csi__general_dot_replication__pb2.DisableVolumeReplicationResponse.SerializeToString, + ), + 'PromoteVolume': grpc.unary_unary_rpc_method_handler( + servicer.PromoteVolume, + request_deserializer=controller_dot_csi__general_dot_replication__pb2.PromoteVolumeRequest.FromString, + response_serializer=controller_dot_csi__general_dot_replication__pb2.PromoteVolumeResponse.SerializeToString, + ), + 'DemoteVolume': grpc.unary_unary_rpc_method_handler( + servicer.DemoteVolume, + request_deserializer=controller_dot_csi__general_dot_replication__pb2.DemoteVolumeRequest.FromString, + response_serializer=controller_dot_csi__general_dot_replication__pb2.DemoteVolumeResponse.SerializeToString, + ), + 'ResyncVolume': grpc.unary_unary_rpc_method_handler( + servicer.ResyncVolume, + request_deserializer=controller_dot_csi__general_dot_replication__pb2.ResyncVolumeRequest.FromString, + response_serializer=controller_dot_csi__general_dot_replication__pb2.ResyncVolumeResponse.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'replication.Controller', rpc_method_handlers) + 
server.add_generic_rpc_handlers((generic_handler,)) diff --git a/controller/scripts/csi_pb2.sh b/controller/scripts/csi_pb2.sh index ff4ba658f..cb0026121 100644 --- a/controller/scripts/csi_pb2.sh +++ b/controller/scripts/csi_pb2.sh @@ -1,6 +1,21 @@ -#!/bin/bash +#!/bin/bash -e set -x -version="v1.2.0" -curl -o csi.proto https://raw.githubusercontent.com/container-storage-interface/spec/"$version"/csi.proto -python -m grpc_tools.protoc --proto_path=. --grpc_python_out=./controller/csi_general --python_out=./controller/csi_general csi.proto +CSI_VERSION="v1.2.0" +ADDONS_VERSION="v0.1.1" +PB2_DIR="controller/csi_general" + +mkdir -p ./proto/${PB2_DIR} +cd ./proto/${PB2_DIR} + +curl -O https://raw.githubusercontent.com/container-storage-interface/spec/${CSI_VERSION}/csi.proto +curl -O https://raw.githubusercontent.com/csi-addons/spec/${ADDONS_VERSION}/replication.proto + +cd - + +python -m grpc_tools.protoc --proto_path=proto \ + --python_out=. \ + --grpc_python_out=. \ + proto/${PB2_DIR}/*.proto + +rm -rf ./proto/ diff --git a/controller/scripts/entrypoint.sh b/controller/scripts/entrypoint.sh index 5af5c391c..e8ad9b339 100755 --- a/controller/scripts/entrypoint.sh +++ b/controller/scripts/entrypoint.sh @@ -1,2 +1,2 @@ #!/bin/bash -exec python3.8 /driver/controller/controller_server/csi_controller_server.py $@ +exec python3.8 /driver/controller/controller_server/main.py $@ diff --git a/controller/scripts/lint.ini b/controller/scripts/lint.ini index 48b8b2b96..59d72cbcf 100644 --- a/controller/scripts/lint.ini +++ b/controller/scripts/lint.ini @@ -5,14 +5,14 @@ max-line-length=120 [pycodestyle] # fnmatch pattern: -exclude=csi_pb2*.py +exclude=*_pb2*.py # the rest of the sections are only for pylint: [MASTER] # regex pattern: -ignore-patterns=csi_pb2.*[.]py +ignore-patterns=.*_pb2.*[.]py [MESSAGES CONTROL] @@ -25,8 +25,6 @@ disable= # easy: raise-missing-from, # W - happened only on Jenkins, add "from" after some of the "raise" super-init-not-called, # W - always call 
parent init (to allow valid inheritance in future code) - inconsistent-return-statements, # R - missing explicit "return None" at the end of some functions - consider-using-in, consider-using-set-comprehension, # R - few occurrences: make code more pythonic invalid-name, redefined-builtin, redefined-outer-name, # C, W, W - bad/colliding names: e.g. lower-case const, e.g. id no-self-use, # R - some functions should be made static arguments-differ, signature-differs, # W - bad method override: keep same args num+names, keep same args default values diff --git a/controller/scripts/unitests.sh b/controller/scripts/unitests.sh index e74d01d13..2ff98c6dc 100755 --- a/controller/scripts/unitests.sh +++ b/controller/scripts/unitests.sh @@ -1,4 +1,5 @@ -#!/bin/bash -x +#!/bin/bash +set -x coveragedir=/driver/coverage/ [ ! -d $coveragedir ] && mkdir -p $coveragedir -exec nosetests --exe --with-coverage --cover-xml --cover-xml-file=$coveragedir/.coverage.xml --cover-package=common --cover-package=controller --with-xunit --xunit-file=$coveragedir/.unitests.xml $@ +exec nosetests --exe --with-coverage --cover-xml --cover-xml-file=$coveragedir/.coverage.xml --cover-package=common --cover-package=controller --with-xunit --xunit-file=$coveragedir/.unitests.xml $@ diff --git a/controller/tests/array_action/ds8k/test_array_mediator_ds8k.py b/controller/tests/array_action/ds8k/test_array_mediator_ds8k.py index de9a45c24..432cf07f7 100644 --- a/controller/tests/array_action/ds8k/test_array_mediator_ds8k.py +++ b/controller/tests/array_action/ds8k/test_array_mediator_ds8k.py @@ -1,6 +1,6 @@ import unittest -from mock import patch, NonCallableMagicMock +from mock import patch, NonCallableMagicMock, Mock from munch import Munch from pyds8k.exceptions import ClientError, ClientException, InternalServerError, NotFound @@ -59,15 +59,6 @@ def setUp(self): } ) - self.snapshot_response = Munch( - {"cap": "1073741824", - "id": "0002", - "name": "test_name", - "pool": "fake_pool", - "flashcopy": 
"" - } - ) - self.flashcopy_response = Munch( {"sourcevolume": "0001", "targetvolume": "0002", @@ -92,6 +83,13 @@ def test_connect_to_unsupported_system(self): with self.assertRaises(array_errors.UnsupportedStorageVersionError): DS8KArrayMediator("user", "password", self.endpoint) + def test_connect_with_error(self): + self.client_mock.get_system.side_effect = \ + ClientError("400", "other_error") + with self.assertRaises(ClientError) as ex: + DS8KArrayMediator("user", "password", self.endpoint) + self.assertEqual(ex.exception.message, "other_error") + def test_validate_space_efficiency_thin_success(self): self.array.validate_supported_space_efficiency( config.SPACE_EFFICIENCY_THIN @@ -171,23 +169,23 @@ def test_create_volume_raise_already_exists(self): with self.assertRaises(array_errors.VolumeAlreadyExists): self.array.create_volume(self.volume_response.name, "1", 'thin', pool_id) - def test_create_volume_failed_with_ClientException(self): + def test_create_volume_fail_with_ClientException(self): self.client_mock.create_volume.side_effect = ClientException("500") with self.assertRaises(array_errors.VolumeCreationError): self.array.create_volume("fake_name", 1, 'thin', "fake_pool") - def test_create_volume_failed_with_pool_not_found(self): + def test_create_volume_fail_with_pool_not_found(self): self.client_mock.create_volume.side_effect = NotFound("404", message="BE7A0001") with self.assertRaises(array_errors.PoolDoesNotExist): self.array.create_volume("fake_name", 1, 'thin', "fake_pool") - def test_create_volume_failed_with_incorrect_id(self): + def test_create_volume_fail_with_incorrect_id(self): self.client_mock.get_volumes_by_pool.side_effect = InternalServerError("500", message="BE7A0005") with self.assertRaises(array_errors.PoolDoesNotExist): self.array.create_volume("fake_name", 1, 'thin', "fake_pool") - def test_create_volume_failed_with_no_space_in_pool(self): - self.client_mock.get_volumes_by_pool.side_effect = ClientException("500", message="BE534459") 
+ def test_create_volume_fail_with_no_space_in_pool(self): + self.client_mock.get_volumes_by_pool.side_effect = InternalServerError("500", message="BE534459") with self.assertRaises(array_errors.NotEnoughSpaceInPool): self.array.create_volume("fake_name", 1, 'thin', "fake_pool") @@ -196,12 +194,12 @@ def test_delete_volume(self): self.array.delete_volume(scsi_id) self.client_mock.delete_volume.assert_called_once_with(volume_id=scsi_id[-4:]) - def test_delete_volume_failed_with_client_exception(self): + def test_delete_volume_fail_with_ClientException(self): self.client_mock.delete_volume.side_effect = ClientException("500") with self.assertRaises(array_errors.VolumeDeletionError): self.array.delete_volume("fake_id") - def test_delete_volume_failed_with_not_found(self): + def test_delete_volume_fail_with_NotFound(self): self.client_mock.delete_volume.side_effect = NotFound("404") with self.assertRaises(array_errors.ObjectNotFoundError): self.array.delete_volume("fake_id") @@ -211,7 +209,7 @@ def test_delete_volume_failed_with_illegal_object_id(self): with self.assertRaises(array_errors.IllegalObjectID): self.array.delete_volume("fake_id") - def test_delete_volume_with_flashcopies_as_source_and_target_failed(self): + def test_delete_volume_with_flashcopies_as_source_and_target_fail(self): self.client_mock.get_volume.return_value = self.volume_response self.client_mock.get_flashcopies_by_volume.return_value = [ Munch({"sourcevolume": "0001", @@ -268,7 +266,7 @@ def test_delete_volume_with_flashcopy_as_target_success(self): self.client_mock.delete_flashcopy.assert_called_once_with("0001:0002") self.client_mock.delete_volume.assert_called_once_with(volume_id="0001") - def test_get_volume_mappings_failed_with_ClientException(self): + def test_get_volume_mappings_fail_with_ClientException(self): self.client_mock.get_hosts.side_effect = ClientException("500") with self.assertRaises(ClientException): self.array.get_volume_mappings("fake_name") @@ -312,7 +310,12 @@ def 
test_map_volume_volume_not_found(self): with self.assertRaises(array_errors.ObjectNotFoundError): self.array.map_volume("fake_name", "fake_host") - def test_map_volume_failed_with_ClientException(self): + def test_map_volume_no_available_lun(self): + self.client_mock.map_volume_to_host.side_effect = InternalServerError("500", "[BE74121B]") + with self.assertRaises(array_errors.NoAvailableLunError): + self.array.map_volume("fake_name", "fake_host") + + def test_map_volume_fail_with_ClientException(self): self.client_mock.map_volume_to_host.side_effect = ClientException("500") with self.assertRaises(array_errors.MappingError): self.array.map_volume("fake_name", "fake_host") @@ -340,7 +343,7 @@ def test_unmap_volume_volume_not_found(self): with self.assertRaises(array_errors.ObjectNotFoundError): self.array.unmap_volume("fake_name", "fake_host") - def test_unmap_volume_failed_with_ClientException(self): + def test_unmap_volume_fail_with_ClientException(self): volume_id = "0001" lunid = "1" host_name = "test_host" @@ -370,7 +373,7 @@ def test_unmap_volume(self): self.client_mock.unmap_volume_from_host.assert_called_once_with(host_name=host_name, lunid=lunid) - def test_get_array_fc_wwns_failed_with_ClientException(self): + def test_get_array_fc_wwns_fail_with_ClientException(self): self.client_mock.get_host.side_effect = ClientException("500") with self.assertRaises(ClientException): self.array.get_array_fc_wwns() @@ -476,90 +479,125 @@ def test_get_snapshot_no_pool_success(self): volume = self.array.get_snapshot("volume_id", "test_name", pool=None) self.assertEqual(volume.name, target_volume.name) - def _prepare_mocks_for_create_snapshot(self): - self.client_mock.get_volumes_by_pool.return_value = [self.volume_response] - volume = Munch( - {"cap": "1073741824", - "id": "0001", - "name": "target_volume", - "pool": "fake_pool", - "tp": "ese", - "flashcopy": "" - } - ) - return volume + def _prepare_mocks_for_create_snapshot(self, tp="none"): + 
self.client_mock.create_volume = Mock() + self.client_mock.get_volume.side_effect = [ + Munch( + {"cap": "1073741824", + "id": "0001", + "name": "source_volume", + "pool": "fake_pool", + "tp": tp, + } + ), + Mock(), + self.snapshot_response + ] + self.client_mock.get_flashcopies.return_value = self.flashcopy_response def test_create_snapshot_create_volume_error(self): - self._prepare_mocks_for_create_snapshot() self.client_mock.create_volume.side_effect = ClientException("500") with self.assertRaises(array_errors.VolumeCreationError): - self.array.create_snapshot("volume_id", "target_volume", pool=self.volume_response.pool) + self.array.create_snapshot("volume_id", "target_volume", space_efficiency=None, + pool=self.volume_response.pool) def test_create_snapshot_create_fcrel_error(self): - volume = self._prepare_mocks_for_create_snapshot() - self.client_mock.create_volume.return_value = volume - self.client_mock.get_volume.return_value = volume + self.client_mock.create_volume = Mock() + self.client_mock.get_volume = Mock() self.client_mock.create_flashcopy.side_effect = ClientException("500") with self.assertRaises(Exception): - self.array.create_snapshot("volume_id", "target_volume", pool=self.volume_response.pool) + self.array.create_snapshot("volume_id", "target_volume", space_efficiency=None, + pool=self.volume_response.pool) def test_create_snapshot_get_volume_not_found(self): - volume = self._prepare_mocks_for_create_snapshot() - self.client_mock.create_volume.return_value = volume + self.client_mock.create_volume = Mock() self.client_mock.get_volume.side_effect = NotFound("404") with self.assertRaises(array_errors.ObjectNotFoundError): - self.array.create_snapshot("volume_id", "target_volume", pool=self.volume_response.pool) + self.array.create_snapshot("volume_id", "target_volume", space_efficiency=None, + pool=self.volume_response.pool) def test_create_snapshot_create_flashcopy_volume_not_found(self): - volume = self._prepare_mocks_for_create_snapshot() - 
self.client_mock.create_volume.return_value = volume - self.client_mock.get_volume.return_value = volume + self._prepare_mocks_for_create_snapshot() self.client_mock.create_flashcopy.side_effect = ClientException("500", message="00000013") with self.assertRaises(array_errors.ObjectNotFoundError): - self.array.create_snapshot("volume_id", "target_volume", pool=self.volume_response.pool) + self.array.create_snapshot("volume_id", "target_volume", space_efficiency=None, + pool=self.volume_response.pool) def test_create_snapshot_already_exist(self): - volume = self._prepare_mocks_for_create_snapshot() - self.client_mock.create_volume.return_value = volume - self.client_mock.get_volume.return_value = volume - self.client_mock.create_flashcopy.side_effect = ClientException("500", - message="000000AE") + self._prepare_mocks_for_create_snapshot() + self.client_mock.create_flashcopy.side_effect = ClientException("500", message="000000AE") + with self.assertRaises(array_errors.SnapshotAlreadyExists): - self.array.create_snapshot("volume_id", "target_volume", pool=self.volume_response.pool) + self.array.create_snapshot("volume_id", "target_volume", space_efficiency=None, + pool=self.volume_response.pool) def test_create_snapshot_failed_with_incorrect_id(self): self.client_mock.get_volume.side_effect = InternalServerError("500", message="BE7A0005") with self.assertRaises(array_errors.IllegalObjectID): - self.array.create_snapshot("volume_id", "test_name", pool=None) + self.array.create_snapshot("volume_id", "test_name", space_efficiency=None, pool=None) def test_create_snapshot_success(self): - volume = self._prepare_mocks_for_create_snapshot() - self.client_mock.create_volume.return_value = volume - self.client_mock.get_volume.return_value = volume - self.client_mock.create_flashcopy.return_value = self.flashcopy_response - self.client_mock.get_flashcopies.return_value = self.flashcopy_response - snapshot = self.array.create_snapshot("volume_id", "target_volume", 
pool=self.volume_response.pool) + self._prepare_mocks_for_create_snapshot() + snapshot_response = self.snapshot_response + snapshot = self.array.create_snapshot("volume_id", "target_volume", space_efficiency=None, pool=None) + + self.assertEqual(snapshot.name, snapshot_response.name) + self.assertEqual(snapshot.id, self.array._generate_volume_scsi_identifier(snapshot_response.id)) + self.client_mock.create_volume.assert_called_once_with(name='target_volume', capacity_in_bytes=1073741824, + pool_id='fake_pool', tp='none') + + def test_create_snapshot_with_different_pool_success(self): + self._prepare_mocks_for_create_snapshot() + + self.array.create_snapshot("volume_id", "target_volume", space_efficiency=None, pool="different_pool") + + self.client_mock.create_volume.assert_called_once_with(name='target_volume', capacity_in_bytes=1073741824, + pool_id='different_pool', tp='none') - self.assertEqual(snapshot.name, volume.name) - self.assertEqual(snapshot.id, self.array._generate_volume_scsi_identifier(volume.id)) + def _test_create_snapshot_with_space_efficiency_success(self, source_volume_space_efficiency, + space_efficiency_called, space_efficiency_parameter=None): + self._prepare_mocks_for_create_snapshot(tp=source_volume_space_efficiency) + + if space_efficiency_parameter is None: + self.array.create_snapshot("volume_id", "target_volume", space_efficiency=None, pool=None) + else: + self.array.create_snapshot("volume_id", "target_volume", space_efficiency=space_efficiency_parameter, + pool=None) + + self.client_mock.create_volume.assert_called_with(name='target_volume', capacity_in_bytes=1073741824, + pool_id='fake_pool', tp=space_efficiency_called) + + def test_create_snapshot_with_specified_source_volume_space_efficiency_success(self): + self._test_create_snapshot_with_space_efficiency_success(source_volume_space_efficiency="none", + space_efficiency_called="none") + + def test_create_snapshot_with_different_request_parameter_space_efficiency_success(self): + 
self._test_create_snapshot_with_space_efficiency_success(source_volume_space_efficiency="none", + space_efficiency_called="ese", + space_efficiency_parameter="thin") + + def test_create_snapshot_with_different_request_parameter_empty_space_efficiency_success(self): + self._test_create_snapshot_with_space_efficiency_success(source_volume_space_efficiency="ese", + space_efficiency_called="ese", + space_efficiency_parameter="") def test_create_snapshot_not_valid(self): - volume = self._prepare_mocks_for_create_snapshot() - self.client_mock.create_volume.return_value = volume - self.client_mock.get_volume.return_value = volume + self._prepare_mocks_for_create_snapshot() flashcopy_response = Munch( {"sourcevolume": {"id": "0001"}, "targetvolume": {"id": "0002"}, "id": "0001:0002", "state": "invalid" }) - self.client_mock.create_flashcopy.return_value = flashcopy_response - with self.assertRaises(ValueError): - self.array.create_snapshot("volume_id", "target_volume", pool=self.volume_response.pool) + self.client_mock.get_flashcopies.return_value = flashcopy_response + with self.assertRaises(ValueError) as ar_context: + self.array.create_snapshot("volume_id", "target_volume", space_efficiency=None, + pool=self.volume_response.pool) + self.assertIn("invalid", str(ar_context.exception)) def _prepare_mocks_for_snapshot(self): flashcopy_as_target = self.flashcopy_response @@ -576,20 +614,20 @@ def test_delete_snapshot(self): self.client_mock.delete_volume.assert_called_once() self.client_mock.delete_flashcopy.assert_called_once_with(self.flashcopy_response.id) - def test_delete_snapshot_flashcopy_failed_with_ClientException(self): + def test_delete_snapshot_flashcopy_fail_with_ClientException(self): self._prepare_mocks_for_snapshot() self.client_mock.delete_flashcopy.side_effect = ClientException("500") self.client_mock.get_volume.return_value = self.snapshot_response with self.assertRaises(ClientException): self.array.delete_snapshot("fake_name") - def 
test_delete_snapshot_failed_with_client_exception(self): + def test_delete_snapshot_fail_with_ClientException(self): self._prepare_mocks_for_snapshot() self.client_mock.delete_volume.side_effect = ClientException("500") with self.assertRaises(array_errors.VolumeDeletionError): self.array.delete_snapshot("fake_id") - def test_delete_snapshot_failed_with_not_found(self): + def test_delete_snapshot_fail_with_NotFound(self): self.client_mock.get_volume.side_effect = NotFound("404") with self.assertRaises(array_errors.ObjectNotFoundError): self.array.delete_snapshot("fake_id") @@ -605,15 +643,6 @@ def test_delete_snapshot_failed_with_illegal_object_id(self): def _prepare_mocks_for_copy_to_existing_volume(self): volume = self.volume_response - self.client_mock.get_volumes_by_pool.side_effect = [[volume], [Munch( - {"cap": "1073741824", - "id": "0002", - "name": "source_name", - "pool": "fake_pool", - "tp": "ese", - "flashcopy": [] - } - )]] self.client_mock.get_volume.return_value = volume self.client_mock.get_flashcopies_by_volume.side_effect = \ [[], [self.flashcopy_response], [self.flashcopy_response]] @@ -623,7 +652,7 @@ def _prepare_mocks_for_copy_to_existing_volume(self): def test_copy_to_existing_volume_success(self): volume = self._prepare_mocks_for_copy_to_existing_volume() - self.array.copy_to_existing_volume_from_source("test_name", "source_name", 3, 2, "fake_pool") + self.array.copy_to_existing_volume_from_source(volume.id, "0002", 3, 2) self.client_mock.extend_volume.assert_called_once_with(volume_id=volume.id, new_size_in_bytes=3) self.client_mock.create_flashcopy.assert_called_once_with( @@ -633,11 +662,21 @@ def test_copy_to_existing_volume_success(self): FLASHCOPY_PERMIT_SPACE_EFFICIENT_TARGET_OPTION ]) - def test_copy_to_existing_volume_raise_not_found(self): + def _test_copy_to_existing_volume_raise_errors(self, client_method, client_error, expected_error): self._prepare_mocks_for_copy_to_existing_volume() - self.client_mock.extend_volume.side_effect = 
NotFound("404") - with self.assertRaises(array_errors.ObjectNotFoundError): - self.array.copy_to_existing_volume_from_source("test_name", "source_name", 3, 2, "fake_pool") + client_method.side_effect = client_error + with self.assertRaises(expected_error): + self.array.copy_to_existing_volume_from_source("test_name", "source_name", 3, 2) + + def test_copy_to_existing_volume_raise_not_found(self): + self._test_copy_to_existing_volume_raise_errors(client_method=self.client_mock.extend_volume, + client_error=NotFound("404"), + expected_error=array_errors.ObjectNotFoundError) + + def test_copy_to_existing_volume_raise_illegal_object_id(self): + self._test_copy_to_existing_volume_raise_errors(client_method=self.client_mock.get_volume, + client_error=InternalServerError("500", "BE7A0005"), + expected_error=array_errors.IllegalObjectID) def test_get_object_by_id_snapshot(self): snapshot = self._prepare_mocks_for_snapshot() @@ -668,6 +707,11 @@ def test_get_object_by_id_errors(self): with self.assertRaises(array_errors.ExpectedSnapshotButFoundVolumeError): self.array.get_object_by_id("", "snapshot") + def test_get_object_by_id_get_volume_raise_error(self): + self.client_mock.get_volume.side_effect = ClientException("500", "other error") + with self.assertRaises(ClientException): + self.array.get_object_by_id("", "volume") + def test_expand_volume_success(self): volume = self._prepare_mocks_for_volume() self.array.expand_volume(volume_id=volume.id, required_bytes=10) @@ -700,3 +744,8 @@ def test_expand_volume_extend_not_enough_space_error(self): self.client_mock.extend_volume.side_effect = [ClientException("500", message="BE531465")] with self.assertRaises(array_errors.NotEnoughSpaceInPool): self.array.expand_volume(volume_id="test_id", required_bytes=10) + + def test_expand_volume_extend_raise_error(self): + self.client_mock.extend_volume.side_effect = [ClientException("500", message="other error")] + with self.assertRaises(ClientException): + 
self.array.expand_volume(volume_id="test_id", required_bytes=10) diff --git a/controller/tests/array_action/svc/array_mediator_svc_test.py b/controller/tests/array_action/svc/array_mediator_svc_test.py index 8e52d3107..ee8f56a86 100644 --- a/controller/tests/array_action/svc/array_mediator_svc_test.py +++ b/controller/tests/array_action/svc/array_mediator_svc_test.py @@ -8,7 +8,7 @@ import controller.array_action.config as config import controller.array_action.errors as array_errors from controller.array_action.array_mediator_svc import SVCArrayMediator, build_kwargs_from_parameters, \ - HOST_ID_PARAM, HOST_NAME_PARAM, HOST_ISCSI_NAMES_PARAM, HOST_WWPNS_PARAM, FCMAP_STATUS_DONE + HOST_ID_PARAM, HOST_NAME_PARAM, HOST_ISCSI_NAMES_PARAM, HOST_WWPNS_PARAM, FCMAP_STATUS_DONE, YES, HOST_PORTSET_ID from controller.array_action.svc_cli_result_reader import SVCListResultsElement from controller.common.node_info import Initiators @@ -25,21 +25,23 @@ def setUp(self): node = Munch({'id': '1', 'name': 'node1', 'iscsi_name': 'iqn.1986-03.com.ibm:2145.v7k1.node1', 'status': 'online'}) self.svc.client.svcinfo.lsnode.return_value = [node] - port = Munch({'node_id': '1', 'IP_address': '1.1.1.1', 'IP_address_6': None}) - self.svc.client.svcinfo.lsportip.return_value = [port] - self.fcmaps = [Munch( - {'source_vdisk_name': 'source_name', - 'target_vdisk_name': 'target_name', - 'id': 'test_fc_id', - 'status': FCMAP_STATUS_DONE, - 'copy_rate': "non_zero_value"})] - self.fcmaps_as_source = [Munch( - {'source_vdisk_name': 'test_snapshot', + lsportip_port = Munch({'node_id': '1', 'IP_address': '1.1.1.1', 'IP_address_6': None}) + lsip_port = Munch({'node_id': '1', 'IP_address': '1.1.1.1', 'portset_id': 'demo_id'}) + self.svc.client.svcinfo.lsportip.return_value = [lsportip_port] + self.svc.client.svcinfo.lsip.return_value = [lsip_port] + self.fcmaps = [self._create_dummy_fcmap('source_name', 'test_fc_id')] + self.fcmaps_as_target = [self._create_dummy_fcmap('source_name', 
'test_fc_as_target_id')] + self.fcmaps_as_source = [self._create_dummy_fcmap('test_snapshot', 'test_fc_id')] + self.svc.client.svcinfo.lsfcmap.return_value = Mock(as_list=self.fcmaps) + + def _create_dummy_fcmap(self, source_name, id_value): + return Munch( + {'source_vdisk_name': source_name, 'target_vdisk_name': 'target_name', - 'id': 'test_fc_id', + 'id': id_value, 'status': FCMAP_STATUS_DONE, - 'copy_rate': "non_zero_value"})] - self.svc.client.svcinfo.lsfcmap.return_value = Mock(as_list=self.fcmaps) + 'copy_rate': 'non_zero_value', + 'rc_controlled': 'no'}) def test_raise_ManagementIPsNotSupportError_in_init(self): self.endpoint = ["IP_1", "IP_2"] @@ -90,6 +92,7 @@ def test_get_volume_lsvdisk_cli_failure_errors(self): self._test_get_volume_lsvdisk_cli_failure_error("volume_name", 'CMMVC5753E', array_errors.ObjectNotFoundError) self._test_get_volume_lsvdisk_cli_failure_error("\xff", 'CMMVC6017E', array_errors.IllegalObjectName) self._test_get_volume_lsvdisk_cli_failure_error("12345", 'CMMVC5703E', array_errors.IllegalObjectName) + self._test_get_volume_lsvdisk_cli_failure_error("", 'other error', CLIFailureError) def test_get_volume_return_correct_value(self): cli_volume_mock = Mock(as_single_element=self._get_cli_volume()) @@ -124,8 +127,8 @@ def test_create_volume_raise_exceptions(self): self._test_create_volume_mkvolume_cli_failure_error("CMMVC5738E", array_errors.IllegalObjectName, "a" * 64) self._test_create_volume_mkvolume_cli_failure_error("CMMVC6035E", array_errors.VolumeAlreadyExists) self._test_create_volume_mkvolume_cli_failure_error("CMMVC5754E", array_errors.PoolDoesNotExist) - self._test_create_volume_mkvolume_cli_failure_error("CMMVC9292E", array_errors.PoolDoesNotMatchCapabilities) - self._test_create_volume_mkvolume_cli_failure_error("CMMVC9301E", array_errors.PoolDoesNotMatchCapabilities) + self._test_create_volume_mkvolume_cli_failure_error("CMMVC9292E", array_errors.PoolDoesNotMatchSpaceEfficiency) + 
self._test_create_volume_mkvolume_cli_failure_error("CMMVC9301E", array_errors.PoolDoesNotMatchSpaceEfficiency) def _test_create_volume_success(self, space_efficiency): self.svc.client.svctask.mkvolume.return_value = Mock() @@ -136,19 +139,20 @@ def _test_create_volume_success(self, space_efficiency): self.assertEqual(volume.capacity_bytes, 1024) self.assertEqual(volume.array_type, 'SVC') self.assertEqual(volume.id, 'vol_id') + self.assertEqual(volume.internal_id, 'test_id') def test_create_volume_with_thin_space_efficiency_success(self): - self._test_create_volume_success("thin") + self._test_create_volume_success(config.SPACE_EFFICIENCY_THIN) self.svc.client.svctask.mkvolume.assert_called_with(name="test_volume", unit="b", size=1024, pool="pool_name", thin=True) def test_create_volume_with_compressed_space_efficiency_success(self): - self._test_create_volume_success("compressed") + self._test_create_volume_success(config.SPACE_EFFICIENCY_COMPRESSED) self.svc.client.svctask.mkvolume.assert_called_with(name="test_volume", unit="b", size=1024, pool="pool_name", compressed=True) def test_create_volume_with_deduplicated_space_efficiency_success(self): - self._test_create_volume_success("deduplicated") + self._test_create_volume_success(config.SPACE_EFFICIENCY_DEDUPLICATED) self.svc.client.svctask.mkvolume.assert_called_with(name="test_volume", unit="b", size=1024, pool="pool_name", compressed=True, deduplicated=True) @@ -160,7 +164,7 @@ def test_create_volume_with_empty_string_space_efficiency_success(self): self._test_create_volume_with_default_space_efficiency_success("") def test_create_volume_with_thick_space_efficiency_success(self): - self._test_create_volume_with_default_space_efficiency_success("thick") + self._test_create_volume_with_default_space_efficiency_success(config.SPACE_EFFICIENCY_THICK) def _test_delete_volume_rmvolume_cli_failure_error(self, error_message_id, expected_error, volume_name="volume"): 
self._test_mediator_method_client_cli_failure_error(self.svc.delete_volume, (volume_name,), @@ -197,6 +201,20 @@ def test_delete_volume_still_copy_fcmaps_not_removed(self): with self.assertRaises(array_errors.ObjectIsStillInUseError): self.svc.delete_volume("volume") + def _prepare_fcmaps_for_hyperswap(self): + self.fcmaps_as_target[0].rc_controlled = "yes" + fcmaps_as_target = Mock(as_list=self.fcmaps_as_target) + self.fcmaps[0].rc_controlled = "yes" + fcmaps_as_source = Mock(as_list=self.fcmaps) + return fcmaps_as_source, fcmaps_as_target + + def test_delete_volume_does_not_remove_hyperswap_fcmap(self): + fcmaps_as_source, fcmaps_as_target = self._prepare_fcmaps_for_hyperswap() + self.svc.client.svcinfo.lsfcmap.side_effect = [fcmaps_as_target, fcmaps_as_source] + self.svc.delete_volume("volume") + + self.svc.client.svctask.rmfcmap.assert_not_called() + def test_delete_volume_has_clone_fcmaps_removed(self): fcmaps_as_target = Mock(as_list=[]) fcmaps_as_source = Mock(as_list=self.fcmaps_as_source) @@ -204,6 +222,16 @@ def test_delete_volume_has_clone_fcmaps_removed(self): self.svc.delete_volume("volume") self.svc.client.svctask.rmfcmap.assert_called_once() + @patch("controller.array_action.array_mediator_svc.is_warning_message") + def test_delete_volume_has_clone_rmfcmap_raise_error(self, mock_warning): + mock_warning.return_value = False + fcmaps_as_target = Mock(as_list=[]) + fcmaps_as_source = Mock(as_list=self.fcmaps_as_source) + self.svc.client.svcinfo.lsfcmap.side_effect = [fcmaps_as_target, fcmaps_as_source] + self.svc.client.svctask.rmfcmap.side_effect = [CLIFailureError('error')] + with self.assertRaises(CLIFailureError): + self.svc.delete_volume("volume") + def test_delete_volume_success(self): self.svc.client.svctask.rmvolume = Mock() self.svc.delete_volume("volume") @@ -213,20 +241,45 @@ def test_copy_to_existing_volume_from_source_success(self): self.svc.client.svctask.mkfcmap.assert_called_once() self.svc.client.svctask.startfcmap.assert_called_once() 
+ @patch("controller.array_action.array_mediator_svc.is_warning_message") + def _test_copy_to_existing_volume_raise_errors(self, mock_warning, client_return_value, expected_error): + mock_warning.return_value = False + self.svc.client.svcinfo.lsvdisk.side_effect = [client_return_value] + with self.assertRaises(expected_error): + self.svc.copy_to_existing_volume_from_source("a", "b", 1, 1) + + def test_copy_to_existing_volume_raise_not_found(self): + self._test_copy_to_existing_volume_raise_errors(client_return_value=Mock(as_single_element=None), + expected_error=array_errors.ObjectNotFoundError) + + def test_copy_to_existing_volume_raise_illegal_object_id(self): + self._test_copy_to_existing_volume_raise_errors(client_return_value=CLIFailureError('CMMVC6017E'), + expected_error=array_errors.IllegalObjectID) + self._test_copy_to_existing_volume_raise_errors(client_return_value=CLIFailureError('CMMVC5741E'), + expected_error=array_errors.IllegalObjectID) + @staticmethod def _mock_cli_object(cli_object): return Mock(as_single_element=cli_object) @staticmethod - def _get_cli_volume(): + def _get_cli_volume(with_deduplicated_copy=True): + se_copy = YES + deduplicated_copy = 'no' + compressed_copy = 'no' + if with_deduplicated_copy: + se_copy = 'no' + deduplicated_copy = YES + compressed_copy = YES return Munch({'vdisk_UID': 'vol_id', + 'id': 'test_id', 'name': 'source_volume', 'capacity': '1024', 'mdisk_grp_name': 'pool_name', 'FC_id': '', - 'se_copy': 'yes', - 'deduplicated_copy': 'no', - 'compressed_copy': 'no' + 'se_copy': se_copy, + 'deduplicated_copy': deduplicated_copy, + 'compressed_copy': compressed_copy }) @classmethod @@ -342,12 +395,11 @@ def test_get_object_by_id_volume_success(self): volume = self.svc.get_object_by_id("volume_id", "volume") self.assertEqual(volume.name, "volume_id") - def _prepare_mocks_for_create_snapshot(self, deduplicated_copy=True): + def _prepare_mocks_for_create_snapshot(self, support_deduplicated_copy=True, 
source_has_deduplicated_copy=False): self.svc.client.svctask.mkvolume.return_value = Mock() self.svc.client.svctask.mkfcmap.return_value = Mock() - - source_vol_to_copy_from = self._get_cli_volume() - if not deduplicated_copy: + source_vol_to_copy_from = self._get_cli_volume(source_has_deduplicated_copy) + if not support_deduplicated_copy: del source_vol_to_copy_from.deduplicated_copy target_vol_after_creation = self._get_mapless_target_cli_volume() target_vol_after_mapping = self._get_mapped_target_cli_volume() @@ -368,11 +420,12 @@ def test_create_snapshot_create_volume_error(self, mock_warning): CLIFailureError("Failed")] with self.assertRaises(CLIFailureError): - self.svc.create_snapshot("source_volume_id", "test_snapshot", "pool1") + self.svc.create_snapshot("source_volume_id", "test_snapshot", space_efficiency=None, pool="pool1") def _test_create_snapshot_lsvdisk_cli_failure_error(self, volume_id, snapshot_name, error_message_id, - expected_error, pool=None): - self._test_mediator_method_client_cli_failure_error(self.svc.create_snapshot, (volume_id, snapshot_name, pool), + expected_error, space_efficiency=None, pool=None): + self._test_mediator_method_client_cli_failure_error(self.svc.create_snapshot, + (volume_id, snapshot_name, space_efficiency, pool), self.svc.client.svcinfo.lsvdisk, error_message_id, expected_error) @@ -385,7 +438,7 @@ def test_create_snapshot_lsvdisk_cli_failure_errors(self): def test_create_snapshot_source_not_found_error(self): self.svc.client.svcinfo.lsvdisk.side_effect = [Mock(as_single_element=None)] with self.assertRaises(array_errors.ObjectNotFoundError): - self.svc.create_snapshot("source_volume_id", "test_snapshot", "pool1") + self.svc.create_snapshot("source_volume_id", "test_snapshot", space_efficiency=None, pool="pool1") @patch("controller.array_action.array_mediator_svc.is_warning_message") def test_create_snapshot_create_fcmap_error(self, mock_warning): @@ -395,7 +448,7 @@ def test_create_snapshot_create_fcmap_error(self, 
mock_warning): CLIFailureError("Failed")] with self.assertRaises(CLIFailureError): - self.svc.create_snapshot("source_volume_id", "test_snapshot", "pool1") + self.svc.create_snapshot("source_volume_id", "test_snapshot", space_efficiency=None, pool="pool1") @patch("controller.array_action.array_mediator_svc.is_warning_message") def test_create_snapshot_start_fcmap_error(self, mock_warning): @@ -405,12 +458,12 @@ def test_create_snapshot_start_fcmap_error(self, mock_warning): CLIFailureError("Failed")] with self.assertRaises(CLIFailureError): - self.svc.create_snapshot("source_volume_id", "test_snapshot", "pool1") + self.svc.create_snapshot("source_volume_id", "test_snapshot", space_efficiency=None, pool="pool1") def test_create_snapshot_success(self): self._prepare_mocks_for_create_snapshot() - snapshot = self.svc.create_snapshot("source_volume_id", "test_snapshot", "pool1") + snapshot = self.svc.create_snapshot("source_volume_id", "test_snapshot", space_efficiency=None, pool="pool1") self.assertEqual(snapshot.capacity_bytes, 1024) self.assertEqual(snapshot.array_type, 'SVC') @@ -419,14 +472,28 @@ def test_create_snapshot_success(self): def test_create_snapshot_with_different_pool_success(self): self._prepare_mocks_for_create_snapshot() - self.svc.create_snapshot("source_volume_id", "test_snapshot", "different_pool") + self.svc.create_snapshot("source_volume_id", "test_snapshot", space_efficiency=None, pool="different_pool") self.svc.client.svctask.mkvolume.assert_called_once_with(name='test_snapshot', unit='b', size=1024, pool='different_pool', thin=True) + def test_create_snapshot_with_specified_source_volume_space_efficiency_success(self): + self._prepare_mocks_for_create_snapshot(source_has_deduplicated_copy=True) + + self.svc.create_snapshot("source_volume_id", "test_snapshot", space_efficiency=None, pool=None) + self.svc.client.svctask.mkvolume.assert_called_once_with(name='test_snapshot', unit='b', size=1024, + pool='pool_name', compressed=True, 
deduplicated=True) + + def test_create_snapshot_with_different_space_efficiency_success(self): + self._prepare_mocks_for_create_snapshot(source_has_deduplicated_copy=True) + + self.svc.create_snapshot("source_volume_id", "test_snapshot", space_efficiency="thin", pool=None) + self.svc.client.svctask.mkvolume.assert_called_once_with(name='test_snapshot', unit='b', size=1024, + pool='pool_name', thin=True) + def test_create_snapshot_no_deduplicated_copy_success(self): - self._prepare_mocks_for_create_snapshot(deduplicated_copy=False) + self._prepare_mocks_for_create_snapshot(support_deduplicated_copy=False) - snapshot = self.svc.create_snapshot("source_volume_id", "test_snapshot", "pool1") + snapshot = self.svc.create_snapshot("source_volume_id", "test_snapshot", space_efficiency=None, pool="pool1") self.assertEqual(snapshot.capacity_bytes, 1024) self.assertEqual(snapshot.array_type, 'SVC') @@ -452,6 +519,14 @@ def test_delete_snapshot_call_rmfcmap(self): self.svc.client.svctask.rmfcmap.assert_called_once_with(object_id="test_fc_id", force=True) + def test_delete_snapshot_does_not_remove_hyperswap_fcmap(self): + self._prepare_mocks_for_delete_snapshot() + fcmaps_as_source, fcmaps_as_target = self._prepare_fcmaps_for_hyperswap() + self.svc.client.svcinfo.lsfcmap.side_effect = [fcmaps_as_target, fcmaps_as_source] + self.svc.delete_snapshot("test_snapshot") + + self.svc.client.svctask.rmfcmap.assert_not_called() + def _test_delete_snapshot_rmvolume_cli_failure_error(self, error_message_id, expected_error, snapshot_id="snap_id"): self._test_mediator_method_client_cli_failure_error(self.svc.delete_snapshot, (snapshot_id,), self.svc.client.svctask.rmvolume, error_message_id, @@ -478,6 +553,23 @@ def test_delete_snapshot_success(self): self.assertEqual(self.svc.client.svctask.rmfcmap.call_count, 2) self.svc.client.svctask.rmvolume.assert_called_once_with(vdisk_id="test_snapshot") + @patch("controller.array_action.array_mediator_svc.is_warning_message") + def 
test_delete_snapshot_with_fcmap_already_stopped_success(self, mock_warning): + self._prepare_mocks_for_delete_snapshot() + mock_warning.return_value = False + self.svc.client.svctask.stopfcmap.side_effect = [CLIFailureError('CMMVC5912E')] + self.svc.delete_snapshot("test_snapshot") + self.assertEqual(self.svc.client.svctask.rmfcmap.call_count, 2) + self.svc.client.svctask.rmvolume.assert_called_once_with(vdisk_id="test_snapshot") + + @patch("controller.array_action.array_mediator_svc.is_warning_message") + def test_delete_snapshot_with_stopfcmap_raise_error(self, mock_warning): + self._prepare_mocks_for_delete_snapshot() + mock_warning.return_value = False + self.svc.client.svctask.stopfcmap.side_effect = [CLIFailureError('error')] + with self.assertRaises(CLIFailureError): + self.svc.delete_snapshot("test_snapshot") + def test_validate_supported_space_efficiency_raise_error(self): space_efficiency = "Test" with self.assertRaises( @@ -487,13 +579,13 @@ def test_validate_supported_space_efficiency_raise_error(self): def test_validate_supported_space_efficiency_success(self): no_space_efficiency = "" self.svc.validate_supported_space_efficiency(no_space_efficiency) - thin_space_efficiency = "thin" + thin_space_efficiency = config.SPACE_EFFICIENCY_THIN self.svc.validate_supported_space_efficiency(thin_space_efficiency) - thick_space_efficiency = "thick" + thick_space_efficiency = config.SPACE_EFFICIENCY_THICK self.svc.validate_supported_space_efficiency(thick_space_efficiency) - compressed_space_efficiency = "compressed" + compressed_space_efficiency = config.SPACE_EFFICIENCY_COMPRESSED self.svc.validate_supported_space_efficiency(compressed_space_efficiency) - deduplicated_space_efficiency = "deduplicated" + deduplicated_space_efficiency = config.SPACE_EFFICIENCY_DEDUPLICATED self.svc.validate_supported_space_efficiency(deduplicated_space_efficiency) def test_build_kwargs_from_parameters(self): @@ -520,17 +612,20 @@ def test_properties(self): 
self.assertEqual(SVCArrayMediator.max_connections, 2) self.assertEqual(SVCArrayMediator.max_lun_retries, 10) + def _prepare_mocks_for_get_host_by_identifiers_(self, result_reader_iter, hosts): + self.svc.client.svcinfo.lshost = Mock() + self.svc.client.svcinfo.lshost.return_value = self._get_hosts_list_result(hosts) + self.svc.client.send_raw_command = Mock() + self.svc.client.send_raw_command.return_value = EMPTY_BYTES, EMPTY_BYTES + result_reader_iter.return_value = self._get_detailed_hosts_list_result(hosts) + @patch("controller.array_action.svc_cli_result_reader.SVCListResultsReader.__iter__") def test_get_host_by_identifiers_returns_host_not_found(self, result_reader_iter): host_1 = self._get_host_as_dictionary('host_id_1', 'test_host_1', ['iqn.test.1'], []) host_2 = self._get_host_as_dictionary('host_id_2', 'test_host_2', ['iqn.test.2'], []) host_3 = self._get_host_as_dictionary('host_id_3', 'test_host_3', ['iqn.test.3'], []) hosts = [host_1, host_2, host_3] - self.svc.client.svcinfo.lshost = Mock() - self.svc.client.svcinfo.lshost.return_value = self._get_hosts_list_result(hosts) - self.svc.client.send_raw_command = Mock() - self.svc.client.send_raw_command.return_value = EMPTY_BYTES, EMPTY_BYTES - result_reader_iter.return_value = self._get_detailed_hosts_list_result(hosts) + self._prepare_mocks_for_get_host_by_identifiers_(result_reader_iter, hosts) with self.assertRaises(array_errors.HostNotFoundError): self.svc.get_host_by_host_identifiers(Initiators('Test_iqn', ['Test_wwn'])) @@ -547,11 +642,7 @@ def test_get_host_by_identifiers_raise_multiplehostsfounderror(self, result_read host_2 = self._get_host_as_dictionary('host_id_2', 'test_host_2', ['iqn.test.3'], []) host_3 = self._get_host_as_dictionary('host_id_3', 'test_host_3', [], ['Test_wwn']) hosts = [host_1, host_2, host_3] - self.svc.client.svcinfo.lshost = Mock() - self.svc.client.svcinfo.lshost.return_value = self._get_hosts_list_result(hosts) - self.svc.client.send_raw_command = Mock() - 
self.svc.client.send_raw_command.return_value = EMPTY_BYTES, EMPTY_BYTES - result_reader_iter.return_value = self._get_detailed_hosts_list_result(hosts) + self._prepare_mocks_for_get_host_by_identifiers_(result_reader_iter, hosts) with self.assertRaises(array_errors.MultipleHostsFoundError): self.svc.get_host_by_host_identifiers(Initiators('iqn.test.3', ['Test_wwn'])) @@ -561,11 +652,7 @@ def test_get_host_by_identifiers_return_iscsi_host(self, result_reader_iter): host_2 = self._get_host_as_dictionary('host_id_2', 'test_host_2', ['iqn.test.2'], ['abc3']) host_3 = self._get_host_as_dictionary('host_id_3', 'test_host_3', ['iqn.test.3'], ['abc3']) hosts = [host_1, host_2, host_3] - self.svc.client.svcinfo.lshost = Mock() - self.svc.client.svcinfo.lshost.return_value = self._get_hosts_list_result(hosts) - self.svc.client.send_raw_command = Mock() - self.svc.client.send_raw_command.return_value = EMPTY_BYTES, EMPTY_BYTES - result_reader_iter.return_value = self._get_detailed_hosts_list_result(hosts) + self._prepare_mocks_for_get_host_by_identifiers_(result_reader_iter, hosts) host, connectivity_type = self.svc.get_host_by_host_identifiers(Initiators( 'iqn.test.2', ['abcd3'])) self.assertEqual('test_host_2', host) @@ -577,11 +664,7 @@ def test_get_host_by_identifiers_return_iscsi_host_with_list_iqn(self, result_re host_2 = self._get_host_as_dictionary('host_id_2', 'test_host_2', ['iqn.test.2', 'iqn.test.22'], ['abc3']) host_3 = self._get_host_as_dictionary('host_id_3', 'test_host_3', ['iqn.test.3'], ['abc3']) hosts = [host_1, host_2, host_3] - self.svc.client.svcinfo.lshost = Mock() - self.svc.client.svcinfo.lshost.return_value = self._get_hosts_list_result(hosts) - self.svc.client.send_raw_command = Mock() - self.svc.client.send_raw_command.return_value = EMPTY_BYTES, EMPTY_BYTES - result_reader_iter.return_value = self._get_detailed_hosts_list_result(hosts) + self._prepare_mocks_for_get_host_by_identifiers_(result_reader_iter, hosts) host, connectivity_type = 
self.svc.get_host_by_host_identifiers(Initiators( 'iqn.test.2', ['abcd3'])) self.assertEqual('test_host_2', host) @@ -593,11 +676,7 @@ def test_get_host_by_identifiers_return_fc_host(self, result_reader_iter): host_2 = self._get_host_as_dictionary('host_id_2', 'test_host_2', [''], ['abc2']) host_3 = self._get_host_as_dictionary('host_id_3', 'test_host_3', ['iqn.test.3'], ['abc1', 'abc3']) hosts = [host_1, host_2, host_3] - self.svc.client.svcinfo.lshost = Mock() - self.svc.client.svcinfo.lshost.return_value = self._get_hosts_list_result(hosts) - self.svc.client.send_raw_command = Mock() - self.svc.client.send_raw_command.return_value = EMPTY_BYTES, EMPTY_BYTES - result_reader_iter.return_value = self._get_detailed_hosts_list_result(hosts) + self._prepare_mocks_for_get_host_by_identifiers_(result_reader_iter, hosts) host, connectivity_type = self.svc.get_host_by_host_identifiers(Initiators( 'iqn.test.6', ['abc3', 'ABC1'])) self.assertEqual('test_host_3', host) @@ -615,11 +694,7 @@ def test_get_host_by_identifiers_with_wrong_fc_iscsi_raise_not_found(self, resul host_2 = self._get_host_as_dictionary('host_id_2', 'test_host_2', ['iqn.test.2'], ['abc3']) host_3 = self._get_host_as_dictionary('host_id_3', 'test_host_3', ['iqn.test.3'], ['abc3']) hosts = [host_1, host_2, host_3] - self.svc.client.svcinfo.lshost = Mock() - self.svc.client.svcinfo.lshost.return_value = self._get_hosts_list_result(hosts) - self.svc.client.send_raw_command = Mock() - self.svc.client.send_raw_command.return_value = EMPTY_BYTES, EMPTY_BYTES - result_reader_iter.return_value = self._get_detailed_hosts_list_result(hosts) + self._prepare_mocks_for_get_host_by_identifiers_(result_reader_iter, hosts) with self.assertRaises(array_errors.HostNotFoundError): self.svc.get_host_by_host_identifiers(Initiators('', [])) result_reader_iter.return_value = self._get_detailed_hosts_list_result(hosts) @@ -632,22 +707,20 @@ def test_get_host_by_identifiers_return_iscsi_and_fc_all_support(self, result_re host_2 = 
self._get_host_as_dictionary('host_id_2', 'test_host_2', ['iqn.test.6'], ['abcd3']) host_3 = self._get_host_as_dictionary('host_id_3', 'test_host_3', ['iqn.test.2'], ['abc3']) hosts = [host_1, host_2, host_3] - self.svc.client.svcinfo.lshost = Mock() - self.svc.client.svcinfo.lshost.return_value = self._get_hosts_list_result(hosts) - self.svc.client.send_raw_command = Mock() - self.svc.client.send_raw_command.return_value = EMPTY_BYTES, EMPTY_BYTES - result_reader_iter.return_value = self._get_detailed_hosts_list_result(hosts) + self._prepare_mocks_for_get_host_by_identifiers_(result_reader_iter, hosts) host, connectivity_type = self.svc.get_host_by_host_identifiers(Initiators('iqn.test.2', ['ABC3'])) self.assertEqual('test_host_3', host) self.assertEqual([config.ISCSI_CONNECTIVITY_TYPE, config.FC_CONNECTIVITY_TYPE], connectivity_type) - def _get_host_as_dictionary(self, id, name, iscsi_names_list, wwpns_list): + def _get_host_as_dictionary(self, id, name, iscsi_names_list, wwpns_list, portset_id=None): res = {HOST_ID_PARAM: id, HOST_NAME_PARAM: name} if iscsi_names_list: res[HOST_ISCSI_NAMES_PARAM] = iscsi_names_list if wwpns_list: res[HOST_WWPNS_PARAM] = wwpns_list + if portset_id: + res[HOST_PORTSET_ID] = portset_id return res def _get_hosts_list_result(self, hosts_dict): @@ -667,6 +740,7 @@ def _get_detailed_hosts_list_result(self, hosts_dict): if wwpns_list: for wwpn in wwpns_list: current_element.add(HOST_WWPNS_PARAM, wwpn) + current_element.add(HOST_PORTSET_ID, host_dict.get(HOST_PORTSET_ID)) detailed_hosts_list.append(current_element) return iter(detailed_hosts_list) @@ -781,55 +855,81 @@ def test_unmap_volume_success(self): self.svc.client.svctask.rmvdiskhostmap.return_value = None self.svc.unmap_volume("volume", "host") + def _prepare_mocks_for_get_iscsi_targets(self, portset_id=None): + host = self._get_host_as_dictionary('host_id', 'test_host', ['iqn.test.0', 'iqn.test.00'], ['abc0'], portset_id) + self.svc.client.svcinfo.lshost = Mock() + 
self.svc.client.svcinfo.lshost.return_value = Mock(as_single_element=host) + + def test_get_iscsi_targets_cmd_error_raise_host_not_found(self): + self.svc.client.svcinfo.lshost.return_value = Mock(as_single_element=[]) + with self.assertRaises(array_errors.HostNotFoundError): + self.svc.get_iscsi_targets_by_iqn('test_host') + def test_get_iscsi_targets_cmd_error_raise_no_targets_error(self): + self._prepare_mocks_for_get_iscsi_targets() self.svc.client.svcinfo.lsportip.side_effect = [ svc_errors.CommandExecutionError('Failed')] with self.assertRaises(array_errors.NoIscsiTargetsFoundError): - self.svc.get_iscsi_targets_by_iqn() + self.svc.get_iscsi_targets_by_iqn('test_host') def test_get_iscsi_targets_cli_error_raise_no_targets_error(self): + self._prepare_mocks_for_get_iscsi_targets() self.svc.client.svcinfo.lsportip.side_effect = [ CLIFailureError("Failed")] with self.assertRaises(array_errors.NoIscsiTargetsFoundError): - self.svc.get_iscsi_targets_by_iqn() + self.svc.get_iscsi_targets_by_iqn('test_host') def test_get_iscsi_targets_no_online_node_raise_no_targets_error(self): + self._prepare_mocks_for_get_iscsi_targets() node = Munch({'id': '1', 'name': 'node1', 'iscsi_name': 'iqn.1986-03.com.ibm:2145.v7k1.node1', 'status': 'offline'}) self.svc.client.svcinfo.lsnode.return_value = [node] with self.assertRaises(array_errors.NoIscsiTargetsFoundError): - self.svc.get_iscsi_targets_by_iqn() + self.svc.get_iscsi_targets_by_iqn('test_host') def test_get_iscsi_targets_no_nodes_nor_ips_raise_no_targets_error(self): + self._prepare_mocks_for_get_iscsi_targets() self.svc.client.svcinfo.lsnode.return_value = [] self.svc.client.svcinfo.lsportip.return_value = [] with self.assertRaises(array_errors.NoIscsiTargetsFoundError): - self.svc.get_iscsi_targets_by_iqn() + self.svc.get_iscsi_targets_by_iqn('test_host') def test_get_iscsi_targets_no_port_with_ip_raise_no_targets_error(self): + self._prepare_mocks_for_get_iscsi_targets() port_1 = Munch({'node_id': '1', 'IP_address': 
None, 'IP_address_6': ''}) port_2 = Munch({'node_id': '2', 'IP_address': '', 'IP_address_6': None}) self.svc.client.svcinfo.lsportip.return_value = [port_1, port_2] with self.assertRaises(array_errors.NoIscsiTargetsFoundError): - self.svc.get_iscsi_targets_by_iqn() + self.svc.get_iscsi_targets_by_iqn('test_host') def test_get_iscsi_targets_no_ip_raise_no_targets_error(self): + self._prepare_mocks_for_get_iscsi_targets() self.svc.client.svcinfo.lsportip.return_value = [] with self.assertRaises(array_errors.NoIscsiTargetsFoundError): - self.svc.get_iscsi_targets_by_iqn() + self.svc.get_iscsi_targets_by_iqn('test_host') + + def test_get_iscsi_targets_with_lsportip_success(self): + self._prepare_mocks_for_get_iscsi_targets() + ips_by_iqn = self.svc.get_iscsi_targets_by_iqn('test_host') + self.svc.client.svcinfo.lsportip.assert_called_once() + self.assertEqual(ips_by_iqn, {'iqn.1986-03.com.ibm:2145.v7k1.node1': ['1.1.1.1']}) - def test_get_iscsi_targets_success(self): - ips_by_iqn = self.svc.get_iscsi_targets_by_iqn() + def test_get_iscsi_targets_with_lsip_success(self): + self._prepare_mocks_for_get_iscsi_targets(portset_id='demo_id') + ips_by_iqn = self.svc.get_iscsi_targets_by_iqn('test_host') + self.svc.client.svcinfo.lsip.assert_called_once_with(filtervalue='portset_id=demo_id') + self.svc.client.svcinfo.lsportip.not_called() self.assertEqual(ips_by_iqn, {'iqn.1986-03.com.ibm:2145.v7k1.node1': ['1.1.1.1']}) def test_get_iscsi_targets_with_exception(self): self.svc.client.svcinfo.lsnode.side_effect = [Exception] with self.assertRaises(Exception): - self.svc.get_iscsi_targets_by_iqn() + self.svc.get_iscsi_targets_by_iqn('test_host') def test_get_iscsi_targets_with_multi_nodes(self): + self._prepare_mocks_for_get_iscsi_targets() node1 = Munch({'id': '1', 'name': 'node1', 'iscsi_name': 'iqn.1986-03.com.ibm:2145.v7k1.node1', @@ -844,7 +944,7 @@ def test_get_iscsi_targets_with_multi_nodes(self): port_3 = Munch({'node_id': '2', 'IP_address': '', 'IP_address_6': '1::1'}) 
self.svc.client.svcinfo.lsportip.return_value = [port_1, port_2, port_3] - ips_by_iqn = self.svc.get_iscsi_targets_by_iqn() + ips_by_iqn = self.svc.get_iscsi_targets_by_iqn('test_host') self.assertEqual(ips_by_iqn, {'iqn.1986-03.com.ibm:2145.v7k1.node1': ['1.1.1.1', '2.2.2.2'], 'iqn.1986-03.com.ibm:2145.v7k1.node2': ['[1::1]']}) @@ -900,6 +1000,15 @@ def test_expand_volume_raise_object_in_use(self): self.svc.expand_volume('vol_id', 2) self.svc.client.svctask.expandvdisksize.assert_not_called() + def test_expand_volume_in_hyperswap(self): + self._prepare_mocks_for_expand_volume() + fcmaps_as_source, fcmaps_as_target = self._prepare_fcmaps_for_hyperswap() + self.svc.client.svcinfo.lsfcmap.side_effect = [fcmaps_as_target, fcmaps_as_source] + self.svc.expand_volume('vol_id', 1024) + + self.svc.client.svctask.expandvdisksize.assert_called_once_with(vdisk_id='test_volume', unit='b', size=512) + self.svc.client.svctask.rmfcmap.assert_not_called() + def test_expand_volume_raise_object_not_found(self): self.svc.client.svcinfo.lsvdisk.return_value = Mock(as_single_element=None) with self.assertRaises(array_errors.ObjectNotFoundError): diff --git a/controller/tests/array_action/xiv/array_mediator_xiv_tests.py b/controller/tests/array_action/xiv/array_mediator_xiv_tests.py index c5a18d539..21d6012bf 100644 --- a/controller/tests/array_action/xiv/array_mediator_xiv_tests.py +++ b/controller/tests/array_action/xiv/array_mediator_xiv_tests.py @@ -1,6 +1,6 @@ import unittest -from mock import patch, Mock +from mock import patch, Mock, call from munch import Munch from pyxcli import errors as xcli_errors @@ -68,12 +68,14 @@ def test_close(self): self.mediator.client.close.assert_called_once_with() @staticmethod - def _get_cli_volume(): + def _get_cli_volume(name='mock_volume', wwn='123'): return Munch({ - 'wwn': '123', - 'name': 'mock_volume', + 'wwn': wwn, + 'name': name, + 'id': 'test_id', 'pool_name': 'fake_pool', - 'capacity': '512'}) + 'capacity': '512', + 'copy_master_wwn': 
wwn}) def _test_create_volume_with_space_efficiency_success(self, space_efficiency): self.mediator.client.cmd.vol_create = Mock() @@ -82,6 +84,7 @@ def _test_create_volume_with_space_efficiency_success(self, space_efficiency): self.mediator.client.cmd.vol_create.assert_called_once_with(vol='mock_volume', size_blocks=1, pool='fake_pool') self.assertEqual(volume.name, "mock_volume") + self.assertEqual(volume.internal_id, 'test_id') def test_create_volume_success(self): self._test_create_volume_with_space_efficiency_success(None) @@ -89,6 +92,14 @@ def test_create_volume_success(self): def test_create_volume_with_empty_space_efficiency_success(self): self._test_create_volume_with_space_efficiency_success("") + def test_create_volume_with_not_available_wwn(self): + self.mediator.client.cmd.vol_create = Mock() + self.mediator.client.cmd.vol_create.return_value = Mock( + as_single_element=self._get_cli_volume(wwn="Not Available")) + volume = self.mediator.create_volume("mock_volume", 512, None, "fake_pool") + + self.assertIsNone(volume.copy_source_id) + def test_create_volume_raise_illegal_name_for_object(self): self.mediator.client.cmd.vol_create.side_effect = [xcli_errors.IllegalNameForObjectError("", "volume", "")] with self.assertRaises(array_errors.IllegalObjectName): @@ -110,64 +121,79 @@ def test_create_volume_raise_no_space_error(self): with self.assertRaises(array_errors.NotEnoughSpaceInPool): self.mediator.create_volume("volume", 10, None, "pool1") + def test_create_volume_raise_runtime_error(self): + self.mediator.client.cmd.vol_create.side_effect = [ + xcli_errors.CommandFailedRuntimeError("", "other error", "")] + with self.assertRaises(xcli_errors.CommandFailedRuntimeError): + self.mediator.create_volume("volume", 10, None, "pool1") + @patch.object(XIVArrayMediator, "_generate_volume_response") def test_create_volume__generate_volume_response_raise_exception(self, response): response.side_effect = Exception("err") with self.assertRaises(Exception): 
self.mediator.create_volume("volume", 10, None, "pool1") - def test_copy_to_existing_volume_from_snapshot_succeeds_with_resize(self): + def _test_copy_to_existing_volume_from_snapshot(self, src_snapshot_capacity_in_bytes, + min_volume_size_in_bytes): + volume_id = "volume_id" + source_id = "source_id" volume_name = "volume" src_snapshot_name = "snapshot" - src_snapshot_capacity_in_bytes = 500 - min_volume_size_in_bytes = 1000 self.mediator.client.cmd.vol_format = Mock() self.mediator.client.cmd.vol_copy = Mock() self.mediator.client.cmd.vol_resize = Mock() - self.mediator.copy_to_existing_volume_from_source(volume_name, src_snapshot_name, + target_volume = self._get_cli_volume(name=volume_name) + source_volume = self._get_cli_volume(name=src_snapshot_name) + self.mediator.client.cmd.vol_list.side_effect = [Mock(as_single_element=target_volume), + Mock(as_single_element=source_volume)] + self.mediator.copy_to_existing_volume_from_source(volume_id, source_id, src_snapshot_capacity_in_bytes, min_volume_size_in_bytes) - volume_size_in_blocks = 1 + calls = [call(wwn=volume_id), call(wwn=source_id)] + self.mediator.client.cmd.vol_list.assert_has_calls(calls, any_order=False) self.mediator.client.cmd.vol_format.assert_called_once_with(vol=volume_name) self.mediator.client.cmd.vol_copy.assert_called_once_with(vol_src=src_snapshot_name, vol_trg=volume_name) - self.mediator.client.cmd.vol_resize.assert_called_once_with(vol=volume_name, - size_blocks=volume_size_in_blocks) - def test_copy_to_existing_volume_from_snapshot_succeeds_without_resize(self): + def test_copy_to_existing_volume_from_snapshot_succeeds_with_resize(self): + volume_size_in_blocks = 1 volume_name = "volume" - src_snapshot_name = "snapshot" - src_snapshot_capacity_in_bytes = 1000 - min_volume_size_in_bytes = 500 - self.mediator.client.cmd.vol_format = Mock() - self.mediator.client.cmd.vol_copy = Mock() - self.mediator.client.cmd.vol_resize = Mock() - 
self.mediator.copy_to_existing_volume_from_source(volume_name, src_snapshot_name, - src_snapshot_capacity_in_bytes, min_volume_size_in_bytes) - self.mediator.client.cmd.vol_format.assert_called_once_with(vol=volume_name) - self.mediator.client.cmd.vol_copy.assert_called_once_with(vol_src=src_snapshot_name, vol_trg=volume_name) + self._test_copy_to_existing_volume_from_snapshot(src_snapshot_capacity_in_bytes=500, + min_volume_size_in_bytes=1000) + + self.mediator.client.cmd.vol_resize.assert_called_once_with(vol=volume_name, size_blocks=volume_size_in_blocks) + + def test_copy_to_existing_volume_from_snapshot_succeeds_without_resize(self): + self._test_copy_to_existing_volume_from_snapshot(src_snapshot_capacity_in_bytes=1000, + min_volume_size_in_bytes=500) + self.mediator.client.cmd.vol_resize.assert_not_called() - def test_copy_to_existing_volume_from_snapshot_failed_illegal_name(self): - self._test_copy_to_existing_volume_from_snapshot_error(xcli_errors.IllegalNameForObjectError("", "", ""), - array_errors.IllegalObjectName) + def _test_copy_to_existing_volume_from_snapshot_error(self, client_method, xcli_exception, + expected_array_exception): + client_method.side_effect = [xcli_exception] + with self.assertRaises(expected_array_exception): + self.mediator.copy_to_existing_volume_from_source("volume", "snapshot", 0, 0) + + def test_copy_to_existing_volume_from_snapshot_failed_illegal_id(self): + self._test_copy_to_existing_volume_from_snapshot_error(self.mediator.client.cmd.vol_list, + xcli_errors.IllegalValueForArgumentError("", "", ""), + array_errors.IllegalObjectID) def test_copy_to_existing_volume_from_snapshot_failed_volume_not_found(self): - self._test_copy_to_existing_volume_from_snapshot_error(xcli_errors.VolumeBadNameError("", "", ""), + self._test_copy_to_existing_volume_from_snapshot_error(self.mediator.client.cmd.vol_copy, + xcli_errors.VolumeBadNameError("", "", ""), array_errors.ObjectNotFoundError) def 
test_copy_to_existing_volume_from_snapshot_failed_snapshot_not_found(self): - self._test_copy_to_existing_volume_from_snapshot_error(xcli_errors.SourceVolumeBadNameError("", "", ""), + self._test_copy_to_existing_volume_from_snapshot_error(self.mediator.client.cmd.vol_copy, + xcli_errors.SourceVolumeBadNameError("", "", ""), array_errors.ObjectNotFoundError) def test_copy_to_existing_volume_from_snapshot_failed_permission_denied(self): self._test_copy_to_existing_volume_from_snapshot_error( + self.mediator.client.cmd.vol_copy, xcli_errors.OperationForbiddenForUserCategoryError("", "", ""), array_errors.PermissionDeniedError) - def _test_copy_to_existing_volume_from_snapshot_error(self, xcli_exception, expected_array_exception): - self.mediator.client.cmd.vol_copy.side_effect = [xcli_exception] - with self.assertRaises(expected_array_exception): - self.mediator.copy_to_existing_volume_from_source("volume", "snapshot", 0, 0) - def test_delete_volume_return_volume_not_found(self): self.mediator.client.cmd.vol_list.return_value = Mock(as_single_element=None) with self.assertRaises(array_errors.ObjectNotFoundError): @@ -242,7 +268,7 @@ def test_create_snapshot_succeeds(self): xcli_snapshot = self._get_single_snapshot_result_mock(snapshot_name, snapshot_volume_name, snapshot_capacity=size_in_blocks_string) self.mediator.client.cmd.snapshot_create.return_value = xcli_snapshot - res = self.mediator.create_snapshot(snapshot_volume_wwn, snapshot_name) + res = self.mediator.create_snapshot(snapshot_volume_wwn, snapshot_name, space_efficiency=None, pool=None) self.assertEqual(res.name, snapshot_name) self.assertEqual(res.source_volume_id, snapshot_volume_wwn) self.assertEqual(res.capacity_bytes, size_in_bytes) @@ -254,7 +280,8 @@ def test_create_snapshot_raise_snapshot_source_pool_mismatch(self): xcli_volume = self._get_cli_volume() self.mediator.client.cmd.vol_list.return_value = Mock(as_single_element=xcli_volume) with 
self.assertRaises(array_errors.SnapshotSourcePoolMismatch): - self.mediator.create_snapshot(snapshot_volume_wwn, snapshot_name, "different_pool") + self.mediator.create_snapshot(snapshot_volume_wwn, snapshot_name, space_efficiency=None, + pool="different_pool") def test_create_snapshot_raise_illegal_name_for_object(self): self._test_create_snapshot_error(xcli_errors.IllegalNameForObjectError, array_errors.IllegalObjectName) @@ -273,18 +300,18 @@ def test_create_snapshot_raise_illegal_object_id(self): self.mediator.client.cmd.vol_list.side_effect = [xcli_errors.IllegalValueForArgumentError("", "snapshot-wwn", "")] with self.assertRaises(array_errors.IllegalObjectID): - self.mediator.create_snapshot("volume_id", "snapshot", "pool1") + self.mediator.create_snapshot("volume_id", "snapshot", space_efficiency=None, pool="pool1") @patch.object(XIVArrayMediator, "_generate_snapshot_response") def test_create_snapshot_generate_snapshot_response_raise_exception(self, response): response.side_effect = Exception("err") with self.assertRaises(Exception): - self.mediator.create_snapshot("volume_id", "snapshot", "pool1") + self.mediator.create_snapshot("volume_id", "snapshot", space_efficiency=None, pool="pool1") def _test_create_snapshot_error(self, xcli_exception, expected_exception): self.mediator.client.cmd.snapshot_create.side_effect = [xcli_exception("", "snapshot", "")] with self.assertRaises(expected_exception): - self.mediator.create_snapshot("volume_id", "snapshot", None) + self.mediator.create_snapshot("volume_id", "snapshot", space_efficiency=None, pool=None) def _get_single_snapshot_result_mock(self, snapshot_name, snapshot_volume_name, snapshot_capacity="17"): snapshot_wwn = "1235678" @@ -580,7 +607,7 @@ def test_get_iscsi_targets_by_iqn_fail(self): self.mediator.client.cmd.ipinterface_list.return_value = [] with self.assertRaises(Exception): - self.mediator.get_iscsi_targets_by_iqn() + self.mediator.get_iscsi_targets_by_iqn('test_host') def 
test_get_iscsi_targets_by_iqn_success(self): config_param = utils.get_mock_xiv_config_param(name="iscsi_name", value="iqn1") @@ -589,7 +616,7 @@ def test_get_iscsi_targets_by_iqn_success(self): ip_interface6 = utils.get_mock_xiv_ip_interface("iSCSI", address6="::1") self.mediator.client.cmd.ipinterface_list.return_value = [ip_interface, ip_interface6] - targets_by_iqn = self.mediator.get_iscsi_targets_by_iqn() + targets_by_iqn = self.mediator.get_iscsi_targets_by_iqn('test_host') self.assertEqual(targets_by_iqn, {"iqn1": ["1.2.3.4", "[::1]"]}) diff --git a/controller/tests/controller_server/addons_server_test.py b/controller/tests/controller_server/addons_server_test.py new file mode 100644 index 000000000..ab12ef0df --- /dev/null +++ b/controller/tests/controller_server/addons_server_test.py @@ -0,0 +1,59 @@ +import grpc +from mock import patch, Mock + +from controller.controller_server.addons_server import ReplicationControllerServicer +from controller.csi_general import replication_pb2 as pb2 +from controller.tests import utils +from controller.tests.controller_server.csi_controller_server_test import BaseControllerSetUp, CommonControllerTest +from controller.controller_server.config import PARAMETERS_SYSTEM_ID, PARAMETERS_COPY_TYPE +from controller.controller_server.test_settings import volume_name, volume_wwn, object_internal_id, \ + other_object_internal_id, replication_name, system_id, copy_type + + +class TestControllerServicerEnableVolumeReplication(BaseControllerSetUp, CommonControllerTest): + def get_tested_method(self): + return self.servicer.EnableVolumeReplication + + def get_tested_method_response_class(self): + return pb2.EnableVolumeReplicationResponse + + def setUp(self): + super().setUp() + self.servicer = ReplicationControllerServicer() + self.request.volume_id = "{}:{};{}".format("A9000", object_internal_id, volume_wwn) + self.request.replication_id = "{}:{};{}".format("A9000", other_object_internal_id, volume_wwn) + 
self.request.parameters.update({PARAMETERS_SYSTEM_ID: system_id, + PARAMETERS_COPY_TYPE: copy_type}) + self.mediator.get_object_by_id = Mock() + self.mediator.get_object_by_id.return_value = utils.get_mock_mediator_response_volume(10, volume_name, + volume_wwn, "xiv") + self.mediator.get_replication = Mock() + replication_mock = utils.get_mock_mediator_response_replication(replication_name, + object_internal_id, + other_object_internal_id) + self.mediator.get_replication.return_value = replication_mock + + def _prepare_enable_replication_mocks(self, storage_agent): + storage_agent.return_value = self.storage_agent + self.mediator.get_replication = Mock() + self.mediator.get_replication.return_value = None + self.mediator.create_replication = Mock() + + @patch("controller.controller_server.addons_server.get_agent") + def test_enable_replication_succeeds(self, storage_agent): + self._prepare_enable_replication_mocks(storage_agent) + + self.servicer.EnableVolumeReplication(self.request, self.context) + + self.assertEqual(self.context.code, grpc.StatusCode.OK) + self.mediator.get_replication.assert_called_once_with(object_internal_id, other_object_internal_id, system_id) + self.mediator.create_replication.assert_called_once_with(object_internal_id, other_object_internal_id, + system_id, copy_type) + + @patch("controller.controller_server.addons_server.get_agent") + def test_enable_replication_with_wrong_secrets(self, storage_agent): + self._test_request_with_wrong_secrets(storage_agent) + + @patch("controller.controller_server.addons_server.get_agent") + def test_enable_replication_with_array_connection_exception(self, storage_agent): + self._test_request_with_array_connection_exception(storage_agent) diff --git a/controller/tests/controller_server/csi_controller_server_test.py b/controller/tests/controller_server/csi_controller_server_test.py index 6705ffb2c..fd1276e01 100644 --- a/controller/tests/controller_server/csi_controller_server_test.py +++ 
b/controller/tests/controller_server/csi_controller_server_test.py @@ -10,14 +10,15 @@ import controller.controller_server.config as config import controller.controller_server.errors as controller_errors from controller.array_action.array_mediator_xiv import XIVArrayMediator -from controller.controller_server.csi_controller_server import ControllerServicer +from controller.controller_server.csi_controller_server import CSIControllerServicer from controller.controller_server.test_settings import volume_name, snapshot_name, snapshot_volume_name, \ - clone_volume_name, snapshot_volume_wwn, pool + clone_volume_name, snapshot_volume_wwn, pool, space_efficiency, object_internal_id from controller.csi_general import csi_pb2 from controller.tests import utils +from controller.tests.utils import ProtoBufMock -class AbstractControllerTest(unittest.TestCase): +class BaseControllerSetUp(unittest.TestCase): def setUp(self): patch("controller.array_action.array_mediator_xiv.XIVArrayMediator._connect").start() @@ -25,93 +26,116 @@ def setUp(self): self.detect_array_type = detect_array_type_patcher.start() self.detect_array_type.return_value = "a9k" self.addCleanup(detect_array_type_patcher.stop) + self.fqdn = "fqdn" + self.mediator = XIVArrayMediator("user", "password", self.fqdn) + self.mediator.client = Mock() + + self.storage_agent = MagicMock() + self.storage_agent.get_mediator.return_value.__enter__.return_value = self.mediator + + self.servicer = CSIControllerServicer() + self.request = ProtoBufMock() self.request.secrets = {"username": "user", "password": "pass", "management_address": "mg"} + self.request.parameters = {} + self.request.volume_context = {} + self.volume_capability = utils.get_mock_volume_capability() + self.capacity_bytes = 10 + self.request.capacity_range = Mock() + self.request.capacity_range.required_bytes = self.capacity_bytes + self.mediator.maximal_volume_size_in_bytes = 10 + self.mediator.minimal_volume_size_in_bytes = 2 + self.context = 
utils.FakeContext() + + +class CommonControllerTest: + @abc.abstractmethod - def get_create_object_method(self): + def get_tested_method(self): raise NotImplementedError @abc.abstractmethod - def get_create_object_response_method(self): + def get_tested_method_response_class(self): raise NotImplementedError def _test_create_object_with_empty_name(self, storage_agent): storage_agent.return_value = self.storage_agent self.request.name = "" context = utils.FakeContext() - res = self.get_create_object_method()(self.request, context) + res = self.get_tested_method()(self.request, context) self.assertEqual(context.code, grpc.StatusCode.INVALID_ARGUMENT) self.assertIn("name", context.details) - self.assertEqual(res, self.get_create_object_response_method()()) + self.assertEqual(res, self.get_tested_method_response_class()()) - def _test_create_object_with_wrong_secrets_parameters(self, secrets, message="secret"): + def _test_request_with_wrong_secrets_parameters(self, secrets, message="secret"): context = utils.FakeContext() self.request.secrets = secrets - self.get_create_object_method()(self.request, context) + self.get_tested_method()(self.request, context) self.assertEqual(context.code, grpc.StatusCode.INVALID_ARGUMENT) self.assertIn(message, context.details) - def _test_create_object_with_wrong_secrets(self, storage_agent): + def _test_request_with_wrong_secrets(self, storage_agent): storage_agent.return_value = self.storage_agent secrets = {"password": "pass", "management_address": "mg"} - self._test_create_object_with_wrong_secrets_parameters(secrets) + self._test_request_with_wrong_secrets_parameters(secrets) secrets = {"username": "user", "management_address": "mg"} - self._test_create_object_with_wrong_secrets_parameters(secrets) + self._test_request_with_wrong_secrets_parameters(secrets) secrets = {"username": "user", "password": "pass"} - self._test_create_object_with_wrong_secrets_parameters(secrets) + self._test_request_with_wrong_secrets_parameters(secrets) 
secrets = utils.get_fake_secret_config(system_id="u-") - self._test_create_object_with_wrong_secrets_parameters(secrets, message="system id") + self._test_request_with_wrong_secrets_parameters(secrets, message="system id") self.request.secrets = [] - def _test_create_object_with_array_connection_exception(self, storage_agent): + def _test_request_with_array_connection_exception(self, storage_agent): storage_agent.side_effect = [Exception("error")] context = utils.FakeContext() - self.get_create_object_method()(self.request, context) - self.assertEqual(context.code, grpc.StatusCode.INTERNAL, "connection error occured in array_connection") + self.get_tested_method()(self.request, context) + self.assertEqual(context.code, grpc.StatusCode.INTERNAL) self.assertIn("error", context.details) - def _test_create_object_with_get_array_type_exception(self, storage_agent): + def _test_request_with_get_array_type_exception(self, storage_agent): storage_agent.return_value = self.storage_agent context = utils.FakeContext() self.detect_array_type.side_effect = [array_errors.FailedToFindStorageSystemType("endpoint")] - self.get_create_object_method()(self.request, context) - self.assertEqual(context.code, grpc.StatusCode.INTERNAL, "failed to find storage system") + self.get_tested_method()(self.request, context) + self.assertEqual(context.code, grpc.StatusCode.INTERNAL) msg = array_errors.FailedToFindStorageSystemType("endpoint").message self.assertIn(msg, context.details) + def _test_request_with_wrong_parameters(self, storage_agent): + storage_agent.return_value = self.storage_agent + context = utils.FakeContext() + parameters = [{}, {"": ""}, {"pool": ""}] -class TestControllerServerCreateSnapshot(AbstractControllerTest): + for request_parameters in parameters: + self.request.parameters = request_parameters + self.get_tested_method()(self.request, context) + self.assertEqual(grpc.StatusCode.INVALID_ARGUMENT, context.code) - def get_create_object_method(self): + +class 
TestCreateSnapshot(BaseControllerSetUp, CommonControllerTest): + + def get_tested_method(self): return self.servicer.CreateSnapshot - def get_create_object_response_method(self): + def get_tested_method_response_class(self): return csi_pb2.CreateSnapshotResponse def setUp(self): super().setUp() - self.fqdn = "fqdn" - self.mediator = XIVArrayMediator("user", "password", self.fqdn) - self.mediator.client = Mock() + self.mediator.get_snapshot = Mock() self.mediator.get_snapshot.return_value = None - self.storage_agent = MagicMock() - self.storage_agent.get_mediator.return_value.__enter__.return_value = self.mediator - - self.servicer = ControllerServicer(self.fqdn) - - self.request.parameters = {} - self.capacity_bytes = 10 self.request.name = snapshot_name - self.request.source_volume_id = "{}:{}".format("A9000", snapshot_volume_wwn) + self.request.source_volume_id = "{}:{};{}".format("A9000", object_internal_id, snapshot_volume_wwn) self.mediator.get_object_by_id = Mock() self.mediator.get_object_by_id.return_value = utils.get_mock_mediator_response_volume(10, snapshot_volume_name, "wwn", "xiv") @@ -130,14 +154,19 @@ def _prepare_create_snapshot_mocks(self, storage_agent): snapshot_volume_name, "xiv") - def _test_create_snapshot_succeeds(self, storage_agent, expected_pool=None): + def _test_create_snapshot_succeeds(self, storage_agent, expected_space_efficiency=None, expected_pool=None, + system_id=None): self._prepare_create_snapshot_mocks(storage_agent) - self.servicer.CreateSnapshot(self.request, self.context) + response_snapshot = self.servicer.CreateSnapshot(self.request, self.context) self.assertEqual(self.context.code, grpc.StatusCode.OK) self.mediator.get_snapshot.assert_called_once_with(snapshot_volume_wwn, snapshot_name, pool=expected_pool) - self.mediator.create_snapshot.assert_called_once_with(snapshot_volume_wwn, snapshot_name, expected_pool) + self.mediator.create_snapshot.assert_called_once_with(snapshot_volume_wwn, snapshot_name, + 
expected_space_efficiency, expected_pool) + system_id_part = ':{}'.format(system_id) if system_id else '' + snapshot_id = 'xiv{}:0;wwn'.format(system_id_part) + self.assertEqual(response_snapshot.snapshot.snapshot_id, snapshot_id) @patch("controller.controller_server.csi_controller_server.get_agent") def test_create_snapshot_succeeds(self, storage_agent): @@ -148,11 +177,18 @@ def test_create_snapshot_with_pool_parameter_succeeds(self, storage_agent): self.request.parameters = {config.PARAMETERS_POOL: pool} self._test_create_snapshot_succeeds(storage_agent, expected_pool=pool) + @patch("controller.controller_server.csi_controller_server.get_agent") + def test_create_snapshot_with_space_efficiency_parameter_succeeds(self, storage_agent): + self.mediator.validate_supported_space_efficiency = Mock() + self.request.parameters = {config.PARAMETERS_SPACE_EFFICIENCY: space_efficiency} + self._test_create_snapshot_succeeds(storage_agent, expected_space_efficiency=space_efficiency) + def _test_create_snapshot_with_by_system_id_parameter(self, storage_agent, system_id, expected_pool): - self.request.source_volume_id = "{}:{}:{}".format("A9000", system_id, snapshot_volume_wwn) + system_id_part = ':{}'.format(system_id) if system_id else '' + self.request.source_volume_id = "{}{}:{}".format("A9000", system_id_part, snapshot_volume_wwn) self.request.parameters = {config.PARAMETERS_BY_SYSTEM: json.dumps( {"u1": {config.PARAMETERS_POOL: pool}, "u2": {config.PARAMETERS_POOL: "other_pool"}})} - self._test_create_snapshot_succeeds(storage_agent, expected_pool=expected_pool) + self._test_create_snapshot_succeeds(storage_agent, expected_pool=expected_pool, system_id=system_id) @patch("controller.controller_server.csi_controller_server.get_agent") def test_create_snapshot_with_by_system_id_parameter_succeeds(self, storage_agent): @@ -180,11 +216,11 @@ def test_create_snapshot_no_source_volume(self): @patch("controller.controller_server.csi_controller_server.get_agent") def 
test_create_snapshot_with_wrong_secrets(self, storage_agent): - self._test_create_object_with_wrong_secrets(storage_agent) + self._test_request_with_wrong_secrets(storage_agent) @patch("controller.controller_server.csi_controller_server.get_agent") def test_create_snapshot_with_array_connection_exception(self, storage_agent): - self._test_create_object_with_array_connection_exception(storage_agent) + self._test_request_with_array_connection_exception(storage_agent) @patch("controller.controller_server.csi_controller_server.get_agent") def _test_create_snapshot_get_snapshot_raise_error(self, storage_agent, exception, grpc_status): @@ -239,7 +275,7 @@ def create_snapshot_returns_error(self, storage_agent, create_snapshot, return_c self.assertEqual(self.context.code, return_code) self.assertIn(msg, self.context.details) self.mediator.get_snapshot.assert_called_once_with(snapshot_volume_wwn, snapshot_name, pool=None) - self.mediator.create_snapshot.assert_called_once_with(snapshot_volume_wwn, snapshot_name, None) + self.mediator.create_snapshot.assert_called_once_with(snapshot_volume_wwn, snapshot_name, None, None) def test_create_snapshot_with_not_found_exception(self): self.create_snapshot_returns_error(return_code=grpc.StatusCode.NOT_FOUND, @@ -266,6 +302,10 @@ def test_create_snapshot_with_illegal_object_id_exception(self): self.create_snapshot_returns_error(return_code=grpc.StatusCode.INVALID_ARGUMENT, err=array_errors.IllegalObjectID("volume-id")) + def test_create_snapshot_with_space_efficiency_not_supported_exception(self): + self.create_snapshot_returns_error(return_code=grpc.StatusCode.INVALID_ARGUMENT, + err=array_errors.SpaceEfficiencyNotSupported(["fake"])) + def test_create_snapshot_with_other_exception(self): self.create_snapshot_returns_error(return_code=grpc.StatusCode.INTERNAL, err=Exception("error")) @@ -282,51 +322,59 @@ def test_create_snapshot_with_name_prefix(self, storage_agent): self.servicer.CreateSnapshot(self.request, self.context) 
self.assertEqual(self.context.code, grpc.StatusCode.OK) - self.mediator.create_snapshot.assert_called_once_with(snapshot_volume_wwn, "prefix_some_name", None) + self.mediator.create_snapshot.assert_called_once_with(snapshot_volume_wwn, "prefix_some_name", None, None) -class TestControllerServerDeleteSnapshot(AbstractControllerTest): - def get_create_object_method(self): +class TestDeleteSnapshot(BaseControllerSetUp, CommonControllerTest): + def get_tested_method(self): return self.servicer.DeleteSnapshot - def get_create_object_response_method(self): + def get_tested_method_response_class(self): return csi_pb2.DeleteSnapshotResponse def setUp(self): super().setUp() - self.fqdn = "fqdn" - self.mediator = XIVArrayMediator("user", "password", self.fqdn) - self.mediator.client = Mock() self.mediator.get_snapshot = Mock() self.mediator.get_snapshot.return_value = None - self.storage_agent = MagicMock() - self.storage_agent.get_mediator.return_value.__enter__.return_value = self.mediator - self.servicer = ControllerServicer(self.fqdn) - self.request.parameters = {} - self.request.snapshot_id = "A9000:BADC0FFEE0DDF00D00000000DEADBABE" - self.context = utils.FakeContext() + self.mediator.delete_snapshot = Mock() + self.request.snapshot_id = "A9000:0;BADC0FFEE0DDF00D00000000DEADBABE" @patch("controller.array_action.array_mediator_xiv.XIVArrayMediator.delete_snapshot", Mock()) @patch("controller.controller_server.csi_controller_server.get_agent") - def test_delete_snapshot_succeeds(self, storage_agent): + def _test_delete_snapshot_succeeds(self, snapshot_id, storage_agent): storage_agent.return_value = self.storage_agent + self.request.snapshot_id = snapshot_id self.servicer.DeleteSnapshot(self.request, self.context) - self.assertEqual(self.context.code, grpc.StatusCode.OK) + def test_delete_snapshot_with_internal_id_succeeds(self): + self._test_delete_snapshot_succeeds("xiv:0;volume-id") + self.mediator.delete_snapshot.assert_called_once() + + def 
test_delete_snapshot_with_system_id_succeeds(self): + self._test_delete_snapshot_succeeds("xiv:system_id:volume-id") + self.mediator.delete_snapshot.assert_called_once() + + def test_delete_snapshot_with_system_id_internal_id_succeeds(self): + self._test_delete_snapshot_succeeds("xiv:system_id:0;volume-id") + self.mediator.delete_snapshot.assert_called_once() + + def test_delete_snapshot_no_internal_id_succeeds(self): + self._test_delete_snapshot_succeeds("xiv:volume-id") + self.mediator.delete_snapshot.assert_called_once() + + def test_delete_snapshot_bad_id_succeeds(self): + self._test_delete_snapshot_succeeds("xiv:a:a:volume-id") + self.mediator.delete_snapshot.assert_not_called() + @patch("controller.controller_server.csi_controller_server.get_agent") def test_delete_snapshot_with_wrong_secrets(self, storage_agent): - self._test_create_object_with_wrong_secrets(storage_agent) + self._test_request_with_wrong_secrets(storage_agent) @patch("controller.controller_server.csi_controller_server.get_agent") def test_delete_snapshot_with_array_connection_exception(self, storage_agent): - storage_agent.side_effect = [Exception("a_enter error")] - - self.servicer.DeleteSnapshot(self.request, self.context) - - self.assertEqual(self.context.code, grpc.StatusCode.INTERNAL, "array connection internal error") - self.assertTrue("a_enter error" in self.context.details) + self._test_request_with_array_connection_exception(storage_agent) @patch("controller.controller_server.csi_controller_server.get_agent") def test_delete_snapshot_invalid_snapshot_id(self, storage_agent): @@ -338,53 +386,25 @@ def test_delete_snapshot_invalid_snapshot_id(self, storage_agent): self.assertEqual(self.context.code, grpc.StatusCode.OK) -class ProtoBufMock(MagicMock): - def HasField(self, field): - return hasattr(self, field) - +class TestCreateVolume(BaseControllerSetUp, CommonControllerTest): -class TestControllerServerCreateVolume(AbstractControllerTest): - - def get_create_object_method(self): + 
def get_tested_method(self): return self.servicer.CreateVolume - def get_create_object_response_method(self): + def get_tested_method_response_class(self): return csi_pb2.CreateVolumeResponse def setUp(self): super().setUp() - self.fqdn = "fqdn" - self.mediator = XIVArrayMediator("user", "password", self.fqdn) - self.mediator.client = Mock() self.mediator.get_volume = Mock() - self.mediator.get_volume.side_effect = [array_errors.ObjectNotFoundError("volume")] - - self.storage_agent = MagicMock() - self.storage_agent.get_mediator.return_value.__enter__.return_value = self.mediator - - self.servicer = ControllerServicer(self.fqdn) - - caps = Mock() - caps.mount = Mock() - caps.mount.fs_type = "ext4" - access_types = csi_pb2.VolumeCapability.AccessMode - caps.access_mode.mode = access_types.SINGLE_NODE_WRITER - - self.request.volume_capabilities = [caps] - - self.mediator.maximal_volume_size_in_bytes = 10 - self.mediator.minimal_volume_size_in_bytes = 2 + self.mediator.get_volume.side_effect = array_errors.ObjectNotFoundError("vol") self.request.parameters = {config.PARAMETERS_POOL: pool} - self.capacity_bytes = 10 - self.request.capacity_range = Mock() - self.request.capacity_range.required_bytes = self.capacity_bytes + self.request.volume_capabilities = [self.volume_capability] self.request.name = volume_name self.request.volume_content_source = None - self.context = utils.FakeContext() - @patch("controller.controller_server.csi_controller_server.get_agent") def test_create_volume_with_empty_name(self, storage_agent): self._test_create_object_with_empty_name(storage_agent) @@ -395,7 +415,7 @@ def _prepare_create_volume_mocks(self, storage_agent): self.mediator.create_volume = Mock() self.mediator.create_volume.return_value = utils.get_mock_mediator_response_volume(10, "volume", "wwn", "xiv") - def _test_create_volume_succeeds(self, storage_agent, expected_pool=pool): + def _test_create_volume_succeeds(self, storage_agent, expected_volume_id, expected_pool=pool): 
self._prepare_create_volume_mocks(storage_agent) response_volume = self.servicer.CreateVolume(self.request, self.context) @@ -404,21 +424,22 @@ def _test_create_volume_succeeds(self, storage_agent, expected_pool=pool): self.mediator.create_volume.assert_called_once_with(volume_name, 10, None, expected_pool) self.assertEqual(response_volume.volume.content_source.volume.volume_id, '') self.assertEqual(response_volume.volume.content_source.snapshot.snapshot_id, '') + self.assertEqual(response_volume.volume.volume_id, expected_volume_id) @patch("controller.controller_server.csi_controller_server.get_agent") def test_create_volume_succeeds(self, storage_agent): - self._test_create_volume_succeeds(storage_agent) + self._test_create_volume_succeeds(storage_agent, 'xiv:0;wwn') @patch("controller.controller_server.csi_controller_server.get_agent") def test_create_volume_with_topologies_succeeds(self, storage_agent): self.request.secrets = utils.get_fake_secret_config(system_id="u2", supported_topologies=[ - {"topology.kubernetes.io/test": "topology_value"}]) + {"topology.block.csi.ibm.com/test": "topology_value"}]) self.request.accessibility_requirements.preferred = [ - ProtoBufMock(segments={"topology.kubernetes.io/test": "topology_value", - "topology.kubernetes.io/test2": "topology_value2"})] + ProtoBufMock(segments={"topology.block.csi.ibm.com/test": "topology_value", + "topology.block.csi.ibm.com/test2": "topology_value2"})] self.request.parameters = {config.PARAMETERS_BY_SYSTEM: json.dumps( {"u1": {config.PARAMETERS_POOL: pool}, "u2": {config.PARAMETERS_POOL: "other_pool"}})} - self._test_create_volume_succeeds(storage_agent, expected_pool="other_pool") + self._test_create_volume_succeeds(storage_agent, 'xiv:u2:0;wwn', expected_pool="other_pool") @patch("controller.controller_server.csi_controller_server.get_agent") def test_create_volume_with_space_efficiency_succeeds(self, storage_agent): @@ -447,7 +468,7 @@ def test_create_volume_idempotent_no_source_succeeds(self, 
storage_agent): @patch("controller.controller_server.csi_controller_server.get_agent") def test_create_volume_with_wrong_secrets(self, a_enter): - self._test_create_object_with_wrong_secrets(a_enter) + self._test_request_with_wrong_secrets(a_enter) @patch("controller.controller_server.csi_controller_server.get_agent") def test_create_volume_no_pool(self, storage_agent): @@ -458,49 +479,41 @@ def test_create_volume_no_pool(self, storage_agent): @patch("controller.controller_server.csi_controller_server.get_agent") def test_create_volume_with_wrong_parameters(self, storage_agent): - storage_agent.return_value = self.storage_agent - - self.request.parameters = {config.PARAMETERS_POOL: pool} - self.servicer.CreateVolume(self.request, self.context) - self.assertNotEqual(self.context.code, grpc.StatusCode.INVALID_ARGUMENT) - - self.request.parameters = {"capabilities": ""} - self.servicer.CreateVolume(self.request, self.context) - self.assertEqual(self.context.code, grpc.StatusCode.INVALID_ARGUMENT, "capacity is missing in secrets") - self.assertEqual(self.context.code, grpc.StatusCode.INVALID_ARGUMENT, "pool parameter is missing") - self.assertIn("parameter", self.context.details) + self._test_request_with_wrong_parameters(storage_agent) @patch("controller.controller_server.csi_controller_server.get_agent") def test_create_volume_with_wrong_volume_capabilities(self, storage_agent): storage_agent.return_value = self.storage_agent - caps = Mock() - caps.mount = Mock() - caps.mount.fs_type = "ext42" - access_types = csi_pb2.VolumeCapability.AccessMode - caps.access_mode.mode = access_types.SINGLE_NODE_WRITER - - self.request.volume_capabilities = [caps] + volume_capability = utils.get_mock_volume_capability(fs_type="ext42") + self.request.volume_capabilities = [volume_capability] self.servicer.CreateVolume(self.request, self.context) self.assertEqual(self.context.code, grpc.StatusCode.INVALID_ARGUMENT, "wrong fs_type") self.assertIn("fs_type", self.context.details) - 
caps.mount.fs_type = "ext4" - caps.access_mode.mode = access_types.MULTI_NODE_SINGLE_WRITER - self.request.volume_capabilities = [caps] + access_mode = csi_pb2.VolumeCapability.AccessMode + volume_capability = utils.get_mock_volume_capability(mode=access_mode.MULTI_NODE_SINGLE_WRITER) + self.request.volume_capabilities = [volume_capability] self.servicer.CreateVolume(self.request, self.context) - self.assertEqual(self.context.code, grpc.StatusCode.INVALID_ARGUMENT, "wrong access_mode") + self.assertEqual(self.context.code, grpc.StatusCode.INVALID_ARGUMENT) self.assertTrue("access mode" in self.context.details) + volume_capability = utils.get_mock_volume_capability(mount_flags=["no_formatting"]) + self.request.volume_capabilities = [volume_capability] + + self.servicer.CreateVolume(self.request, self.context) + self.assertEqual(self.context.code, grpc.StatusCode.INVALID_ARGUMENT) + self.assertTrue("mount_flags is unsupported" in self.context.details) + @patch("controller.controller_server.csi_controller_server.get_agent") def test_create_volume_with_array_connection_exception(self, storage_agent): - self._test_create_object_with_array_connection_exception(storage_agent) + self._test_request_with_array_connection_exception(storage_agent) @patch("controller.controller_server.csi_controller_server.get_agent") def test_create_volume_with_get_array_type_exception(self, storage_agent): - self._test_create_object_with_get_array_type_exception(storage_agent) + self._test_request_with_get_array_type_exception(storage_agent) @patch("controller.controller_server.csi_controller_server.get_agent") def test_create_volume_get_volume_exception(self, storage_agent): @@ -559,23 +572,23 @@ def test_create_volume_with_illegal_object_name_exception(self): self.create_volume_returns_error(return_code=grpc.StatusCode.INVALID_ARGUMENT, err=array_errors.IllegalObjectName("volume")) - def test_create_volume_with_create_volume_with_volume_exsits_exception(self): + def 
test_create_volume_with_volume_exists_exception(self): self.create_volume_returns_error(return_code=grpc.StatusCode.ALREADY_EXISTS, err=array_errors.VolumeAlreadyExists("volume", "endpoint")) - def test_create_volume_with_create_volume_with_pool_does_not_exist_exception(self): + def test_create_volume_with_pool_does_not_exist_exception(self): self.create_volume_returns_error(return_code=grpc.StatusCode.INVALID_ARGUMENT, err=array_errors.PoolDoesNotExist(pool, "endpoint")) - def test_create_volume_with_create_volume_with_pool_does_not_match_capabilities_exception(self): + def test_create_volume_with_pool_does_not_match_space_efficiency_exception(self): self.create_volume_returns_error(return_code=grpc.StatusCode.INVALID_ARGUMENT, - err=array_errors.PoolDoesNotMatchCapabilities(pool, "", "endpoint")) + err=array_errors.PoolDoesNotMatchSpaceEfficiency("pool1", "", "endpoint")) def test_create_volume_with_space_efficiency_not_supported_exception(self): self.create_volume_returns_error(return_code=grpc.StatusCode.INVALID_ARGUMENT, err=array_errors.SpaceEfficiencyNotSupported(["fake"])) - def test_create_volume_with_create_volume_with_other_exception(self): + def test_create_volume_with_other_exception(self): self.create_volume_returns_error(return_code=grpc.StatusCode.INTERNAL, err=Exception("error")) @@ -653,7 +666,7 @@ def test_create_volume_with_required_bytes_too_large_fail(self, storage_agent): self.mediator.create_volume.assert_not_called() def test_create_volume_with_no_space_in_pool(self): - self.create_volume_returns_error(return_code=grpc.StatusCode.RESOURCE_EXHAUSTED, + self.create_volume_returns_error(return_code=grpc.StatusCode.INTERNAL, err=array_errors.NotEnoughSpaceInPool("pool")) def _prepare_idempotent_tests(self): @@ -743,10 +756,9 @@ def test_create_volume_from_snapshot_success(self, storage_agent): "a9k") response_volume = self.servicer.CreateVolume(self.request, self.context) self.assertEqual(self.context.code, grpc.StatusCode.OK) - 
self.mediator.copy_to_existing_volume_from_source.assert_called_once_with(volume_name, snapshot_name, + self.mediator.copy_to_existing_volume_from_source.assert_called_once_with('wwn2', snapshot_id, snapshot_capacity_bytes, - self.capacity_bytes, - pool) + self.capacity_bytes) self.assertEqual(response_volume.volume.content_source.volume.volume_id, '') self.assertEqual(response_volume.volume.content_source.snapshot.snapshot_id, snapshot_id) @@ -767,8 +779,8 @@ def test_create_volume_from_source_source_snapshot_invalid(self): self.assertIn("invalid_snapshot_id", self.context.details) @patch("controller.controller_server.csi_controller_server.get_agent") - def test_create_volume_from_source_illegal_object_name(self, storage_agent): - array_exception = array_errors.IllegalObjectName("") + def test_create_volume_from_source_illegal_object_id(self, storage_agent): + array_exception = array_errors.IllegalObjectID("") self._test_create_volume_from_snapshot_error(storage_agent, array_exception, grpc.StatusCode.INVALID_ARGUMENT) @@ -826,9 +838,10 @@ def _test_create_volume_from_snapshot_error(self, storage_agent, copy_exception, self.storage_agent.get_mediator.return_value.__exit__.side_effect = [copy_exception] self.mediator.delete_volume = Mock() - self.servicer.CreateVolume(self.request, self.context) + response = self.servicer.CreateVolume(self.request, self.context) self.mediator.delete_volume.assert_called_with(target_volume_id) self.assertEqual(self.context.code, return_code) + self.assertIsInstance(response, csi_pb2.CreateVolumeResponse) @patch("controller.controller_server.csi_controller_server.get_agent") def test_clone_volume_success(self, storage_agent): @@ -843,10 +856,9 @@ def test_clone_volume_success(self, storage_agent): "a9k") response_volume = self.servicer.CreateVolume(self.request, self.context) self.assertEqual(self.context.code, grpc.StatusCode.OK) - self.mediator.copy_to_existing_volume_from_source.assert_called_once_with(volume_name, 
clone_volume_name, + self.mediator.copy_to_existing_volume_from_source.assert_called_once_with('wwn2', volume_id, volume_capacity_bytes, - self.capacity_bytes, - pool) + self.capacity_bytes) self.assertEqual(response_volume.volume.content_source.volume.volume_id, volume_id) self.assertEqual(response_volume.volume.content_source.snapshot.snapshot_id, '') @@ -866,7 +878,13 @@ def _get_source(object_id, object_type): return source -class TestControllerServerDeleteVolume(AbstractControllerTest): +class TestDeleteVolume(BaseControllerSetUp, CommonControllerTest): + + def get_tested_method(self): + return self.servicer.DeleteVolume + + def get_tested_method_response_class(self): + return csi_pb2.DeleteVolumeResponse def get_create_object_method(self): return self.servicer.DeleteVolume @@ -876,28 +894,16 @@ def get_create_object_response_method(self): def setUp(self): super().setUp() - self.fqdn = "fqdn" - self.mediator = XIVArrayMediator("user", "password", self.fqdn) - self.storage_agent = MagicMock() self.mediator.get_volume = Mock() self.mediator.is_volume_has_snapshots = Mock() self.mediator.is_volume_has_snapshots.return_value = False - self.mediator.client = Mock() - - self.storage_agent = MagicMock() - self.storage_agent.get_mediator.return_value.__enter__.return_value = self.mediator - - self.servicer = ControllerServicer(self.fqdn) - - self.request.volume_id = "xiv:volume-id" - - self.context = utils.FakeContext() + self.request.volume_id = "xiv:0;volume-id" @patch("controller.controller_server.csi_controller_server.get_agent") def test_delete_volume_with_wrong_secrets(self, storage_agent): - self._test_create_object_with_wrong_secrets(storage_agent) + self._test_request_with_wrong_secrets(storage_agent) @patch("controller.controller_server.csi_controller_server.get_agent") def test_delete_volume_invalid_volume_id(self, storage_agent): @@ -914,7 +920,7 @@ def test_delete_volume_with_array_connection_exception(self, storage_agent): 
self.servicer.DeleteVolume(self.request, self.context) - self.assertEqual(self.context.code, grpc.StatusCode.INTERNAL, "array connection internal error") + self.assertEqual(self.context.code, grpc.StatusCode.INTERNAL) self.assertTrue("a_enter error" in self.context.details) @patch("controller.array_action.array_mediator_xiv.XIVArrayMediator.delete_volume") @@ -943,29 +949,39 @@ def test_delete_volume_has_snapshots(self): @patch("controller.array_action.array_mediator_xiv.XIVArrayMediator.delete_volume") @patch("controller.controller_server.csi_controller_server.get_agent") - def test_delete_volume_succeeds(self, storage_agent, delete_volume): + def _test_delete_volume_succeeds(self, volume_id, storage_agent, delete_volume): storage_agent.return_value = self.storage_agent delete_volume.return_value = Mock() + self.request.volume_id = volume_id self.servicer.DeleteVolume(self.request, self.context) self.assertEqual(self.context.code, grpc.StatusCode.OK) + def test_delete_volume_with_internal_id_succeeds(self): + self._test_delete_volume_succeeds("xiv:0;volume-id") -class TestControllerServerPublishVolume(AbstractControllerTest): + def test_delete_volume_with_system_id_succeeds(self): + self._test_delete_volume_succeeds("xiv:system_id:volume-id") - def get_create_object_method(self): + def test_delete_volume_with_system_id_internal_id_succeeds(self): + self._test_delete_volume_succeeds("xiv:system_id:0;volume-id") + + def test_delete_volume_no_internal_id_succeeds(self): + self._test_delete_volume_succeeds("xiv:volume-id") + + +class TestPublishVolume(BaseControllerSetUp, CommonControllerTest): + + def get_tested_method(self): return self.servicer.ControllerPublishVolume - def get_create_object_response_method(self): + def get_tested_method_response_class(self): return csi_pb2.ControllerPublishVolumeResponse def setUp(self): super().setUp() - self.fqdn = "fqdn" - self.hostname = "hostname" - self.mediator = XIVArrayMediator("user", "password", self.fqdn) - 
self.mediator.client = Mock() + self.hostname = "hostname" self.mediator.get_host_by_host_identifiers = Mock() self.mediator.get_host_by_host_identifiers.return_value = self.hostname, ["iscsi"] @@ -978,30 +994,13 @@ def setUp(self): self.mediator.get_iscsi_targets_by_iqn = Mock() self.mediator.get_iscsi_targets_by_iqn.return_value = {"iqn1": ["1.1.1.1", "2.2.2.2"], "iqn2": ["[::1]"]} - self.mediator.client = Mock() - - self.storage_agent = MagicMock() - self.storage_agent.get_mediator.return_value.__enter__.return_value = self.mediator - - self.servicer = ControllerServicer(self.fqdn) - - self.request = Mock() arr_type = XIVArrayMediator.array_type self.request.volume_id = "{}:wwn1".format(arr_type) self.request.node_id = "hostname;iqn.1994-05.com.redhat:686358c930fe;500143802426baf4" self.request.readonly = False self.request.readonly = False - self.request.secrets = {"username": "user", "password": "pass", "management_address": "mg"} - self.request.volume_context = {} - caps = Mock() - caps.mount = Mock() - caps.mount.fs_type = "ext4" - access_types = csi_pb2.VolumeCapability.AccessMode - caps.access_mode.mode = access_types.SINGLE_NODE_WRITER - self.request.volume_capability = caps - - self.context = utils.FakeContext() + self.request.volume_capability = utils.get_mock_volume_capability() @patch("controller.controller_server.csi_controller_server.get_agent") def test_publish_volume_success(self, storage_agent): @@ -1022,7 +1021,7 @@ def test_publish_volume_validateion_exception(self, publish_validation): @patch("controller.controller_server.csi_controller_server.get_agent") def test_publish_volume_with_wrong_secrets(self, storage_agent): - self._test_create_object_with_wrong_secrets(storage_agent) + self._test_request_with_wrong_secrets(storage_agent) def test_publish_volume_wrong_volume_id(self): self.request.volume_id = "some-wrong-id-format" @@ -1268,20 +1267,17 @@ def test_map_volume_by_initiators_exceptions(self, storage_agent, map_volume_by_ 
self.assertEqual(self.context.code, grpc.StatusCode.INVALID_ARGUMENT) -class TestControllerServerUnPublishVolume(AbstractControllerTest): +class TestUnpublishVolume(BaseControllerSetUp, CommonControllerTest): - def get_create_object_method(self): + def get_tested_method(self): return self.servicer.ControllerUnpublishVolume - def get_create_object_response_method(self): + def get_tested_method_response_class(self): return csi_pb2.ControllerUnpublishVolumeResponse def setUp(self): super().setUp() - self.fqdn = "fqdn" self.hostname = "hostname" - self.mediator = XIVArrayMediator("user", "password", self.fqdn) - self.mediator.client = Mock() self.mediator.get_host_by_host_identifiers = Mock() self.mediator.get_host_by_host_identifiers.return_value = self.hostname, ["iscsi"] @@ -1289,19 +1285,9 @@ def setUp(self): self.mediator.unmap_volume = Mock() self.mediator.unmap_volume.return_value = None - self.storage_agent = MagicMock() - self.storage_agent.get_mediator.return_value.__enter__.return_value = self.mediator - - self.servicer = ControllerServicer(self.fqdn) - - self.request = Mock() arr_type = XIVArrayMediator.array_type self.request.volume_id = "{}:wwn1".format(arr_type) self.request.node_id = "hostname;iqn1;500143802426baf4" - self.request.secrets = {"username": "user", "password": "pass", "management_address": "mg"} - self.request.volume_context = {} - - self.context = utils.FakeContext() @patch("controller.controller_server.csi_controller_server.get_agent") def test_unpublish_volume_success(self, storage_agent): @@ -1322,10 +1308,10 @@ def test_unpublish_volume_validation_exception(self, publish_validation): @patch("controller.controller_server.csi_controller_server.get_agent") def test_unpublish_volume_with_wrong_secrets(self, storage_agent): - self._test_create_object_with_wrong_secrets(storage_agent) + self._test_request_with_wrong_secrets(storage_agent) - def test_unpublish_volume_wrong_volume_id(self): - self.request.volume_id = "some-wrong-id-format" + 
def test_unpublish_volume_with_too_much_delimiters_in_volume_id(self): + self.request.volume_id = "too:much:delimiters:in:id" self.servicer.ControllerUnpublishVolume(self.request, self.context) @@ -1382,54 +1368,27 @@ def test_unpublish_volume_unmap_volume_excpetions(self, storage_agent): self.assertEqual(self.context.code, grpc.StatusCode.INTERNAL) -class TestControllerServerGetCapabilities(unittest.TestCase): - - def setUp(self): - self.fqdn = "fqdn" - self.servicer = ControllerServicer(self.fqdn) +class TestGetCapabilities(BaseControllerSetUp): def test_controller_get_capabilities(self): - request = Mock() - context = Mock() - self.servicer.ControllerGetCapabilities(request, context) + self.servicer.ControllerGetCapabilities(self.request, self.context) -class TestControllerServerExpandVolume(AbstractControllerTest): +class TestExpandVolume(BaseControllerSetUp, CommonControllerTest): - def get_create_object_method(self): + def get_tested_method(self): return self.servicer.ControllerExpandVolume - def get_create_object_response_method(self): + def get_tested_method_response_class(self): return csi_pb2.ControllerExpandVolumeResponse def setUp(self): super().setUp() - self.fqdn = "fqdn" - self.mediator = XIVArrayMediator("user", "password", self.fqdn) - self.mediator.client = Mock() - self.storage_agent = MagicMock() - self.storage_agent.get_mediator.return_value.__enter__.return_value = self.mediator - - self.servicer = ControllerServicer(self.fqdn) - - self.access_types = csi_pb2.VolumeCapability.AccessMode - self.fs_type = "ext4" - - self.mediator.maximal_volume_size_in_bytes = 10 - self.mediator.minimal_volume_size_in_bytes = 2 - - self.request.volume_capability = csi_pb2.VolumeCapability( - access_mode=csi_pb2.VolumeCapability.AccessMode(mode=self.access_types.SINGLE_NODE_WRITER), - mount=csi_pb2.VolumeCapability.MountVolume(fs_type=self.fs_type)) self.request.parameters = {} - self.capacity_bytes = 6 - self.request.capacity_range = Mock() - 
self.request.capacity_range.required_bytes = self.capacity_bytes - self.volume_id = "volume-id" + self.volume_id = "vol-id" self.request.volume_id = "{}:{}".format("xiv", self.volume_id) self.request.volume_content_source = None - self.context = utils.FakeContext() self.mediator.get_object_by_id = Mock() self.volume_before_expand = utils.get_mock_mediator_response_volume(2, volume_name, @@ -1440,6 +1399,7 @@ def setUp(self): self.volume_id, "a9k") self.mediator.get_object_by_id.side_effect = [self.volume_before_expand, self.volume_after_expand] + self.request.volume_capability = self.volume_capability def _prepare_expand_volume_mocks(self, storage_agent): storage_agent.return_value = self.storage_agent @@ -1523,11 +1483,11 @@ def test_expand_volume_not_found_after_expansion(self, storage_agent): @patch("controller.controller_server.csi_controller_server.get_agent") def test_expand_volume_with_wrong_secrets(self, a_enter): - self._test_create_object_with_wrong_secrets(a_enter) + self._test_request_with_wrong_secrets(a_enter) @patch("controller.controller_server.csi_controller_server.get_agent") def test_expand_volume_with_array_connection_exception(self, storage_agent): - self._test_create_object_with_array_connection_exception(storage_agent) + self._test_request_with_array_connection_exception(storage_agent) @patch("controller.array_action.array_mediator_xiv.XIVArrayMediator.expand_volume") @patch("controller.controller_server.csi_controller_server.get_agent") @@ -1564,13 +1524,9 @@ def test_expand_volume_with_no_space_in_pool_exception(self): err=array_errors.NotEnoughSpaceInPool("pool")) -class TestIdentityServer(unittest.TestCase): - - def setUp(self): - self.fqdn = "fqdn" - self.servicer = ControllerServicer(self.fqdn) +class TestIdentityServer(BaseControllerSetUp): - @patch.object(ControllerServicer, "_ControllerServicer__get_identity_config") + @patch.object(CSIControllerServicer, "get_identity_config") def test_identity_plugin_get_info_succeeds(self, 
identity_config): plugin_name = "plugin-name" version = "1.1.0" @@ -1581,7 +1537,7 @@ def test_identity_plugin_get_info_succeeds(self, identity_config): res = self.servicer.GetPluginInfo(request, context) self.assertEqual(res, csi_pb2.GetPluginInfoResponse(name=plugin_name, vendor_version=version)) - @patch.object(ControllerServicer, "_ControllerServicer__get_identity_config") + @patch.object(CSIControllerServicer, "get_identity_config") def test_identity_plugin_get_info_fails_when_attributes_from_config_are_missing(self, identity_config): request = Mock() context = Mock() @@ -1596,7 +1552,7 @@ def test_identity_plugin_get_info_fails_when_attributes_from_config_are_missing( self.assertEqual(res, csi_pb2.GetPluginInfoResponse()) context.set_code.assert_called_with(grpc.StatusCode.INTERNAL) - @patch.object(ControllerServicer, "_ControllerServicer__get_identity_config") + @patch.object(CSIControllerServicer, "get_identity_config") def test_identity_plugin_get_info_fails_when_name_or_value_are_empty(self, identity_config): request = Mock() context = Mock() @@ -1621,3 +1577,148 @@ def test_identity_probe(self): request = Mock() context = Mock() self.servicer.Probe(request, context) + + +class TestValidateVolumeCapabilities(BaseControllerSetUp, CommonControllerTest): + + def get_tested_method(self): + return self.servicer.ValidateVolumeCapabilities + + def get_tested_method_response_class(self): + return csi_pb2.ValidateVolumeCapabilitiesResponse + + def setUp(self): + super().setUp() + + arr_type = XIVArrayMediator.array_type + self.request.volume_id = "{}:wwn1".format(arr_type) + self.request.parameters = {config.PARAMETERS_POOL: "pool1"} + + self.mediator.get_object_by_id = Mock() + self.mediator.get_object_by_id.return_value = utils.get_mock_mediator_response_volume(10, "vol", "wwn2", "a9k") + self.request.volume_capabilities = [self.volume_capability] + + def _assertResponse(self, expected_status_code, expected_details_substring): + 
self.assertEqual(self.context.code, expected_status_code) + self.assertTrue(expected_details_substring in self.context.details) + + @patch("controller.controller_server.csi_controller_server.get_agent") + def test_validate_volume_capabilities_success(self, storage_agent): + storage_agent.return_value = self.storage_agent + + self.servicer.ValidateVolumeCapabilities(self.request, self.context) + + self.assertEqual(self.context.code, grpc.StatusCode.OK) + + @patch("controller.controller_server.csi_controller_server.get_agent") + def test_validate_volume_capabilities_with_empty_id(self, storage_agent): + storage_agent.return_value = self.storage_agent + self.request.volume_id = "" + + self.servicer.ValidateVolumeCapabilities(self.request, self.context) + + self._assertResponse(grpc.StatusCode.INVALID_ARGUMENT, "volume id") + + @patch("controller.controller_server.csi_controller_server.get_agent") + def test_validate_volume_capabilities_with_wrong_secrets(self, storage_agent): + self._test_request_with_wrong_secrets(storage_agent) + + @patch("controller.controller_server.csi_controller_server.get_agent") + def test_validate_volume_capabilities_with_unsupported_access_mode(self, storage_agent): + storage_agent.return_value = self.storage_agent + self.request.volume_capabilities[0].access_mode.mode = 999 + + self.servicer.ValidateVolumeCapabilities(self.request, self.context) + + self._assertResponse(grpc.StatusCode.INVALID_ARGUMENT, "unsupported access mode") + + @patch("controller.controller_server.csi_controller_server.get_agent") + def test_validate_volume_capabilities_with_unsupported_fs_type(self, storage_agent): + storage_agent.return_value = self.storage_agent + + volume_capability = utils.get_mock_volume_capability(fs_type="ext3") + self.request.volume_capabilities = [volume_capability] + + self.servicer.ValidateVolumeCapabilities(self.request, self.context) + + self._assertResponse(grpc.StatusCode.INVALID_ARGUMENT, "fs_type") + + 
@patch("controller.controller_server.csi_controller_server.get_agent") + def test_validate_volume_capabilities_with_no_capabilities(self, storage_agent): + storage_agent.return_value = self.storage_agent + self.request.volume_capabilities = {} + + self.servicer.ValidateVolumeCapabilities(self.request, self.context) + + self._assertResponse(grpc.StatusCode.INVALID_ARGUMENT, "not set") + + @patch("controller.controller_server.csi_controller_server.get_agent") + def test_validate_volume_capabilities_with_bad_id(self, storage_agent): + storage_agent.return_value = self.storage_agent + self.request.volume_id = "wwn1" + + self.servicer.ValidateVolumeCapabilities(self.request, self.context) + + self._assertResponse(grpc.StatusCode.NOT_FOUND, "id format") + + @patch("controller.controller_server.csi_controller_server.get_agent") + def test_validate_volume_capabilities_with_volume_not_found(self, storage_agent): + storage_agent.return_value = self.storage_agent + self.mediator.get_object_by_id.return_value = None + + self.servicer.ValidateVolumeCapabilities(self.request, self.context) + + self._assertResponse(grpc.StatusCode.NOT_FOUND, "wwn") + + @patch("controller.controller_server.csi_controller_server.get_agent") + def test_validate_volume_capabilities_with_volume_context_not_match(self, storage_agent): + storage_agent.return_value = self.storage_agent + self.request.volume_context = {config.VOLUME_CONTEXT_VOLUME_NAME: "fake"} + + self.servicer.ValidateVolumeCapabilities(self.request, self.context) + + self._assertResponse(grpc.StatusCode.INVALID_ARGUMENT, "volume context") + + @patch("controller.controller_server.csi_controller_server.get_agent") + def test_validate_volume_capabilities_with_space_efficiency_not_match(self, storage_agent): + storage_agent.return_value = self.storage_agent + self.request.parameters.update({config.PARAMETERS_SPACE_EFFICIENCY: "not_none"}) + self.mediator.validate_supported_space_efficiency = Mock() + + 
self.servicer.ValidateVolumeCapabilities(self.request, self.context) + + self._assertResponse(grpc.StatusCode.INVALID_ARGUMENT, "space efficiency") + + @patch("controller.controller_server.csi_controller_server.get_agent") + def test_validate_volume_capabilities_with_pool_not_match(self, storage_agent): + storage_agent.return_value = self.storage_agent + self.request.parameters.update({config.PARAMETERS_POOL: "other pool"}) + + self.servicer.ValidateVolumeCapabilities(self.request, self.context) + + self._assertResponse(grpc.StatusCode.INVALID_ARGUMENT, "pool") + + @patch("controller.controller_server.csi_controller_server.get_agent") + def test_validate_volume_capabilities_with_prefix_not_match(self, storage_agent): + storage_agent.return_value = self.storage_agent + self.request.parameters.update({config.PARAMETERS_VOLUME_NAME_PREFIX: "prefix"}) + + self.servicer.ValidateVolumeCapabilities(self.request, self.context) + + self._assertResponse(grpc.StatusCode.INVALID_ARGUMENT, "prefix") + + @patch("controller.controller_server.csi_controller_server.get_agent") + def test_validate_volume_capabilities_parameters_success(self, storage_agent): + storage_agent.return_value = self.storage_agent + self.request.parameters = {config.PARAMETERS_VOLUME_NAME_PREFIX: "prefix", + config.PARAMETERS_POOL: "pool2", + config.PARAMETERS_SPACE_EFFICIENCY: "not_none"} + volume_response = utils.get_mock_mediator_response_volume(10, "prefix_vol", "wwn2", "a9k", + space_efficiency="not_none") + volume_response.pool = "pool2" + self.mediator.get_object_by_id.return_value = volume_response + self.mediator.validate_supported_space_efficiency = Mock() + + self.servicer.ValidateVolumeCapabilities(self.request, self.context) + + self.assertEqual(self.context.code, grpc.StatusCode.OK) diff --git a/controller/tests/controller_server/utils_test.py b/controller/tests/controller_server/utils_test.py index 4184ea9b6..0e768e439 100644 --- a/controller/tests/controller_server/utils_test.py +++ 
b/controller/tests/controller_server/utils_test.py @@ -5,11 +5,13 @@ import controller.array_action.errors as array_errors import controller.controller_server.utils as utils -from controller.controller_server import config -from controller.controller_server.csi_controller_server import ControllerServicer +from controller.array_action import config as array_config +from controller.controller_server import config as controller_config +from controller.controller_server.csi_controller_server import CSIControllerServicer from controller.controller_server.errors import ObjectIdError, ValidationException from controller.controller_server.test_settings import pool, user, password, array from controller.csi_general import csi_pb2 +from controller.tests import utils as test_utils from controller.tests.controller_server.csi_controller_server_test import ProtoBufMock from controller.tests.utils import get_fake_secret_config @@ -17,8 +19,7 @@ class TestUtils(unittest.TestCase): def setUp(self): - self.fqdn = "fqdn" - self.servicer = ControllerServicer(self.fqdn) + self.servicer = CSIControllerServicer() self.config = {"controller": {"publish_context_lun_parameter": "lun", "publish_context_connectivity_parameter": "connectivity_type", "publish_context_separator": ",", @@ -75,7 +76,7 @@ def _test_validate_secrets_with_config_valid_system_id(self, system_id): def test_validate_secrets_with_config_valid_system_id(self): self._test_validate_secrets_with_config_valid_system_id("ui_.d") - self._test_validate_secrets_with_config_valid_system_id("a" * config.SECRET_SYSTEM_ID_MAX_LENGTH) + self._test_validate_secrets_with_config_valid_system_id("a" * controller_config.SECRET_SYSTEM_ID_MAX_LENGTH) def _test_validate_secrets_with_config_invalid_system_id(self, system_id): secrets = get_fake_secret_config(system_id=system_id) @@ -83,7 +84,7 @@ def _test_validate_secrets_with_config_invalid_system_id(self, system_id): def test_validate_secrets_with_config_invalid_parameters(self): system_ids 
= ["-u1", "u:1", "u1+", "u1*", "u-1(", "u/1", "u=1", " ", "", - "a" * (config.SECRET_SYSTEM_ID_MAX_LENGTH + 1)] + "a" * (controller_config.SECRET_SYSTEM_ID_MAX_LENGTH + 1)] for system_id in system_ids: self._test_validate_secrets_with_config_invalid_system_id(system_id=system_id) @@ -105,46 +106,42 @@ def test_get_array_connection_info_from_secrets(self): self._test_get_array_connection_info_from_secrets(secrets, system_id="u1") secrets = {"username": user, "password": password, "management_address": array} self._test_get_array_connection_info_from_secrets(secrets) - secrets = get_fake_secret_config(supported_topologies=[{"topology.kubernetes.io/test": "zone1"}]) + secrets = get_fake_secret_config(supported_topologies=[{"topology.block.csi.ibm.com/test1": "zone1"}]) self._test_get_array_connection_info_from_secrets(secrets, - topologies={"topology.kubernetes.io/test": "zone1", - "topology.block.csi.ibm.com/test": "dev1"}) + topologies={"topology.block.csi.ibm.com/test1": "zone1", + "topology.block.csi.ibm.com/test2": "dev1"}) def _test_get_pool_from_parameters(self, parameters, expected_pool=pool, system_id=None): volume_parameters = utils.get_volume_parameters(parameters, system_id) self.assertEqual(volume_parameters.pool, expected_pool) def test_get_pool_from_parameters(self): - parameters = {config.PARAMETERS_POOL: pool} + parameters = {controller_config.PARAMETERS_POOL: pool} self._test_get_pool_from_parameters(parameters) self._test_get_pool_from_parameters(parameters, system_id="u1") - parameters = {config.PARAMETERS_BY_SYSTEM: json.dumps( - {"u1": {config.PARAMETERS_POOL: pool}, "u2": {config.PARAMETERS_POOL: "other_pool"}})} + parameters = {controller_config.PARAMETERS_BY_SYSTEM: json.dumps( + {"u1": {controller_config.PARAMETERS_POOL: pool}, "u2": {controller_config.PARAMETERS_POOL: "other_pool"}})} self._test_get_pool_from_parameters(parameters, system_id="u1") self._test_get_pool_from_parameters(parameters, expected_pool="other_pool", system_id="u2") 
self._test_get_pool_from_parameters(parameters, expected_pool=None) def test_validate_file_system_volume_capabilities(self): - cap = Mock() - cap.mount = Mock() - cap.mount.fs_type = "ext4" access_mode = csi_pb2.VolumeCapability.AccessMode - cap.access_mode.mode = access_mode.SINGLE_NODE_WRITER - cap.HasField.return_value = True - utils.validate_csi_volume_capabilties([cap]) + cap = test_utils.get_mock_volume_capability() + utils.validate_csi_volume_capabilities([cap]) with self.assertRaises(ValidationException): - utils.validate_csi_volume_capabilties([]) + utils.validate_csi_volume_capabilities([]) cap.mount.fs_type = "ext4dummy" with self.assertRaises(ValidationException): - utils.validate_csi_volume_capabilties([cap]) + utils.validate_csi_volume_capabilities([cap]) cap.mount.fs_type = "ext4" cap.access_mode.mode = access_mode.SINGLE_NODE_READER_ONLY with self.assertRaises(ValidationException): - utils.validate_csi_volume_capabilties([cap]) + utils.validate_csi_volume_capabilities([cap]) def test_validate_create_volume_source_empty(self): request = Mock() @@ -175,11 +172,11 @@ def test_validate_raw_block_volume_capabilities(self): is_block = True caps.HasField.side_effect = [is_mount, is_block] - utils.validate_csi_volume_capabilties([caps]) + utils.validate_csi_volume_capabilities([caps]) @patch('controller.controller_server.utils.validate_secrets') - @patch('controller.controller_server.utils.validate_csi_volume_capabilties') - def test_validate_create_volume_request(self, valiate_capabilities, validate_secrets): + @patch('controller.controller_server.utils.validate_csi_volume_capabilities') + def test_validate_create_volume_request(self, validate_capabilities, validate_secrets): request = Mock() request.name = "" @@ -196,13 +193,13 @@ def test_validate_create_volume_request(self, valiate_capabilities, validate_sec self.assertTrue("size" in str(ex)) request.capacity_range.required_bytes = 10 - valiate_capabilities.side_effect = ValidationException("msg") + 
validate_capabilities.side_effect = ValidationException("msg") with self.assertRaises(ValidationException) as ex: utils.validate_create_volume_request(request) self.assertTrue("msg" in str(ex)) - valiate_capabilities.side_effect = None + validate_capabilities.side_effect = None validate_secrets.side_effect = ValidationException(" other msg") @@ -230,12 +227,13 @@ def test_validate_create_volume_request(self, valiate_capabilities, validate_sec utils.validate_create_volume_request(request) self.assertTrue("parameters" in str(ex)) - request.parameters = {config.PARAMETERS_POOL: pool, config.PARAMETERS_SPACE_EFFICIENCY: "thin "} + request.parameters = {controller_config.PARAMETERS_POOL: pool, + controller_config.PARAMETERS_SPACE_EFFICIENCY: "thin "} request.volume_content_source = None utils.validate_create_volume_request(request) - request.parameters = {config.PARAMETERS_POOL: pool} + request.parameters = {controller_config.PARAMETERS_POOL: pool} utils.validate_create_volume_request(request) request.capacity_range.required_bytes = 0 @@ -322,12 +320,11 @@ def test_validate_publish_volume_request(self, validate_capabilities, validate_s self.assertTrue("msg1" in str(ex)) validate_capabilities.side_effect = None - request.secrets = ["secrets"] - validate_secrets.side_effect = [ValidationException("msg2")] + validate_secrets.side_effect = [ValidationException("secrets")] with self.assertRaises(ValidationException) as ex: utils.validate_publish_volume_request(request) - self.assertTrue("msg2" in str(ex)) + self.assertTrue("secrets" in ex.message) validate_secrets.side_effect = None @@ -338,14 +335,14 @@ def test_validate_unpublish_volume_request(self, validate_secrets): request = Mock() request.volume_id = "somebadvolumename" - with self.assertRaises(ValidationException) as ex: + with self.assertRaises(ObjectIdError) as ex: utils.validate_unpublish_volume_request(request) self.assertTrue("volume" in str(ex)) request.volume_id = "xiv:volume" - request.secrets = ["secrets"] - 
validate_secrets.side_effect = [ValidationException("msg2")] + validate_secrets.side_effect = [ValidationException("secret")] + with self.assertRaises(ValidationException) as ex: utils.validate_unpublish_volume_request(request) self.assertTrue("msg2" in str(ex)) @@ -354,18 +351,37 @@ def test_validate_unpublish_volume_request(self, validate_secrets): utils.validate_unpublish_volume_request(request) - request.volume_id = "xiv:u2:volume" - - utils.validate_unpublish_volume_request(request) + def _test_get_volume_id_info(self, object_id, system_id=None, internal_id=None): + system_id_field = ':{}'.format(system_id) if system_id else '' + ids_field = '{};{}'.format(internal_id, object_id) if internal_id else object_id + volume_id = '{}{}:{}'.format('xiv', system_id_field, ids_field) + volume_id_info = utils.get_volume_id_info(volume_id) + self.assertEqual(volume_id_info.array_type, "xiv") + self.assertEqual(volume_id_info.system_id, system_id) + self.assertEqual(volume_id_info.internal_id, internal_id) + self.assertEqual(volume_id_info.object_id, object_id) def test_get_volume_id_info(self): + self._test_get_volume_id_info(object_id="volume-id") + + def test_get_volume_id_info_with_system_id(self): + self._test_get_volume_id_info(object_id="volume-id", system_id="system_id") + + def test_get_volume_id_info_with_internal_id(self): + self._test_get_volume_id_info(object_id="volume-id", internal_id="0") + + def test_get_volume_id_info_with_internal_id_system_id(self): + self._test_get_volume_id_info(object_id="volume-id", system_id="system_id", internal_id="0") + + def test_get_volume_id_info_too_many_semicolons_fail(self): with self.assertRaises(ObjectIdError) as ex: - utils.get_volume_id_info("badvolumeformat") - self.assertTrue("volume" in str(ex)) + utils.get_volume_id_info("xiv:0;volume;id") + self.assertIn("Wrong volume id format", str(ex.exception)) - volume_id_info = utils.get_volume_id_info("xiv:volume-id") - self.assertEqual(volume_id_info.array_type, "xiv") - 
self.assertEqual(volume_id_info.object_id, "volume-id") + def test_get_volume_id_info_no_id_fail(self): + with self.assertRaises(ObjectIdError) as ex: + utils.get_volume_id_info("badvolumeformat") + self.assertIn("Wrong volume id format", str(ex.exception)) def test_get_node_id_info(self): with self.assertRaises(array_errors.HostNotFoundError) as ex: @@ -412,3 +428,61 @@ def test_generate_publish_volume_response_success(self): self.assertEqual(res.publish_context["lun"], '1') self.assertEqual(res.publish_context["connectivity_type"], "fc") self.assertEqual(res.publish_context["fc_wwns"], "wwn1,wwn2") + + def _test_validate_parameters_match_volume(self, volume_field, volume_value, parameter_field, parameter_value, + default_space_efficiency=None): + volume = test_utils.get_mock_mediator_response_volume(10, "vol", "wwn2", "a9k") + setattr(volume, volume_field, volume_value) + volume.default_space_efficiency = default_space_efficiency + if parameter_field: + parameters = {parameter_field: parameter_value} + else: + parameters = {} + + utils.validate_parameters_match_volume(parameters, volume) + + def test_validate_parameters_match_volume_se_fail(self): + with self.assertRaises(ValidationException): + self._test_validate_parameters_match_volume(volume_field="space_efficiency", + volume_value=array_config.SPACE_EFFICIENCY_NONE, + parameter_field=controller_config.PARAMETERS_SPACE_EFFICIENCY, + parameter_value="thin") + + def test_validate_parameters_match_volume_thin_se_success(self): + self._test_validate_parameters_match_volume(volume_field="space_efficiency", + volume_value=array_config.SPACE_EFFICIENCY_THIN, + parameter_field=controller_config.PARAMETERS_SPACE_EFFICIENCY, + parameter_value="thin") + + def test_validate_parameters_match_volume_default_se_success(self): + self._test_validate_parameters_match_volume(volume_field="space_efficiency", + volume_value=array_config.SPACE_EFFICIENCY_NONE, + parameter_field=None, + parameter_value=None, + 
default_space_efficiency='none') + + def test_validate_parameters_match_volume_pool_fail(self): + with self.assertRaises(ValidationException): + self._test_validate_parameters_match_volume(volume_field="pool", + volume_value="test_pool", + parameter_field=controller_config.PARAMETERS_POOL, + parameter_value="fake_pool") + + def test_validate_parameters_match_volume_pool_success(self): + self._test_validate_parameters_match_volume(volume_field="pool", + volume_value="test_pool", + parameter_field=controller_config.PARAMETERS_POOL, + parameter_value="test_pool") + + def test_validate_parameters_match_volume_prefix_fail(self): + with self.assertRaises(ValidationException): + self._test_validate_parameters_match_volume(volume_field="name", + volume_value="vol-with-no-prefix", + parameter_field=controller_config.PARAMETERS_VOLUME_NAME_PREFIX, + parameter_value="prefix") + + def test_validate_parameters_match_volume_prefix_success(self): + self._test_validate_parameters_match_volume(volume_field="name", + volume_value="prefix_vol", + parameter_field=controller_config.PARAMETERS_VOLUME_NAME_PREFIX, + parameter_value="prefix") diff --git a/controller/tests/utils.py b/controller/tests/utils.py index 017f5c4ae..42e0d7549 100644 --- a/controller/tests/utils.py +++ b/controller/tests/utils.py @@ -1,22 +1,30 @@ import json -from mock import Mock import grpc +from mock import Mock, MagicMock from controller.controller_server.controller_types import ArrayConnectionInfo from controller.controller_server.test_settings import user as test_user, password as test_password, array as test_array -def get_mock_mediator_response_volume(size, name, wwn, array_type, copy_source_id=None): +class ProtoBufMock(MagicMock): + def HasField(self, field): + return hasattr(self, field) + + +def get_mock_mediator_response_volume(size, name, wwn, array_type, copy_source_id=None, space_efficiency=None, + default_space_efficiency=None): volume = Mock() volume.capacity_bytes = size volume.id = wwn + 
volume.internal_id = "0" volume.name = name volume.array_address = "arr1" volume.pool = "pool1" volume.array_type = array_type volume.copy_source_id = copy_source_id - + volume.space_efficiency = space_efficiency + volume.default_space_efficiency = default_space_efficiency return volume @@ -24,6 +32,7 @@ def get_mock_mediator_response_snapshot(capacity, name, wwn, volume_name, array_ snapshot = Mock() snapshot.capacity_bytes = capacity snapshot.id = wwn + snapshot.internal_id = "0" snapshot.name = name snapshot.volume_name = volume_name snapshot.array_address = "arr1" @@ -33,6 +42,31 @@ def get_mock_mediator_response_snapshot(capacity, name, wwn, volume_name, array_ return snapshot +def get_mock_mediator_response_replication(name, volume_internal_id, other_volume_internal_id, + copy_type="sync", is_primary=True, is_ready=True): + replication = Mock() + replication.name = name + replication.volume_internal_id = volume_internal_id + replication.other_volume_internal_id = other_volume_internal_id + replication.copy_type = copy_type + replication.is_primary = is_primary + replication.is_ready = is_ready + + return replication + + +def get_mock_volume_capability(mode=1, fs_type="ext4", mount_flags=None): + capability = ProtoBufMock(spec=["mount", "access_mode"]) + mount = ProtoBufMock(spec=["fs_type", "mount_flags"]) + access_mode = ProtoBufMock(spec=["mode"]) + setattr(mount, "mount_flags", mount_flags) + setattr(mount, "fs_type", fs_type) + setattr(access_mode, "mode", mode) + setattr(capability, "mount", mount) + setattr(capability, "access_mode", access_mode) + return capability + + def get_fake_array_connection_info(user="user", password="pass", array_addresses=None, system_id="u1"): if array_addresses is None: array_addresses = ["arr1"] diff --git a/deploy/kubernetes/examples/demo-pvc-file-system.yaml b/deploy/kubernetes/examples/demo-pvc-file-system.yaml index 2bb67464e..28f093dc9 100644 --- a/deploy/kubernetes/examples/demo-pvc-file-system.yaml +++ 
b/deploy/kubernetes/examples/demo-pvc-file-system.yaml @@ -3,7 +3,7 @@ apiVersion: v1 metadata: name: demo-pvc-file-system spec: - volumeMode: Filesystem # Optional. The default is Filesystem. + volumeMode: Filesystem # Optional. The default is Filesystem. accessModes: - ReadWriteOnce resources: diff --git a/deploy/kubernetes/examples/demo-pvc-from-snapshot.yaml b/deploy/kubernetes/examples/demo-pvc-from-snapshot.yaml index d4b632a22..9f45307cb 100644 --- a/deploy/kubernetes/examples/demo-pvc-from-snapshot.yaml +++ b/deploy/kubernetes/examples/demo-pvc-from-snapshot.yaml @@ -11,6 +11,6 @@ spec: storage: 1Gi storageClassName: demo-storageclass dataSource: - name: demo-snapshot + name: demo-volumesnapshot kind: VolumeSnapshot apiGroup: snapshot.storage.k8s.io diff --git a/deploy/kubernetes/examples/demo-secret-config.json b/deploy/kubernetes/examples/demo-secret-config.json new file mode 100644 index 000000000..b4539a55c --- /dev/null +++ b/deploy/kubernetes/examples/demo-secret-config.json @@ -0,0 +1,24 @@ +{ + "demo-management-id-1": { + "username": "demo-username-1", + "password": "demo-password-1", + "management_address": "demo-management-address-1", + "supported_topologies": [ + { + "topology.block.csi.ibm.com/demo-region": "demo-region-1", + "topology.block.csi.ibm.com/demo-zone": "demo-zone-1" + } + ] + }, + "demo-management-id-2": { + "username": "demo-username-2", + "password": "demo-password-2", + "management_address": "demo-management-address-2", + "supported_topologies": [ + { + "topology.block.csi.ibm.com/demo-region": "demo-region-2", + "topology.block.csi.ibm.com/demo-zone": "demo-zone-2" + } + ] + } +} diff --git a/deploy/kubernetes/examples/demo-snapshotclass.yaml b/deploy/kubernetes/examples/demo-snapshotclass.yaml deleted file mode 100644 index 98039a921..000000000 --- a/deploy/kubernetes/examples/demo-snapshotclass.yaml +++ /dev/null @@ -1,11 +0,0 @@ -apiVersion: snapshot.storage.k8s.io/v1beta1 -kind: VolumeSnapshotClass -metadata: - name: 
demo-snapshotclass -driver: block.csi.ibm.com -deletionPolicy: Delete -parameters: - csi.storage.k8s.io/snapshotter-secret-name: demo-secret - csi.storage.k8s.io/snapshotter-secret-namespace: default - snapshot_name_prefix: demoSnapshot # Optional. - pool: demo-pool # Optional. Use to create the snapshot on a different pool than the source. diff --git a/deploy/kubernetes/examples/demo-statefulset-file-system.yaml b/deploy/kubernetes/examples/demo-statefulset-file-system.yaml deleted file mode 100644 index b0ab4e9ec..000000000 --- a/deploy/kubernetes/examples/demo-statefulset-file-system.yaml +++ /dev/null @@ -1,27 +0,0 @@ -kind: StatefulSet -apiVersion: apps/v1 -metadata: - name: demo-statefulset-file-system -spec: - selector: - matchLabels: - app: demo-statefulset - serviceName: demo-statefulset - replicas: 1 - template: - metadata: - labels: - app: demo-statefulset - spec: - containers: - - name: demo-container - image: registry.access.redhat.com/ubi8/ubi:latest - command: [ "/bin/sh", "-c", "--" ] - args: [ "while true; do sleep 30; done;" ] - volumeMounts: - - name: demo-volume-file-system - mountPath: "/data" - volumes: - - name: demo-volume-file-system - persistentVolumeClaim: - claimName: demo-pvc-file-system diff --git a/deploy/kubernetes/examples/demo-statefulset-raw-block.yaml b/deploy/kubernetes/examples/demo-statefulset-raw-block.yaml deleted file mode 100644 index 50770d5a5..000000000 --- a/deploy/kubernetes/examples/demo-statefulset-raw-block.yaml +++ /dev/null @@ -1,27 +0,0 @@ -kind: StatefulSet -apiVersion: apps/v1 -metadata: - name: demo-statefulset-raw-block -spec: - selector: - matchLabels: - app: demo-statefulset - serviceName: demo-statefulset - replicas: 1 - template: - metadata: - labels: - app: demo-statefulset - spec: - containers: - - name: demo-container - image: registry.access.redhat.com/ubi8/ubi:latest - command: [ "/bin/sh", "-c", "--" ] - args: [ "while true; do sleep 30; done;" ] - volumeDevices: - - name: demo-volume-raw-block - 
devicePath: "/dev/block" - volumes: - - name: demo-volume-raw-block - persistentVolumeClaim: - claimName: demo-pvc-raw-block diff --git a/deploy/kubernetes/examples/demo-statefulset-combined.yaml b/deploy/kubernetes/examples/demo-statefulset.yaml similarity index 96% rename from deploy/kubernetes/examples/demo-statefulset-combined.yaml rename to deploy/kubernetes/examples/demo-statefulset.yaml index d4a884ce7..f510ad1e4 100644 --- a/deploy/kubernetes/examples/demo-statefulset-combined.yaml +++ b/deploy/kubernetes/examples/demo-statefulset.yaml @@ -1,7 +1,7 @@ kind: StatefulSet apiVersion: apps/v1 metadata: - name: demo-statefulset-combined + name: demo-statefulset spec: selector: matchLabels: diff --git a/deploy/kubernetes/examples/demo-storageclass-config-secret.yaml b/deploy/kubernetes/examples/demo-storageclass-config-secret.yaml new file mode 100644 index 000000000..4aa90c9d1 --- /dev/null +++ b/deploy/kubernetes/examples/demo-storageclass-config-secret.yaml @@ -0,0 +1,19 @@ +kind: StorageClass +apiVersion: storage.k8s.io/v1 +metadata: + name: demo-storageclass-config-secret +provisioner: block.csi.ibm.com +volumeBindingMode: WaitForFirstConsumer +parameters: + # non-csi.storage.k8s.io parameters may be specified in by_management_id per system and/or outside by_management_id as the cross-system default. + + by_management_id: '{"demo-management-id-1":{"pool":"demo-pool-1","SpaceEfficiency":"deduplicated","volume_name_prefix":"demo-prefix-1"}, + "demo-management-id-2":{"pool":"demo-pool-2","volume_name_prefix":"demo-prefix-2"}}' # Optional. + pool: demo-pool + SpaceEfficiency: thin # Optional. + volume_name_prefix: demo-prefix # Optional. + + csi.storage.k8s.io/fstype: xfs # Optional. Values ext4/xfs. The default is ext4. 
+ csi.storage.k8s.io/secret-name: demo-config-secret + csi.storage.k8s.io/secret-namespace: default +allowVolumeExpansion: true diff --git a/deploy/kubernetes/examples/demo-storageclass.yaml b/deploy/kubernetes/examples/demo-storageclass.yaml index f34a20b43..1d13b26e3 100644 --- a/deploy/kubernetes/examples/demo-storageclass.yaml +++ b/deploy/kubernetes/examples/demo-storageclass.yaml @@ -4,16 +4,11 @@ metadata: name: demo-storageclass provisioner: block.csi.ibm.com parameters: - SpaceEfficiency: deduplicated # Optional. pool: demo-pool + SpaceEfficiency: thin # Optional. + volume_name_prefix: demo-prefix # Optional. - csi.storage.k8s.io/provisioner-secret-name: demo-secret - csi.storage.k8s.io/provisioner-secret-namespace: default - csi.storage.k8s.io/controller-publish-secret-name: demo-secret - csi.storage.k8s.io/controller-publish-secret-namespace: default - csi.storage.k8s.io/controller-expand-secret-name: demo-secret - csi.storage.k8s.io/controller-expand-secret-namespace: default - - csi.storage.k8s.io/fstype: xfs # Optional. Values ext4\xfs. The default is ext4. - volume_name_prefix: demoPVC # Optional. + csi.storage.k8s.io/fstype: xfs # Optional. Values ext4/xfs. The default is ext4. 
+ csi.storage.k8s.io/secret-name: demo-secret + csi.storage.k8s.io/secret-namespace: default allowVolumeExpansion: true diff --git a/deploy/kubernetes/examples/demo-volumereplication.yaml b/deploy/kubernetes/examples/demo-volumereplication.yaml new file mode 100644 index 000000000..c3b5f5f9b --- /dev/null +++ b/deploy/kubernetes/examples/demo-volumereplication.yaml @@ -0,0 +1,12 @@ +apiVersion: replication.storage.openshift.io/v1alpha1 +kind: VolumeReplication +metadata: + name: demo-volumereplication + namespace: default +spec: + volumeReplicationClass: demo-volumereplicationclass + replicationState: primary + replicationHandle: demo-volumehandle + dataSource: + kind: PersistentVolumeClaim + name: demo-pvc-file-system # Ensure that this is in the same namespace as VolumeReplication. diff --git a/deploy/kubernetes/examples/demo-volumereplicationclass.yaml b/deploy/kubernetes/examples/demo-volumereplicationclass.yaml new file mode 100644 index 000000000..c592ca323 --- /dev/null +++ b/deploy/kubernetes/examples/demo-volumereplicationclass.yaml @@ -0,0 +1,12 @@ +apiVersion: replication.storage.openshift.io/v1alpha1 +kind: VolumeReplicationClass +metadata: + name: demo-volumereplicationclass +spec: + provisioner: block.csi.ibm.com + parameters: + system_id: demo-system-id + copy_type: async # Optional. Values sync/async. The default is sync. 
+ + replication.storage.openshift.io/replication-secret-name: demo-secret + replication.storage.openshift.io/replication-secret-namespace: default diff --git a/deploy/kubernetes/examples/demo-snapshot.yaml b/deploy/kubernetes/examples/demo-volumesnapshot.yaml similarity index 64% rename from deploy/kubernetes/examples/demo-snapshot.yaml rename to deploy/kubernetes/examples/demo-volumesnapshot.yaml index bef4b1300..396f194cd 100644 --- a/deploy/kubernetes/examples/demo-snapshot.yaml +++ b/deploy/kubernetes/examples/demo-volumesnapshot.yaml @@ -1,8 +1,8 @@ apiVersion: snapshot.storage.k8s.io/v1beta1 kind: VolumeSnapshot metadata: - name: demo-snapshot + name: demo-volumesnapshot spec: - volumeSnapshotClassName: demo-snapshotclass + volumeSnapshotClassName: demo-volumesnapshotclass source: persistentVolumeClaimName: demo-pvc-file-system diff --git a/deploy/kubernetes/examples/demo-volumesnapshotclass-config-secret.yaml b/deploy/kubernetes/examples/demo-volumesnapshotclass-config-secret.yaml new file mode 100644 index 000000000..958a04a86 --- /dev/null +++ b/deploy/kubernetes/examples/demo-volumesnapshotclass-config-secret.yaml @@ -0,0 +1,17 @@ +apiVersion: snapshot.storage.k8s.io/v1beta1 +kind: VolumeSnapshotClass +metadata: + name: demo-volumesnapshotclass-config-secret +driver: block.csi.ibm.com +deletionPolicy: Delete +parameters: + # non-csi.storage.k8s.io parameters may be specified in by_management_id per system and/or outside by_management_id as the cross-system default. + + by_management_id: '{"demo-management-id-1":{"pool":"demo-pool-1","SpaceEfficiency":"deduplicated","snapshot_name_prefix":"demo-prefix-1"}, + "demo-management-id-2":{"pool":"demo-pool-2","snapshot_name_prefix":"demo-prefix-2"}}' # Optional. + pool: demo-pool # Optional. Use to create the snapshot on a different pool than the source. + SpaceEfficiency: thin # Optional. Use to create the snapshot with a different space efficiency than the source. + snapshot_name_prefix: demo-prefix # Optional. 
+ + csi.storage.k8s.io/snapshotter-secret-name: demo-config-secret + csi.storage.k8s.io/snapshotter-secret-namespace: default diff --git a/deploy/kubernetes/examples/demo-volumesnapshotclass.yaml b/deploy/kubernetes/examples/demo-volumesnapshotclass.yaml new file mode 100644 index 000000000..a22fdf6ad --- /dev/null +++ b/deploy/kubernetes/examples/demo-volumesnapshotclass.yaml @@ -0,0 +1,13 @@ +apiVersion: snapshot.storage.k8s.io/v1beta1 +kind: VolumeSnapshotClass +metadata: + name: demo-volumesnapshotclass +driver: block.csi.ibm.com +deletionPolicy: Delete +parameters: + pool: demo-pool # Optional. Use to create the snapshot on a different pool than the source. + SpaceEfficiency: thin # Optional. Use to create the snapshot with a different space efficiency than the source. + snapshot_name_prefix: demo-prefix # Optional. + + csi.storage.k8s.io/snapshotter-secret-name: demo-secret + csi.storage.k8s.io/snapshotter-secret-namespace: default diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md index 6f1598b62..b189c9cac 100644 --- a/docs/SUMMARY.md +++ b/docs/SUMMARY.md @@ -1,15 +1,17 @@ # Table of Contents -* [CSI repository welcome](book_files/csi_block_storage_kc_welcome.md) +* [Welcome](book_files/csi_block_storage_kc_welcome.md) * [What's new](book_files/csi_block_storage_kc_whatsnew.md) * [Release notes](book_files/csi_block_storage_kc_rn.md) - * [What's new in 1.6.0](content/release_notes/csi_rn_whatsnew.md) + * [What's new in 1.7.0](content/release_notes/csi_rn_whatsnew.md) * [Compatibility and requirements](content/release_notes/csi_rn_compatibility.md) * [Supported storage systems](content/release_notes/csi_rn_supported_storage.md) * [Supported operating systems](content/release_notes/csi_rn_supported_os.md) * [Supported orchestration platforms](content/release_notes/csi_rn_supported_orchestration.md) * [Change log](content/release_notes/csi_rn_changelog.md) + * [1.7.0 (September 2021)](content/release_notes/csi_rn_changelog_1.7.0.md) * [1.6.0 (June 
2021)](content/release_notes/csi_rn_changelog_1.6.0.md) + * [1.5.1 (July 2021)](content/release_notes/csi_rn_changelog_1.5.1.md) * [1.5.0 (March 2021)](content/release_notes/csi_rn_changelog_1.5.0.md) * [1.4.0 (December 2020)](content/release_notes/csi_rn_changelog_1.4.0.md) * [1.3.0 (September 2020)](content/release_notes/csi_rn_changelog_1.3.0.md) @@ -38,14 +40,20 @@ * [Creating a StatefulSet](content/configuration/csi_ug_config_create_statefulset.md) * [Creating a VolumeSnapshotClass](content/configuration/csi_ug_config_create_vol_snapshotclass.md) * [Creating a VolumeSnapshot](content/configuration/csi_ug_config_create_snapshots.md) + * [Creating a VolumeReplicationClass](content/configuration/csi_ug_config_create_vol_replicationclass.md) + * [Finding a `system_id`](content/configuration/csi_ug_config_replication_find_systemid.md) + * [Creating a VolumeReplication](content/configuration/csi_ug_config_create_replication.md) * [Expanding a PersistentVolumeClaim (PVC)](content/configuration/csi_ug_config_expand_pvc.md) + * [Configuring for CSI Topology](content/configuration/csi_ug_config_topology.md) + * [Creating a Secret with topology awareness](content/configuration/csi_ug_config_create_secret_topology.md) + * [Creating a StorageClass with topology awareness](content/configuration/csi_ug_config_create_storageclasses_topology.md) + * [Creating a VolumeSnapshotClass with topology awareness](content/configuration/csi_ug_config_create_vol_snapshotclass_topology.md) * [Advanced configuration](content/configuration/csi_ug_config_advanced.md) * [Importing an existing volume](content/configuration/csi_ug_config_advanced_importvol.md) * [Using IBM block storage CSI driver](content/using/csi_ug_using.md) * [Sample configurations for running a stateful container](content/using/csi_ug_using_sample.md) * [Troubleshooting](content/troubleshooting/csi_ug_troubleshooting.md) - * [Log collection](content/troubleshooting/csi_ug_troubleshooting_logs.md) - * [Detecting 
errors](content/troubleshooting/csi_ug_troubleshooting_detect_errors.md) + * [Log and status collection](content/troubleshooting/csi_ug_troubleshooting_logs.md) * [Recovering a pod volume attachment from a crashed Kubernetes node](content/troubleshooting/csi_ug_troubleshooting_node_crash.md) * [Miscellaneous troubleshooting](content/troubleshooting/csi_ug_troubleshooting_misc.md) * [Notices](book_files/storage_csi_notices.md) diff --git a/docs/book_files/IBM_block_storage_CSI_driver_1.6.0_RN.ditamap b/docs/book_files/IBM_block_storage_CSI_driver_1.7.0_RN.ditamap similarity index 92% rename from docs/book_files/IBM_block_storage_CSI_driver_1.6.0_RN.ditamap rename to docs/book_files/IBM_block_storage_CSI_driver_1.7.0_RN.ditamap index e194fba59..77a5e1149 100644 --- a/docs/book_files/IBM_block_storage_CSI_driver_1.6.0_RN.ditamap +++ b/docs/book_files/IBM_block_storage_CSI_driver_1.7.0_RN.ditamap @@ -3,8 +3,9 @@ IBM block storage CSI -driver1.6.0 -Release Notes +driver1.7.0 +Release Notes diff --git a/docs/book_files/IBM_block_storage_CSI_driver_1.6.0_UG.ditamap b/docs/book_files/IBM_block_storage_CSI_driver_1.7.0_UG.ditamap similarity index 94% rename from docs/book_files/IBM_block_storage_CSI_driver_1.6.0_UG.ditamap rename to docs/book_files/IBM_block_storage_CSI_driver_1.7.0_UG.ditamap index d989abec3..6ac483eac 100644 --- a/docs/book_files/IBM_block_storage_CSI_driver_1.6.0_UG.ditamap +++ b/docs/book_files/IBM_block_storage_CSI_driver_1.7.0_UG.ditamap @@ -6,7 +6,7 @@ IBM block storage CSI -driver1.6.0 +driver1.7.0 User Guide @@ -17,9 +17,6 @@ driver1.6.0 - -SC27-9590-10 - 2021 diff --git a/docs/book_files/csi_block_storage_kc_pdfs.md b/docs/book_files/csi_block_storage_kc_pdfs.md index 0e2f928dc..f6ca0deab 100644 --- a/docs/book_files/csi_block_storage_kc_pdfs.md +++ b/docs/book_files/csi_block_storage_kc_pdfs.md @@ -8,8 +8,8 @@ To view a PDF file, you need Adobe™ Reader. 
You can download it at no charge f |IBM block storage CSI driver publication|Description|PDF download link| |----------------------------------------|-----------|-----------------| -|*Release notes*|This publication describes requirements, compatibility, change log, and known issues information for version 1.6.0.
First Edition (June 2021) for version 1.6.0.|![PDF icon](PDF_icon.jpg) [Download](../pdf/IBM_block_storage_CSI_driver_1.6.0_RN.pdf)| -|*User guide*|This publication describes how to prepare for, install, configure, and use IBM block storage CSI driver.
Publication number: SC27-9590-10.|![PDF icon](PDF_icon.jpg) [Download](../pdf/IBM_block_storage_CSI_driver_1.6.0_UG.pdf)| +|*Release notes*|This publication describes requirements, compatibility, change log, and known issues information for version 1.7.0.
First Edition (September 2021) for version 1.7.0.|![PDF icon](PDF_icon.jpg) [Download](../pdf/IBM_block_storage_CSI_driver_1.7.0_RN.pdf)| +|*User guide*|This publication describes how to prepare for, install, configure, and use IBM block storage CSI driver.
First Edition (September 2021).|![PDF icon](PDF_icon.jpg) [Download](../pdf/IBM_block_storage_CSI_driver_1.7.0_UG.pdf)| ## Related information and publications @@ -19,7 +19,7 @@ IBM resources - [IBM SAN Volume Controller documentation](https://www.ibm.com/docs/en/sanvolumecontroller) - [IBM Spectrum Scale documentation](https://www.ibm.com/docs/en/spectrum-scale) -- [IBM FlashSystem® 5200, 5000, 5100, Storwize® V5100 and V5000E documentation](http://www.ibm.com/docs/en/f555sv-and-v) +- [IBM FlashSystem® 5200, 5000, 5100, Storwize® V5100 and V5000E documentation](https://www.ibm.com/docs/en/flashsystem-5x00) - [IBM FlashSystem™ 7200 and Storwize V7000 documentation](https://www.ibm.com/docs/en/flashsystem-7x00) - [IBM Spectrum Virtualize as Software Only documentation](https://www.ibm.com/docs/en/spectrumvirtualsoftw) - [IBM FlashSystem 9200 and 9100 documentation](https://www.ibm.com/docs/en/flashsystem-9x00) diff --git a/docs/book_files/csi_block_storage_kc_rn.md b/docs/book_files/csi_block_storage_kc_rn.md index f8b740895..4da04204f 100644 --- a/docs/book_files/csi_block_storage_kc_rn.md +++ b/docs/book_files/csi_block_storage_kc_rn.md @@ -1,4 +1,4 @@ # Release notes -The following release information is available for version 1.6.0 of the IBM® block storage CSI driver. +The following release information is available for version 1.7.0 of the IBM® block storage CSI driver. diff --git a/docs/book_files/csi_block_storage_kc_welcome.md b/docs/book_files/csi_block_storage_kc_welcome.md index 289027462..6a51d9f92 100644 --- a/docs/book_files/csi_block_storage_kc_welcome.md +++ b/docs/book_files/csi_block_storage_kc_welcome.md @@ -1,4 +1,4 @@ -# IBM block storage CSI driver 1.6.0 welcome page +# IBM block storage CSI driver 1.7.0 welcome page IBM block storage CSI driver is based on an open-source IBM project, included as a part of IBM Storage orchestration for containers. 
IBM Storage orchestration for containers enables enterprises to implement a modern container-driven hybrid multicloud environment that can diff --git a/docs/book_files/csi_block_storage_kc_whatsnew.md b/docs/book_files/csi_block_storage_kc_whatsnew.md index 8dd98fc58..4d3489921 100644 --- a/docs/book_files/csi_block_storage_kc_whatsnew.md +++ b/docs/book_files/csi_block_storage_kc_whatsnew.md @@ -1,8 +1,8 @@ # What's new -This topic lists the dates and nature of updates to the published information of IBM® block storage CSI driver 1.6.0. +This topic lists the dates and nature of updates to the published information of IBM® block storage CSI driver 1.7.0. |Date|Nature of updates to the published information| |----|----------------------------------------------| -|17 June 2021|The version information was added to the IBM Documentation. For more information about this version, see the [What's new in 1.6.0](../content/release_notes/csi_rn_whatsnew.md) section in the release notes.| +|30 September 2021|The version information was added to the IBM Documentation.
For more information about this version, see the [What's new in 1.7.0](../content/release_notes/csi_rn_whatsnew.md) section in the release notes.
In addition, a new [Lifecycle support matrix](https://www.ibm.com/docs/en/stg-block-csi-driver?topic=SSRQ8T/landing/csi_lifecycle_support_matrix.html) was added to IBM Documentation for this product.| diff --git a/docs/book_files/csi_rn_content.ditamap b/docs/book_files/csi_rn_content.ditamap index 457e7ce2d..c9ed64556 100644 --- a/docs/book_files/csi_rn_content.ditamap +++ b/docs/book_files/csi_rn_content.ditamap @@ -2,7 +2,7 @@ -<tm trademark="IBM" tmtype="reg">IBM</tm> block storage CSI driver 1.6.0 Release <tm +<title><tm trademark="IBM" tmtype="reg">IBM</tm> block storage CSI driver 1.7.0 Release <tm trademark="Notes" tmtype="reg">Notes</tm> content map @@ -12,7 +12,10 @@ trademark="Notes" tmtype="reg">Notes content map + + -First Edition (June 2021) -This edition applies to version 1.6.0 of the IBM +First Edition (September 2021) +This edition applies to version 1.7.0 of the IBM block storage CSI driver software package. Newer document editions may be issued for the same product version in order to add missing information, update information, or amend typographical errors. The edition is reset to 'First Edition' for every new product version. diff --git a/docs/book_files/csi_ug_content.ditamap b/docs/book_files/csi_ug_content.ditamap index 5a7c6d706..6683d851f 100644 --- a/docs/book_files/csi_ug_content.ditamap +++ b/docs/book_files/csi_ug_content.ditamap @@ -32,14 +32,29 @@ navtitle="Installing the driver with the OpenShift web console" format="markdown - - - - + + + + + + + + - - + + + + + + + @@ -48,7 +63,6 @@ navtitle="Sample configurations for running a stateful container" format="markdo - diff --git a/docs/content/configuration/csi_ug_config.md b/docs/content/configuration/csi_ug_config.md index 290a89973..5cab8e384 100644 --- a/docs/content/configuration/csi_ug_config.md +++ b/docs/content/configuration/csi_ug_config.md @@ -2,18 +2,18 @@ Use this information to configure the IBM® block storage CSI driver after installation. 
-Once the driver is installed and running (see [Installing the operator and driver](../installation/csi_ug_install_operator.md)), in order to use the driver and run stateful applications using IBM block storage systems, the relevant yaml files must be created. - -Multiple yaml files per type can be created (with different configurations), according to your storage needs. - -- [Creating a Secret](csi_ug_config_create_secret.md) -- [Creating a StorageClass](csi_ug_config_create_storageclasses.md) -- [Creating a PersistentVolumeClaim (PVC)](csi_ug_config_create_pvc.md) -- [Creating a StatefulSet](csi_ug_config_create_statefulset.md) -- [Creating a VolumeSnapshotClass](csi_ug_config_create_vol_snapshotclass.md) -- [Creating a VolumeSnapshot](csi_ug_config_create_snapshots.md) -- [Expanding a PersistentVolumeClaim (PVC)](csi_ug_config_expand_pvc.md) -- [Advanced configuration](csi_ug_config_advanced.md) - +Once the driver is installed and running (see [Installing the operator and driver](../installation/csi_ug_install_operator.md)), in order to use the driver and run stateful applications using IBM block storage systems, the relevant YAML files must be created. +Multiple YAML files per type can be created (with different configurations), according to your storage needs. 
+- [Creating a Secret](csi_ug_config_create_secret.md) +- [Creating a StorageClass](csi_ug_config_create_storageclasses.md) +- [Creating a PersistentVolumeClaim (PVC)](csi_ug_config_create_pvc.md) +- [Creating a StatefulSet](csi_ug_config_create_statefulset.md) +- [Creating a VolumeSnapshotClass](csi_ug_config_create_vol_snapshotclass.md) +- [Creating a VolumeSnapshot](csi_ug_config_create_snapshots.md) +- [Creating a VolumeReplicationClass](csi_ug_config_create_vol_replicationclass.md) +- [Creating a VolumeReplication](csi_ug_config_create_replication.md) +- [Expanding a PersistentVolumeClaim (PVC)](csi_ug_config_expand_pvc.md) +- [Configuring for CSI Topology](csi_ug_config_topology.md) +- [Advanced configuration](csi_ug_config_advanced.md) \ No newline at end of file diff --git a/docs/content/configuration/csi_ug_config_advanced_importvol.md b/docs/content/configuration/csi_ug_config_advanced_importvol.md index d8d81d101..82e7cd7bb 100644 --- a/docs/content/configuration/csi_ug_config_advanced_importvol.md +++ b/docs/content/configuration/csi_ug_config_advanced_importvol.md @@ -1,121 +1,139 @@ # Importing an existing volume -Use this information to import volumes created externally from the IBM® block storage CSI driver by using a persistent volume (PV) yaml file. +Use this information to import volumes that were created externally from the IBM® block storage CSI driver by using a persistent volume (PV) YAML file. 
-Before starting to import an existing volume, find the following information in the existing volume in order to include the information in the persistent volume (PV) yaml file: +Before starting to import an existing volume, find the following information in the existing volume in order to include the information in the persistent volume (PV) YAML file: +- `volumeHandle` +- `volumeAttributes` (optional) + + Including: -- `volumeHandle` -- `volumeAttributes` (optional) - - Including: - - - `pool_name`: _Name of Pool where volume is located_ (Listed as `pool_id` for DS8000® Family systems.) - - `storage_type`: - - `volume_name`: _Volume name_ - - `array_address`: _Array address_ + - `pool_name`: _Name of Pool where volume is located_ (Listed as `pool_id` for DS8000® Family systems.) + - `storage_type`: <`SVC` | `A9000` | `DS8K`> + - `volume_name`: _Volume name_ + - `array_address`: _Array address_ To find the `volumeHandle`, use one of the following procedures: -- Through command line (for Spectrum Virtualize Family): +- **For Spectrum Virtualize Family** - `lsvdisk | grep vdisk_UID` - - ``` - lsvdisk vol0 | grep vdisk_UID - vdisk_UID 600507640082000B08000000000004FF - ``` + The `volumeHandle` is formatted as `SVC:id;vdisk_UID`. -- Through command line (for FlashSystem A9000 and A9000R): + - Through command line: + Find both the `id` and `vdisk_UID` attributes, by using the `lsvdisk` command. - `vol_list_extended vol=` + For more information, see **Command-line interface** > **Volume commands** > **lsvdisk** within your specific product documentation on [IBM Documentation](https://www.ibm.com/docs/en). - For example, for `vol1`: + - Through the management GUI: - ``` - A9000>> vol_list_extended vol=vol1 - Name WWN Product Serial Number - vol1 6001738CFC9035E8000000000091F0C0 60035E8000000000091F0C0 - ``` + 1. Select **Volumes** > **Volumes** from the side bar. -- Through the Spectrum Virtualize management GUI: + The **Volumes** page is displayed. - 1. 
Select **Volumes** > **Volumes** from the side bar. + 2. Browse to the volume that the port is on and right-click > **Properties**. - The **Volumes** page appears. + The Properties window is displayed. Use the **Volume ID** and **Volume UID** values. - 2. Browse to the volume that the port is on and right-click > **Properties**. + For more information about Spectrum Virtualize products, find your product information in [IBM Documentation](https://www.ibm.com/docs/). + +- **For FlashSystem A9000 and A9000R:** - The Properties window appears. Use the UID number. + The `volumeHandle` is formatted as `A9000:id;WWN`. + + - Through command line: - For more information about Spectrum Virtualize products, find your product information in [IBM Documentation](https://www.ibm.com/docs/). + Find the `id` and `WWN` for the volume, by using the `vol_list -f` command. -- Through the IBM Hyper-Scale Manager user interface for FlashSystem A9000 and A90000R storage systems: + For more information, see **Reference** > **Command-line reference (12.3.2.x)** > **Volume management commands** > **Listing volumes** within your specific product documentation on [IBM Documentation](https://www.ibm.com/docs/en). - 1. Select **Pools and Volumes Views** > **Volumes** from the side bar. + - Through the Hyper-Scale Management user interface: - The **Volumes** table is displayed. + 1. Select **Pools and Volumes Views** > **Volumes** from the side bar. - 2. Select the `Volume`. + The **Volumes** table is displayed. - The **Volume Properties** form appears. + 2. Select the `Volume`. - 3. Use the **ID** number. + The **Volume Properties** form is displayed. + + 3. Use the **ID** and **WWN** values. - For more information, see [IBM Hyper-Scale Manager documentation](https://www.ibm.com/docs/en/hyper-scale-manager/). + For more information, see [IBM Hyper-Scale Manager documentation](https://www.ibm.com/docs/en/hyper-scale-manager/). 
+- **For DS8000 Family:** -Use this procedure to help build a PV yaml file for your volumes. + The `volumeHandle` is formatted as `DS8K:id;GUID`. + The `id` is the last four digits of the `GUID`. -**Note:** These steps are setup for importing volumes from a Spectrum Virtualize Family system. Change parameters, as needed. + - Through the command line: -1. Create a persistent volume (PV) yaml file. + Find the `GUID` for the volume, by using the `lsfbvol` command. - **Important:** Be sure to include the `storageClassName` and `controllerPublishSecretRef` parameters or errors will occur. + For more information, see **Reference** > **Command-line interface** > **CLI commands** > **Storage configuration commands** > **Fixed block logical volume specific commands** > **lsfbvol** within your specific product documentation on [IBM Documentation](https://www.ibm.com/docs/en). -2. Take the `volume_name` and other optional information (collected before the procedure) and insert it into the yaml file. + - Through the DS8000 Storage Management GUI: -
-    apiVersion: v1
-    kind: PersistentVolume
-    metadata:
-      # annotations:
-        # pv.kubernetes.io/provisioned-by: block.csi.ibm.com
-      name: vol1-pv
-    spec:
-      accessModes:
-      - ReadWriteOnce
-      capacity:
-        storage: 1Gi
-      csi:
-        controllerPublishSecretRef:
-          name: demo-secret
-          namespace: default
-        driver: block.csi.ibm.com
-        # volumeAttributes:
-          # pool_name: ibmc-block-gold
-          # storage_type: SVC
-          # volume_name: vol1
-          # array_address: baremetal10-cluster.xiv.ibm.com
-        volumeHandle: SVC:600507640082000B08000000000004FF
-      # persistentVolumeReclaimPolicy: Retain
-      storageClassName: ibmc-block-gold
-      # volumeMode: Filesystem
-    
+ 1. Select **Volumes** from the side bar. + + The **Volumes** page is displayed. + + 2. Browse to the volume that the port is on and right-click > **Properties**. + + The Properties window is displayed. Use the **GUID** value. + + For more information about DS8000 Family products, find your product information in [IBM Documentation](https://www.ibm.com/docs/). + + +Use this procedure to help build a PV YAML file for your volumes. + +**Note:** These steps are set up for importing volumes from a Spectrum Virtualize Family system. Change parameters, as needed. + +1. Create a persistent volume (PV) YAML file. + + **Important:** Be sure to include the `storageClassName` and `controllerPublishSecretRef` parameters or errors may occur. -3. Create a PersistentVolumeClaim (PVC) yaml file. +2. Take the `volume_name` and other optional information (collected before the procedure) and insert it into the YAML file (under `spec.csi.volumeAttributes`). + + **Important:** If using the CSI Topology feature, the `spec.csi.volumeHandle` contains the management ID (see [Creating a StorageClass with topology awareness](csi_ug_config_create_storageclasses_topology.md)). In the example below, the `spec.csi.volumeHandle` would read similar to the following: `SVC:demo-system-id-1:0;600507640082000B08000000000004FF`. + + apiVersion: v1 + kind: PersistentVolume + metadata: + # annotations: + # pv.kubernetes.io/provisioned-by: block.csi.ibm.com + name: demo-pv + spec: + accessModes: + - ReadWriteOnce + capacity: + storage: 1Gi + csi: + controllerPublishSecretRef: + name: demo-secret + namespace: default + driver: block.csi.ibm.com + # volumeAttributes: + # pool_name: demo-pool + # storage_type: SVC + # volume_name: demo-prefix_demo-pvc-file-system + # array_address: demo-management-address + volumeHandle: SVC:0;600507640082000B08000000000004FF + # persistentVolumeReclaimPolicy: Retain + storageClassName: demo-storageclass + # volumeMode: Filesystem + +3. 
Create a PersistentVolumeClaim (PVC) YAML file. **Note:** - - To include a specific 5 Gi PV, be sure to include the `storageClassName`. - - For more information about creating a PVC yaml file, see [Creating a PersistentVolumeClaim (PVC)](csi_ug_config_create_pvc.md). + - Be sure to include the `storageClassName`. + - For more information about creating a PVC YAML file, see [Creating a PersistentVolumeClaim (PVC)](csi_ug_config_create_pvc.md). - ```screen + ``` apiVersion: v1 kind: PersistentVolumeClaim metadata: - # annotations: - # pv.kubernetes.io/provisioned-by: block.csi.ibm.com - name: vol1-pvc + name: demo-pvc spec: accessModes: - ReadWriteOnce @@ -123,42 +141,5 @@ Use this procedure to help build a PV yaml file for your volumes. requests: storage: 1Gi storageClassName: ibmc-block-gold - volumeName: vol1-pv - ``` - -4. Create a StatefulSet. - - For more information about creating a StatefulSet, see [Creating a StatefulSet](csi_ug_config_create_statefulset.md). - - ```screen - kind: StatefulSet - apiVersion: apps/v1 - metadata: - name: sanity-statefulset - spec: - selector: - matchLabels: - app: sanity-statefulset - serviceName: sanity-statefulset - replicas: 1 - template: - metadata: - labels: - app: sanity-statefulset - spec: - containers: - - name: container1 - image: registry.access.redhat.com/ubi8/ubi:latest - command: [ "/bin/sh", "-c", "--" ] - args: [ "while true; do sleep 30; done;" ] - volumeMounts: - - name: vol1 - mountPath: "/data" - volumes: - - name: vol1 - persistentVolumeClaim: - claimName: vol1-pvc - - ``` - - + volumeName: demo-pv + ``` \ No newline at end of file diff --git a/docs/content/configuration/csi_ug_config_create_pvc.md b/docs/content/configuration/csi_ug_config_create_pvc.md index f05c409ad..3ef849f9b 100644 --- a/docs/content/configuration/csi_ug_config_create_pvc.md +++ b/docs/content/configuration/csi_ug_config_create_pvc.md @@ -1,20 +1,20 @@ # Creating a PersistentVolumeClaim (PVC) -Create a PersistentVolumeClaim (PVC) yaml 
file for a persistent volume (PV). +Create a PersistentVolumeClaim (PVC) YAML file for a persistent volume (PV). -The IBM® block storage CSI driver supports using both file system and raw block volume types. +The IBM® block storage CSI driver supports using both file system and raw block volume modes. -**Important:** If not defined, the default type is `Filesystem`. Be sure to define the type as `Block` if this configuration is preferred. +**Important:** If not defined, the default mode is `Filesystem`. Be sure to define the mode as `Block` if this configuration is preferred. **Note:** The examples below create the PVC with a storage size 1 Gb. This can be changed, per customer needs. -Use the sections below for creating yaml files for PVCs with file system and raw block volume types. After each yaml file creation, use the `kubectl apply` command. +Use the sections below for creating YAML files for PVCs with file system and raw block volume modes. After each YAML file creation, use the `kubectl apply` command. ``` kubectl apply -f .yaml ``` -The `persistentvolumeclaim/ created` message is emitted. +The `persistentvolumeclaim/ created` message is emitted. Use the following sections, according to your PVC needs: @@ -25,87 +25,83 @@ Use the following sections, according to your PVC needs: ## Creating PVC for volume with Filesystem -Create a PVC yaml file, similar to the following demo-pvc-file-system.yaml file, with the size of 1 Gb. +Create a PVC YAML file, similar to the following `demo-pvc-file-system.yaml` file, with the size of 1 Gb, with `volumeMode` defined as `Filesystem`. **Note:** `volumeMode` is an optional field. `Filesystem` is the default if the value is not added. -
-kind: PersistentVolumeClaim
-apiVersion: v1
-metadata:
-  name: demo-pvc-file-system
-spec:
-  volumeMode: Filesystem  # Optional. The default is Filesystem.
-  accessModes:
-  - ReadWriteOnce
-  resources:
-    requests:
-      storage: 1Gi
-  storageClassName: demo-storageclass
-
+ kind: PersistentVolumeClaim + apiVersion: v1 + metadata: + name: demo-pvc-file-system + spec: + volumeMode: Filesystem # Optional. The default is Filesystem. + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: demo-storageclass ## Creating PVC for raw block volume -Create a PVC yaml file, similar to the following demo-pvc-raw-block.yaml file, with the size of 1 Gb. - -
-kind: PersistentVolumeClaim
-apiVersion: v1
-metadata:
-  name: demo-pvc-raw-block
-spec:
-  volumeMode: Block
-  accessModes:
-  - ReadWriteOnce
-  resources:
-    requests:
-      storage: 1Gi
-  storageClassName: demo-storageclass
-
+Create a PVC YAML file, similar to the following `demo-pvc-raw-block.yaml` file, with the size of 1 Gb, with `volumeMode` defined as `Block`. + + kind: PersistentVolumeClaim + apiVersion: v1 + metadata: + name: demo-pvc-raw-block + spec: + volumeMode: Block + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: demo-storageclass ## Creating PVC from volume snapshot -To create a PVC from an existing volume snapshot, create a PVC yaml file, similar to the following demo-pvc-from-snapshot.yaml file, with the size of 1 Gb. - -
-kind: PersistentVolumeClaim
-apiVersion: v1
-metadata:
-  name: demo-pvc-from-snapshot
-spec:
-  volumeMode: Filesystem
-  accessModes:
-  - ReadWriteOnce
-  resources:
-    requests:
-      storage: 1Gi
-  storageClassName: demo-storageclass
-  dataSource:
-    name: demo-snapshot
-    kind: VolumeSnapshot
-    apiGroup: snapshot.storage.k8s.io
-
+To create a PVC from an existing volume snapshot, create a PVC YAML file, similar to the following `demo-pvc-from-snapshot.yaml` file, with the size of 1 Gb. + +Update the `dataSource` parameters to reflect the existing volume snapshot information, where `kind` is `VolumeSnapshot`. + + kind: PersistentVolumeClaim + apiVersion: v1 + metadata: + name: demo-pvc-from-snapshot + spec: + volumeMode: Filesystem + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: demo-storageclass + dataSource: + name: demo-volumesnapshot + kind: VolumeSnapshot + apiGroup: snapshot.storage.k8s.io ## Creating a volume clone from an existing PVC **Note:** IBM FlashCopy® function is referred to as the more generic volume snapshots and cloning within this documentation set. Not all supported products use the FlashCopy function terminology. -To create a volume clone from an existing PVC object, create a PVC yaml file, similar to the following demo-pvc-cloned-pvc.yaml file, with the size of 1 Gb. - -
-kind: PersistentVolumeClaim
-apiVersion: v1
-metadata:
-  name: demo-pvc-cloned-pvc
-spec:
-  volumeMode: Filesystem
-  accessModes:
-  - ReadWriteOnce
-  resources:
-    requests:
-      storage: 1Gi
-  storageClassName: demo-storageclass
-  dataSource:
-    name: demo-pvc-file-system
-    kind: PersistentVolumeClaim
-
+To create a volume clone from an existing PVC object, create a PVC YAML file, similar to the following `demo-pvc-cloned-pvc.yaml` file, with the size of 1 Gb. + +Update the `dataSource` parameters to reflect the existing PVC object information, where `kind` is `PersistentVolumeClaim`. + + kind: PersistentVolumeClaim + apiVersion: v1 + metadata: + name: demo-pvc-cloned-pvc + spec: + volumeMode: Filesystem + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: demo-storageclass + dataSource: + name: demo-pvc-file-system + kind: PersistentVolumeClaim \ No newline at end of file diff --git a/docs/content/configuration/csi_ug_config_create_replication.md b/docs/content/configuration/csi_ug_config_create_replication.md new file mode 100644 index 000000000..bb4638b64 --- /dev/null +++ b/docs/content/configuration/csi_ug_config_create_replication.md @@ -0,0 +1,50 @@ +# Creating a VolumeReplication + +Create a VolumeReplication YAML file to replicate a specific PersistentVolumeClaim (PVC). + +VolumeReplicationClass needs to be present before a VolumeReplication can be created. For more information, see [Creating a VolumeReplicationClass](csi_ug_config_create_vol_replicationclass.md). + +**Note:** Remote copy function is referred to as the more generic volume replication within this documentation set. Not all supported products use the remote-copy function terminology. + +When replicating a volume, be sure to follow all of the replication configurations, found in [Compatibility and requirements](../installation/csi_ug_requirements.md) before volume replication. + +1. Replicate a specific PersistentVolumeClaim (PVC) using the `demo-volumereplication.yaml`. + + For more information about PVC configuration, see [Creating a PersistentVolumeClaim (PVC)](csi_ug_config_create_pvc.md). + + **Note:** Use the `spec.csi.volumeHandle` of the relevant target PersistentVolume (PV) for the `replicationHandle` value. 
+ + ``` + apiVersion: replication.storage.openshift.io/v1alpha1 + kind: VolumeReplication + metadata: + name: demo-volumereplication + namespace: default + spec: + volumeReplicationClass: demo-volumereplicationclass + replicationState: primary + replicationHandle: demo-replicationhandle + dataSource: + kind: PersistentVolumeClaim + name: demo-pvc-file-system # Ensure that this is in the same namespace as VolumeReplication. + ``` + +2. After the YAML file is created, apply it by using the `kubectl apply -f` command. + + ``` + kubectl apply -f .yaml + ``` + + The `volumereplication.replication.storage.openshift.io/ created` message is emitted. + +3. Verify that the volume was replicated. + + Run the `kubectl describe volumereplication` command. + + See the `status.state` section to see which of the following states the replication is in: + + - **Primary** Indicates that the source volume is the primary volume. + - **Secondary** Indicates that the source volume is the secondary volume. + - **Unknown** Indicates that the driver does not recognize the replication state. + + **Note:** For information about changing the replication state, see the [Usage](https://github.com/csi-addons/volume-replication-operator/tree/v0.2.0#usage) section of the Volume Replication Operator for csi-addons. \ No newline at end of file diff --git a/docs/content/configuration/csi_ug_config_create_secret.md b/docs/content/configuration/csi_ug_config_create_secret.md index 848bd497a..04dd67771 100644 --- a/docs/content/configuration/csi_ug_config_create_secret.md +++ b/docs/content/configuration/csi_ug_config_create_secret.md @@ -4,33 +4,35 @@ Create an array secret YAML file in order to define the storage credentials (use **Important:** When your storage system password is changed, be sure to also change the passwords in the corresponding secrets, particularly when LDAP is used on the storage systems.

Failing to do so causes mismatched passwords across the storage systems and the secrets, causing the user to be locked out of the storage systems. +**Note:** If using the CSI Topology feature, follow the steps in [Creating a Secret with topology awareness](csi_ug_config_create_secret_topology.md). + Use one of the following procedures to create and apply the secret: ## Creating an array secret file -1. Create the secret file, similar to the following demo-secret.yaml: +1. Create the secret file, similar to the following `demo-secret.yaml`: The `management_address` field can contain more than one address, with each value separated by a comma. ``` - kind: Secret - apiVersion: v1 - metadata: - name: demo-secret - namespace: default - type: Opaque - stringData: - management_address: demo-management-address # Array management addresses - username: demo-username # Array username - data: - password: ZGVtby1wYXNzd29yZA== # base64 array password + kind: Secret + apiVersion: v1 + metadata: + name: demo-secret + namespace: default + type: Opaque + stringData: + management_address: demo-management-address # Array management addresses + username: demo-username # Array username + data: + password: ZGVtby1wYXNzd29yZA== # base64 array password ``` 2. Apply the secret using the following command: - `kubectl apply -f demo-secret.yaml` + `kubectl apply -f .yaml` - The `secret/ created` message is emitted. + The `secret/ created` message is emitted. 
## Creating an array secret via command line @@ -38,7 +40,6 @@ Use one of the following procedures to create and apply the secret: Create the secret using the following command: - ``` - kubectl create secret generic --from-literal=username= --from-literal=password=--from-literal=management_address= -n - ``` - +``` +kubectl create secret generic demo-secret --from-literal=username=demo-username --from-literal=password=demo-password --from-literal=management_address=demo-management-address -n default +``` \ No newline at end of file diff --git a/docs/content/configuration/csi_ug_config_create_secret_topology.md b/docs/content/configuration/csi_ug_config_create_secret_topology.md new file mode 100644 index 000000000..920fd247f --- /dev/null +++ b/docs/content/configuration/csi_ug_config_create_secret_topology.md @@ -0,0 +1,46 @@ +# Creating a Secret with topology awareness + +Create an array secret YAML file to define the storage credentials (username and password) and address. Use this information for creating a Secret that is topology aware. + +**Note:** If you are not using the CSI Topology feature, follow the steps in [Creating a Secret](csi_ug_config_create_secret.md). + +Within the Secret, each user-defined management ID (here, represented by `demo-management-id-x`) is used to identify the storage system within other configuration files. + +**Note:** The management ID must start and end with a letter or number. In addition, the following symbols may be used within the management ID: `-`, `_`, and `.`. + +1. Create the secret file, similar to the following `demo-secret-config.json`: + + The `management_address` field can contain more than one address, with each value separated by a comma. 
+ + { + "demo-management-id-1": { + "username": "demo-username-1", + "password": "demo-password-1", + "management_address": "demo-management-address-1", + "supported_topologies": [ + { + "topology.block.csi.ibm.com/demo-region": "demo-region-1", + "topology.block.csi.ibm.com/demo-zone": "demo-zone-1" + } + ] + }, + "demo-management-id-2": { + "username": "demo-username-2", + "password": "demo-password-2", + "management_address": "demo-management-address-2", + "supported_topologies": [ + { + "topology.block.csi.ibm.com/demo-region": "demo-region-2", + "topology.block.csi.ibm.com/demo-zone": "demo-zone-2" + } + ] + } + } + +2. Apply the secret using the following command: + + `kubectl create secret generic -n --from-file=config=demo-secret-config.json` + + + The `secret/ created` message is emitted. + diff --git a/docs/content/configuration/csi_ug_config_create_snapshots.md b/docs/content/configuration/csi_ug_config_create_snapshots.md index 9e61769e1..d418d879d 100644 --- a/docs/content/configuration/csi_ug_config_create_snapshots.md +++ b/docs/content/configuration/csi_ug_config_create_snapshots.md @@ -1,27 +1,26 @@ # Creating a VolumeSnapshot -Create a VolumeSnapshot yaml file for a specific PersistentVolumeClaim (PVC). +Create a VolumeSnapshot YAML file for a specific PersistentVolumeClaim (PVC). VolumeSnapshotClass needs to be present before a VolumeSnapshot can be created. For more information, see [Creating a VolumeSnapshotClass](csi_ug_config_create_vol_snapshotclass.md). **Note:** - IBM® FlashCopy® function is referred to as the more generic volume snapshots and cloning within this documentation set. Not all supported products use the FlashCopy function terminology. -- For volume snapshot support, the minimum orchestration platform version requirements are Red Hat® OpenShift® 4.4 and Kubernetes 1.17. 
When creating volume snapshots, be sure to follow all of the snapshot configurations, found in [Compatibility and requirements](../installation/csi_ug_requirements.md) before snapshot creation. -1. Create a snapshot for a specific PersistentVolumeClaim (PVC) using the demo-snapshot.yaml. +1. Create a snapshot for a specific PersistentVolumeClaim (PVC) using the `demo-volumesnapshot.yaml`. For more information about PVC configuration, see [Creating a PersistentVolumeClaim (PVC)](csi_ug_config_create_pvc.md). - ```screen + ``` apiVersion: snapshot.storage.k8s.io/v1beta1 kind: VolumeSnapshot metadata: - name: demo-snapshot + name: demo-volumesnapshot spec: - volumeSnapshotClassName: demo-snapshotclass + volumeSnapshotClassName: demo-volumesnapshotclass source: persistentVolumeClaimName: demo-pvc-file-system ``` @@ -32,9 +31,11 @@ When creating volume snapshots, be sure to follow all of the snapshot configurat kubectl apply -f .yaml ``` -3. Verify that the VolumeSnapshot was created. + The `volumesnapshot.snapshot.storage.k8s.io/ created` message is emitted. + +3. Verify that the volume snapshot was created. - Run the kubectl describe volumesnapshot command. + Run the `kubectl describe volumesnapshot ` command. See the **Status** section of the output for the following: diff --git a/docs/content/configuration/csi_ug_config_create_statefulset.md b/docs/content/configuration/csi_ug_config_create_statefulset.md index df8144e6f..390b56ef0 100644 --- a/docs/content/configuration/csi_ug_config_create_statefulset.md +++ b/docs/content/configuration/csi_ug_config_create_statefulset.md @@ -1,127 +1,126 @@ # Creating a StatefulSet -Create a StatefulSet yaml file to manage stateful applications. +Create a StatefulSet YAML file to manage stateful applications. -The IBM® block storage CSI driver supports using both file system and raw block volume types. +The IBM® block storage CSI driver supports both file system and raw block volume modes. 
-StatefulSets can include volumes with file systems, raw block volume systems, or both. +StatefulSets can include file system volumes, raw block volumes, or both. -**Important:** When defining the StatefulSet configuration, be sure to define volumes according to the PVC type. +**Important:** When defining the StatefulSet configuration, be sure to define volumes according to the PVC volume mode. -Use the sections below for yaml creation of StatefulSets with file system, raw block volume, and mixed types. After each yaml file creation, use the `kubectl apply` command. +Use the following sections for YAML creation of StatefulSets with file system, raw block volume, and mixed volume modes. After each YAML file creation, use the `kubectl apply` command. ``` kubectl apply -f .yaml ``` -The `statefulset.apps/ created` message is emitted. +The `statefulset.apps/ created` message is emitted. ## Creating a StatefulSet with file system volumes -Create a StatefulSet yaml file, similar to the following demo-statefulset-file-system.yaml file. - -
-kind: StatefulSet
-apiVersion: apps/v1
-metadata:
-  name: demo-statefulset-file-system
-spec:
-  selector:
-    matchLabels:
-      app: demo-statefulset
-  serviceName: demo-statefulset
-  replicas: 1
-  template:
+Create a StatefulSet YAML file, similar to the following `demo-statefulset-file-system.yaml` file.
+
+Be sure to indicate the `volumeMounts`, listing each volume's name and path. In this example, the `mountPath` is listed as `"/data"`.
+
+    kind: StatefulSet
+    apiVersion: apps/v1
     metadata:
-      labels:
-        app: demo-statefulset
+      name: demo-statefulset-file-system
     spec:
-      containers:
-      - name: demo-container
-        image: registry.access.redhat.com/ubi8/ubi:latest
-        command: [ "/bin/sh", "-c", "--" ]
-        args: [ "while true; do sleep 30; done;" ]
-        volumeMounts:
+      selector:
+        matchLabels:
+          app: demo-statefulset
+      serviceName: demo-statefulset
+      replicas: 1
+      template:
+        metadata:
+          labels:
+            app: demo-statefulset
+        spec:
+          containers:
+          - name: demo-container
+            image: registry.access.redhat.com/ubi8/ubi:latest
+            command: [ "/bin/sh", "-c", "--" ]
+            args: [ "while true; do sleep 30; done;" ]
+            volumeMounts:
+              - name: demo-volume-file-system
+                mountPath: "/data"
+          volumes:
           - name: demo-volume-file-system
-            mountPath: "/data"
-      volumes:
-      - name: demo-volume-file-system
-        persistentVolumeClaim:
-          claimName: demo-pvc-file-system
-
+ persistentVolumeClaim: + claimName: demo-pvc-file-system ## Creating a StatefulSet with raw block volume -Create a StatefulSet yaml file, similar to the following demo-statefulset-raw-block.yaml file. - -
-kind: StatefulSet
-apiVersion: apps/v1
-metadata:
-  name: demo-statefulset-raw-block
-spec:
-  selector:
-    matchLabels:
-      app: demo-statefulset
-  serviceName: demo-statefulset
-  replicas: 1
-  template:
+Create a StatefulSet YAML file, similar to the following `demo-statefulset-raw-block.yaml` file.
+
+Be sure to indicate the `volumeDevices`, listing each volume's name and path. In this example, the `devicePath` is listed as `"/dev/block"`.
+
+    kind: StatefulSet
+    apiVersion: apps/v1
     metadata:
-      labels:
-        app: demo-statefulset
+      name: demo-statefulset-raw-block
     spec:
-      containers:
-      - name: demo-container
-        image: registry.access.redhat.com/ubi8/ubi:latest
-        command: [ "/bin/sh", "-c", "--" ]
-        args: [ "while true; do sleep 30; done;" ]
-        volumeDevices:
+      selector:
+        matchLabels:
+          app: demo-statefulset
+      serviceName: demo-statefulset
+      replicas: 1
+      template:
+        metadata:
+          labels:
+            app: demo-statefulset
+        spec:
+          containers:
+          - name: demo-container
+            image: registry.access.redhat.com/ubi8/ubi:latest
+            command: [ "/bin/sh", "-c", "--" ]
+            args: [ "while true; do sleep 30; done;" ]
+            volumeDevices:
+              - name: demo-volume-raw-block
+                devicePath: "/dev/block"
+          volumes:
           - name: demo-volume-raw-block
-            devicePath: "/dev/block"
-      volumes:
-      - name: demo-volume-raw-block
-        persistentVolumeClaim:
-          claimName: demo-pvc-raw-block
-
+ persistentVolumeClaim: + claimName: demo-pvc-raw-block ## Creating a StatefulSet with both raw block and file system volumes -Create a StatefulSet yaml file, similar to the following demo-statefulset-combined.yaml file. - -
-kind: StatefulSet
-apiVersion: apps/v1
-metadata:
-  name: demo-statefulset-combined
-spec:
-  selector:
-    matchLabels:
-      app: demo-statefulset
-  serviceName: demo-statefulset
-  replicas: 1
-  template:
+Create a StatefulSet YAML file, similar to the following `demo-statefulset.yaml` file.
+
+In a StatefulSet file that uses both volume modes, it is important to indicate both the `volumeMounts` and `volumeDevices` parameters.
+
+    kind: StatefulSet
+    apiVersion: apps/v1
     metadata:
-      labels:
-        app: demo-statefulset
+      name: demo-statefulset
     spec:
-      containers:
-      - name: demo-container
-        image: registry.access.redhat.com/ubi8/ubi:latest
-        command: [ "/bin/sh", "-c", "--" ]
-        args: [ "while true; do sleep 30; done;" ]
-        volumeMounts:
+      selector:
+        matchLabels:
+          app: demo-statefulset
+      serviceName: demo-statefulset
+      replicas: 1
+      template:
+        metadata:
+          labels:
+            app: demo-statefulset
+        spec:
+          containers:
+          - name: demo-container
+            image: registry.access.redhat.com/ubi8/ubi:latest
+            command: [ "/bin/sh", "-c", "--" ]
+            args: [ "while true; do sleep 30; done;" ]
+            volumeMounts:
+              - name: demo-volume-file-system
+                mountPath: "/data"
+            volumeDevices:
+              - name: demo-volume-raw-block
+                devicePath: "/dev/block"
+          volumes:
           - name: demo-volume-file-system
-            mountPath: "/data"
-        volumeDevices:
+            persistentVolumeClaim:
+              claimName: demo-pvc-file-system
           - name: demo-volume-raw-block
-            devicePath: "/dev/block"            
-      volumes:
-      - name: demo-volume-file-system
-        persistentVolumeClaim:
-          claimName: demo-pvc-file-system
-      - name: demo-volume-raw-block
-        persistentVolumeClaim:
-          claimName: demo-pvc-raw-block
-
- + persistentVolumeClaim: + claimName: demo-pvc-raw-block diff --git a/docs/content/configuration/csi_ug_config_create_storageclasses.md b/docs/content/configuration/csi_ug_config_create_storageclasses.md index 52d3373d7..750dc8d3e 100644 --- a/docs/content/configuration/csi_ug_config_create_storageclasses.md +++ b/docs/content/configuration/csi_ug_config_create_storageclasses.md @@ -1,14 +1,16 @@ # Creating a StorageClass -Create a storage class yaml file in order to define the storage system pool name, secret reference, `SpaceEfficiency`, and `fstype`. +Create a storage class YAML file in order to define the storage parameters, such as pool name, secret reference, `SpaceEfficiency`, and `fstype`. + +**Note:** If you are using the CSI Topology feature, in addition to the information and parameter definitions provided here, be sure to follow the steps in [Creating a StorageClass with topology awareness](csi_ug_config_create_storageclasses_topology.md). Use the following procedure to create and apply the storage classes. **Note:** This procedure is applicable for both Kubernetes and Red Hat® OpenShift®. For Red Hat OpenShift, replace `kubectl` with `oc` in all relevant commands. -Create a storage class yaml file, similar to the following demo-storageclass.yaml. +Create a storage class YAML file, similar to the following `demo-storageclass.yaml` and update the storage parameters as needed. -Update the capabilities, pools, and array secrets, as needed. +When configuring the file, be sure to use the same array secret and array secret namespace as defined in [Creating a Secret](csi_ug_config_create_secret.md). Use the `SpaceEfficiency` parameters for each storage system, as defined in [the following table](#spaceefficiency). These values are not case-sensitive. 
@@ -17,8 +19,8 @@ _**Table:** `SpaceEfficiency` parameter definitions |Storage system type|SpaceEfficiency parameter options| |-------------------|---------------------------------| |IBM FlashSystem® A9000 and A9000R|Always includes deduplication and compression. No need to specify during configuration.| -|IBM Spectrum® Virtualize Family|- thick (default value)
- thin
- compressed
- deduplicated

**Note:** If not specified, the default value is thick.| -|IBM® DS8000® Family| - none (default value)
- thin

**Note:** If not specified, the default value is none.| +|IBM Spectrum® Virtualize Family|- `thick` (default value)
- `thin`
- `compressed`
- `deduplicated`

**Note:** If not specified, the default value is thick.| +|IBM® DS8000® Family| - `none` (default value)
- `thin`

**Note:** If not specified, the default value is `none`.| - The IBM DS8000 Family `pool` value is the pool ID and not the pool name as is used in other storage systems. - Be sure that the `pool` value is the name of an existing pool on the storage system. @@ -29,36 +31,29 @@ _**Table:** `SpaceEfficiency` parameter definitions - The `csi.storage.k8s.io/fstype` parameter is optional. The values that are allowed are _ext4_ or _xfs_. The default value is _ext4_. - The `volume_name_prefix` parameter is optional. -**Note:** For IBM DS8000 Family, the maximum prefix length is five characters. The maximum prefix length for other systems is 20 characters.

For storage systems that use Spectrum Virtualize, the `CSI_` prefix is added as default if not specified by the user. +**Note:** For IBM DS8000 Family, the maximum prefix length is 5 characters. The maximum prefix length for other systems is 20 characters.

For storage systems that use Spectrum Virtualize, the `CSI` prefix is added as default if not specified by the user. - kind: StorageClass - apiVersion: storage.k8s.io/v1 - metadata: - name: demo-storageclass - provisioner: block.csi.ibm.com - parameters: - SpaceEfficiency: deduplicated # Optional. - pool: demo-pool - - csi.storage.k8s.io/provisioner-secret-name: demo-secret - csi.storage.k8s.io/provisioner-secret-namespace: default - csi.storage.k8s.io/controller-publish-secret-name: demo-secret - csi.storage.k8s.io/controller-publish-secret-namespace: default - csi.storage.k8s.io/controller-expand-secret-name: demo-secret - csi.storage.k8s.io/controller-expand-secret-namespace: default - - csi.storage.k8s.io/fstype: xfs # Optional. Values ext4\xfs. The default is ext4. - volume_name_prefix: demoPVC # Optional. - allowVolumeExpansion: true + kind: StorageClass + apiVersion: storage.k8s.io/v1 + metadata: + name: demo-storageclass + provisioner: block.csi.ibm.com + parameters: + pool: demo-pool + SpaceEfficiency: thin # Optional. + volume_name_prefix: demo-prefix # Optional. + + csi.storage.k8s.io/fstype: xfs # Optional. Values ext4/xfs. The default is ext4. + csi.storage.k8s.io/secret-name: demo-secret + csi.storage.k8s.io/secret-namespace: default + allowVolumeExpansion: true Apply the storage class. ``` - kubectl apply -f demo-storageclass.yaml + kubectl apply -f .yaml ``` -The `storageclass.storage.k8s.io/demo-storageclass created` message is emitted. - - +The `storageclass.storage.k8s.io/ created` message is emitted. 
\ No newline at end of file diff --git a/docs/content/configuration/csi_ug_config_create_storageclasses_topology.md b/docs/content/configuration/csi_ug_config_create_storageclasses_topology.md new file mode 100644 index 000000000..14e87a773 --- /dev/null +++ b/docs/content/configuration/csi_ug_config_create_storageclasses_topology.md @@ -0,0 +1,46 @@ +# Creating a StorageClass with topology awareness + +When using the CSI Topology feature, different parameters must be taken into account when creating a storage class YAML file with specific `by_management_id` requirements. Use this information to help define a StorageClass that is topology aware. + +**Note:** For information and parameter definitions that are not related to topology awareness, be sure to see the information provided in [Creating a StorageClass](csi_ug_config_create_storageclasses.md), in addition to the current section. + +The StorageClass file must be defined to contain topology information, based on the labels that were already defined on the nodes in the cluster (see [Compatibility and requirements](../installation/csi_ug_requirements.md)). This determines the storage pools that are then served as candidates for PersistentVolumeClaim (PVC) requests made, as well as the subset of nodes that can make use of the volumes provisioned by the CSI driver. + +With topology awareness, the StorageClass must have the `volumeBindingMode` set to `WaitForFirstConsumer` (as defined in the `.yaml` example below). This defines that any PVCs that are requested with this specific StorageClass will wait to be configured until the CSI driver can see the worker node topology. + +The `by_management_id` parameter is optional and values such as the `pool`, `SpaceEfficiency`, and `volume_name_prefix` may all be specified. + +The various `by_management_id` parameters are chosen within the following hierarchical order: +1. From within the `by_management_id` parameter, per system (if specified). +2. 
Outside of the parameter, as a cross-system default (if not specified within the `by_management_id` parameter for the relevant `management-id`). + + + ``` +kind: StorageClass +apiVersion: storage.k8s.io/v1 +metadata: + name: demo-storageclass-config-secret +provisioner: block.csi.ibm.com +volumeBindingMode: WaitForFirstConsumer +parameters: + # non-csi.storage.k8s.io parameters may be specified in by_management_id per system and/or outside by_management_id as the cross-system default. + + by_management_id: '{"demo-management-id-1":{"pool":"demo-pool-1","SpaceEfficiency":"deduplicated","volume_name_prefix":"demo-prefix-1"}, + "demo-management-id-2":{"pool":"demo-pool-2","volume_name_prefix":"demo-prefix-2"}}' # Optional. + pool: demo-pool + SpaceEfficiency: thin # Optional. + volume_name_prefix: demo-prefix # Optional. + + csi.storage.k8s.io/fstype: xfs # Optional. Values ext4/xfs. The default is ext4. + csi.storage.k8s.io/secret-name: demo-config-secret + csi.storage.k8s.io/secret-namespace: default +allowVolumeExpansion: true + ``` +Apply the storage class. + + ``` + kubectl apply -f .yaml + ``` +The `storageclass.storage.k8s.io/ created` message is emitted. + + diff --git a/docs/content/configuration/csi_ug_config_create_vol_replicationclass.md b/docs/content/configuration/csi_ug_config_create_vol_replicationclass.md new file mode 100644 index 000000000..a7355145a --- /dev/null +++ b/docs/content/configuration/csi_ug_config_create_vol_replicationclass.md @@ -0,0 +1,33 @@ +# Creating a VolumeReplicationClass + +Create a VolumeReplicationClass YAML file to enable volume replication. + +**Note:** Remote copy function is referred to as the more generic volume replication within this documentation set. Not all supported products use the remote-copy function terminology. + +In order to enable volume replication for your storage system, create a VolumeReplicationClass YAML file, similar to the following `demo-volumereplicationclass.yaml`. 
+ +When configuring the file, be sure to use the same array secret and array secret namespace as defined in [Creating a Secret](csi_ug_config_create_secret.md). + +For information on obtaining your storage system `system_id`, see [Finding a `system_id`](csi_ug_config_replication_find_systemid.md). + +``` +apiVersion: replication.storage.openshift.io/v1alpha1 +kind: VolumeReplicationClass +metadata: + name: demo-volumereplicationclass +spec: + provisioner: block.csi.ibm.com + parameters: + system_id: demo-system-id + copy_type: async # Optional. Values sync/async. The default is sync. + + replication.storage.openshift.io/replication-secret-name: demo-secret + replication.storage.openshift.io/replication-secret-namespace: default +``` + +After the YAML file is created, apply it by using the `kubectl apply -f` command. + +``` +kubectl apply -f .yaml +``` +The `volumereplicationclass.replication.storage.openshift.io/ created` message is emitted. \ No newline at end of file diff --git a/docs/content/configuration/csi_ug_config_create_vol_snapshotclass.md b/docs/content/configuration/csi_ug_config_create_vol_snapshotclass.md index b0967e790..c72aae29a 100644 --- a/docs/content/configuration/csi_ug_config_create_vol_snapshotclass.md +++ b/docs/content/configuration/csi_ug_config_create_vol_snapshotclass.md @@ -2,37 +2,37 @@ Create a VolumeSnapshotClass YAML file to enable creation and deletion of volume snapshots. -**Note:** +**Note:** IBM® FlashCopy® function is referred to as the more generic volume snapshots and cloning within this documentation set. Not all supported products use the FlashCopy function terminology. -- IBM® FlashCopy® function is referred to as the more generic volume snapshots and cloning within this documentation set. Not all supported products use the FlashCopy function terminology. -- For volume snapshot support, the minimum orchestration platform version requirements are Red Hat® OpenShift® 4.4 and Kubernetes 1.17. 
- -In order to enable creation and deletion of volume snapshots for your storage system, create a VolumeSnapshotClass YAML file, similar to the following demo-snapshotclass.yaml. +In order to enable creation and deletion of volume snapshots for your storage system, create a VolumeSnapshotClass YAML file, similar to the following `demo-volumesnapshotclass.yaml`. When configuring the file, be sure to use the same array secret and array secret namespace as defined in [Creating a Secret](csi_ug_config_create_secret.md). - The `snapshot_name_prefix` parameter is optional. - **Note:** For IBM DS8000® Family, the maximum prefix length is five characters.
The maximum prefix length for other systems is 20 characters.
For storage systems using Spectrum Virtualize, the `CSI_` prefix is added as default if not specified by the user. + **Note:** For IBM DS8000® Family, the maximum prefix length is five characters.
The maximum prefix length for other systems is 20 characters.
For storage systems that use Spectrum Virtualize, the `CSI` prefix is added as default if not specified by the user. -- The `pool` parameter is not available on IBM FlashSystem A9000 and A9000R storage systems. For these storage systems the snapshot must be created on the same pool as the source. +- The `pool` parameter is not available on IBM FlashSystem A9000 and A9000R storage systems. For these storage systems, the snapshot must be created on the same pool as the source. -```screen +``` apiVersion: snapshot.storage.k8s.io/v1beta1 kind: VolumeSnapshotClass metadata: - name: demo-snapshotclass + name: demo-volumesnapshotclass driver: block.csi.ibm.com deletionPolicy: Delete parameters: + pool: demo-pool # Optional. Use to create the snapshot on a different pool than the source. + SpaceEfficiency: thin # Optional. Use to create the snapshot with a different space efficiency than the source. + snapshot_name_prefix: demo-prefix # Optional. + csi.storage.k8s.io/snapshotter-secret-name: demo-secret csi.storage.k8s.io/snapshotter-secret-namespace: default - snapshot_name_prefix: demoSnapshot # Optional. - pool: demo-pool # Optional. Use to create the snapshot on a different pool than the source. ``` After the YAML file is created, apply it by using the `kubectl apply -f` command. ``` kubectl apply -f .yaml -``` \ No newline at end of file +``` + The `volumesnapshotclass.snapshot.storage.k8s.io/ created` message is emitted. 
\ No newline at end of file diff --git a/docs/content/configuration/csi_ug_config_create_vol_snapshotclass_topology.md b/docs/content/configuration/csi_ug_config_create_vol_snapshotclass_topology.md new file mode 100644 index 000000000..2946eb8af --- /dev/null +++ b/docs/content/configuration/csi_ug_config_create_vol_snapshotclass_topology.md @@ -0,0 +1,44 @@ +# Creating a VolumeSnapshotClass with topology awareness + +When using the CSI Topology feature, different parameters must be taken into account when creating a VolumeSnapshotClass YAML file with specific `by_management_id` requirements. Use this information to help define a VolumeSnapshotClass that is topology aware and enables the creation and deletion of volume snapshots. + +**Note:** + - For information and parameter definitions that are not related to topology awareness, be sure to see the information that is provided in [Creating a VolumeSnapshotClass](csi_ug_config_create_vol_snapshotclass.md), in addition to the current section. + + - IBM® FlashCopy® function is referred to as the more generic volume snapshots and cloning within this documentation set. Not all supported products use the FlashCopy function terminology. + +In order to enable creation and deletion of volume snapshots for your storage system, create a VolumeSnapshotClass YAML file, similar to the following `demo-volumesnapshotclass-config-secret.yaml`. + + The `by_management_id` parameter is optional and values such as the `pool`, `SpaceEfficiency`, and `volume_name_prefix` can all be specified. + +The various `by_management_id` parameters are chosen within the following hierarchical order: +1. From within the `by_management_id` parameter, per system (if specified). +2. Outside of the parameter, as a cross-system default (if not specified within the `by_management_id` parameter for the relevant `management-id`). 
+ +``` +apiVersion: snapshot.storage.k8s.io/v1beta1 +kind: VolumeSnapshotClass +metadata: + name: demo-volumesnapshotclass-config-secret +driver: block.csi.ibm.com +deletionPolicy: Delete +parameters: + # non-csi.storage.k8s.io parameters may be specified in by_management_id per system and/or outside by_management_id as the cross-system default. + + by_management_id: '{"demo-management-id-1":{"pool":"demo-pool-1","SpaceEfficiency":"deduplicated","snapshot_name_prefix":"demo-prefix-1"}, + "demo-management-id-2":{"pool":"demo-pool-2","snapshot_name_prefix":"demo-prefix-2"}}' # Optional. + pool: demo-pool # Optional. Use to create the snapshot on a different pool than the source. + SpaceEfficiency: thin # Optional. Use to create the snapshot with a different space efficiency than the source. + snapshot_name_prefix: demo-prefix # Optional. + + csi.storage.k8s.io/snapshotter-secret-name: demo-config-secret + csi.storage.k8s.io/snapshotter-secret-namespace: default +``` + +After the YAML file is created, apply it by using the `kubectl apply -f` command. + +``` +kubectl apply -f .yaml +``` + + The `volumesnapshotclass.snapshot.storage.k8s.io/ created` message is emitted. \ No newline at end of file diff --git a/docs/content/configuration/csi_ug_config_expand_pvc.md b/docs/content/configuration/csi_ug_config_expand_pvc.md index 3284667c2..e94b2a81a 100644 --- a/docs/content/configuration/csi_ug_config_expand_pvc.md +++ b/docs/content/configuration/csi_ug_config_expand_pvc.md @@ -2,9 +2,9 @@ Use this information to expand existing volumes. -**Important:** Before expanding an existing volume, be sure that the relevant StorageClass yaml `allowVolumeExpansion` parameter is set to true. For more information, see [Creating a StorageClass](csi_ug_config_create_storageclasses.md). +**Important:** Before expanding an existing volume, be sure that the relevant StorageClass `.yaml` `allowVolumeExpansion` parameter is set to true. 
For more information, see [Creating a StorageClass](csi_ug_config_create_storageclasses.md). -To expand an existing volume, open the relevant PersistentVolumeClaim (PVC) yaml file and increase the `storage` parameter value. For example, if the current `storage` value is set to _1Gi_, you can change it to _10Gi_, as needed. For more information about PVC configuration, see [Creating a PersistentVolumeClaim (PVC)](csi_ug_config_create_pvc.md). +To expand an existing volume, open the relevant PersistentVolumeClaim (PVC) YAML file and increase the `storage` parameter value. For example, if the current `storage` value is set to _1Gi_, you can change it to _10Gi_, as needed. For more information about PVC configuration, see [Creating a PersistentVolumeClaim (PVC)](csi_ug_config_create_pvc.md). Be sure to use the `kubectl apply` command in order to apply your changes. diff --git a/docs/content/configuration/csi_ug_config_replication_find_systemid.md b/docs/content/configuration/csi_ug_config_replication_find_systemid.md new file mode 100644 index 000000000..1e314e308 --- /dev/null +++ b/docs/content/configuration/csi_ug_config_replication_find_systemid.md @@ -0,0 +1,8 @@ +# Finding a `system_id` + +Find the remote storage system `system_id` parameter on your storage system in order to create a VolumeReplicationClass YAML file, enabling replication. + +For finding the `system_id` parameter on your Spectrum Virtualize storage system, use the `lspartnership` command. + +For more information, see **Command-line interface** > **Copy Service commands** > **lspartnership** within your specific product documentation on [IBM Docs](https://www.ibm.com/docs/en). 
+ diff --git a/docs/content/configuration/csi_ug_config_topology.md b/docs/content/configuration/csi_ug_config_topology.md new file mode 100644 index 000000000..a7d09fe7d --- /dev/null +++ b/docs/content/configuration/csi_ug_config_topology.md @@ -0,0 +1,9 @@ +# Configuring for CSI Topology + +Use this information for specific configuring information when using CSI Topology with the IBM® block storage CSI driver. + +**Important:** Be sure that all of the topology requirements are met before starting. For more information, see [Compatibility and requirements](../installation/csi_ug_requirements.md). + +- [Creating a Secret with topology awareness](csi_ug_config_create_secret_topology.md) +- [Creating a StorageClass with topology awareness](csi_ug_config_create_storageclasses_topology.md) +- [Creating a VolumeSnapshotClass with topology awareness](csi_ug_config_create_vol_snapshotclass_topology.md) \ No newline at end of file diff --git a/docs/content/csi_overview.md b/docs/content/csi_overview.md index b16250df4..4d5d9a8b6 100644 --- a/docs/content/csi_overview.md +++ b/docs/content/csi_overview.md @@ -8,8 +8,8 @@ By leveraging CSI (Container Storage Interface) drivers for IBM storage systems, IBM storage orchestration for containers includes the following driver types for storage provisioning: -- The IBM block storage CSI driver, for block storage (documented here). -- The IBM Spectrum® Scale CSI driver, for file storage. For specific Spectrum Scale and Spectrum Scale CSI driver product information, see [IBM Spectrum Scale documentation](https://www.ibm.com/docs/en/spectrum-scale/). +- The IBM block storage CSI driver, for block storage (documented here). +- The IBM Spectrum® Scale CSI driver, for file storage. For specific Spectrum Scale and Spectrum Scale CSI driver product information, see [IBM Spectrum Scale documentation](https://www.ibm.com/docs/en/spectrum-scale/). 
For details about volume provisioning with Kubernetes, refer to [Persistent volumes on Kubernetes](https://kubernetes.io/docs/concepts/storage/volumes/). diff --git a/docs/content/installation/csi_ug_install_operator_github.md b/docs/content/installation/csi_ug_install_operator_github.md index bc87eed3f..ef71d8ce8 100644 --- a/docs/content/installation/csi_ug_install_operator_github.md +++ b/docs/content/installation/csi_ug_install_operator_github.md @@ -4,28 +4,30 @@ The operator for IBM® block storage CSI driver can be installed directly with G Use the following steps to install the operator and driver, with [GitHub](https://github.com/IBM/ibm-block-csi-operator) (github.com/IBM/ibm-block-csi-operator). -**Note:** Before you begin, you may need to create a user-defined namespace. Create the project namespace, using the `kubectl create ns ` command. +**Note:** Before you begin, it is best practice to create a user-defined namespace. Create the project namespace, using the `kubectl create ns ` command. 1. Install the operator. 1. Download the manifest from GitHub. ``` - curl https://raw.githubusercontent.com/IBM/ibm-block-csi-operator/v1.6.0/deploy/installer/generated/ibm-block-csi-operator.yaml > ibm-block-csi-operator.yaml + curl https://raw.githubusercontent.com/IBM/ibm-block-csi-operator/v1.7.0/deploy/installer/generated/ibm-block-csi-operator.yaml > ibm-block-csi-operator.yaml ``` - 2. **Optional:** Update the image fields in the ibm-block-csi-operator.yaml. + 2. (Optional) Update the image fields in the `ibm-block-csi-operator.yaml`. - 3. Install the operator, using a user-defined namespace. + **Note:** If a user-defined namespace was created, edit the namespace from `default` to ``. + + 3. Install the operator. ``` - kubectl -n apply -f ibm-block-csi-operator.yaml + kubectl apply -f ibm-block-csi-operator.yaml ``` 4. Verify that the operator is running. (Make sure that the Status is _Running_.) 
- ```screen - $ kubectl get pod -l app.kubernetes.io/name=ibm-block-csi-operator -n + ``` + $> kubectl get pod -l app.kubernetes.io/name=ibm-block-csi-operator -n NAME READY STATUS RESTARTS AGE ibm-block-csi-operator-5bb7996b86-xntss 1/1 Running 0 10m ``` @@ -35,22 +37,24 @@ Use the following steps to install the operator and driver, with [GitHub](https: 1. Download the manifest from GitHub. ``` - curl https://raw.githubusercontent.com/IBM/ibm-block-csi-operator/v1.6.0/deploy/crds/csi.ibm.com_v1_ibmblockcsi_cr.yaml > csi.ibm.com_v1_ibmblockcsi_cr.yaml + curl https://raw.githubusercontent.com/IBM/ibm-block-csi-operator/v1.7.0/config/samples/csi.ibm.com_v1_ibmblockcsi_cr.yaml > csi.ibm.com_v1_ibmblockcsi_cr.yaml ``` - 2. **Optional:** Update the image repository field, tag field, or both in the csi.ibm.com_v1_ibmblockcsi_cr.yaml. + 2. (Optional) Update the image repository field, tag field, or both in the `csi.ibm.com_v1_ibmblockcsi_cr.yaml`. - 3. Install the csi.ibm.com_v1_ibmblockcsi_cr.yaml. + **Note:** If a user-defined namespace was created, edit the namespace from `default` to ``. + + 3. Install the `csi.ibm.com_v1_ibmblockcsi_cr.yaml`. ``` - kubectl -n apply -f csi.ibm.com_v1_ibmblockcsi_cr.yaml + kubectl apply -f csi.ibm.com_v1_ibmblockcsi_cr.yaml ``` 4. 
Verify that the driver is running: - ```bash - $ kubectl get pods -n -l csi + ``` + $> kubectl get pods -n -l csi NAME READY STATUS RESTARTS AGE - ibm-block-csi-controller-0 6/6 Running 0 9m36s + ibm-block-csi-controller-0 7/7 Running 0 9m36s ibm-block-csi-node-jvmvh 3/3 Running 0 9m36s ibm-block-csi-node-tsppw 3/3 Running 0 9m36s ibm-block-csi-operator-5bb7996b86-xntss 1/1 Running 0 10m diff --git a/docs/content/installation/csi_ug_install_operator_openshift.md b/docs/content/installation/csi_ug_install_operator_openshift.md index 2e14f1eae..b9a95dfaa 100644 --- a/docs/content/installation/csi_ug_install_operator_openshift.md +++ b/docs/content/installation/csi_ug_install_operator_openshift.md @@ -38,9 +38,9 @@ The Red Hat OpenShift Container Platform uses the following `SecurityContextCons 10. Click **Create Instance** to create the IBM block storage CSI driver (`IBMBlockCSI`). - A yaml file opens in the web console. This file can be left as-is, or edited as needed. + A YAML file opens in the web console. This file can be left as-is, or edited as needed. -11. Update the yaml file to include your user-defined namespace. +11. Update the YAML file to include your user-defined namespace. 12. Click **Create**. diff --git a/docs/content/installation/csi_ug_install_operator_operatorhub.md b/docs/content/installation/csi_ug_install_operator_operatorhub.md index 9d956e8ba..cd5a82cf9 100644 --- a/docs/content/installation/csi_ug_install_operator_operatorhub.md +++ b/docs/content/installation/csi_ug_install_operator_operatorhub.md @@ -4,4 +4,4 @@ When using OperatorHub.io, the operator for IBM® block storage CSI driver can b To install the CSI driver from OperatorHub.io, go to https://operatorhub.io/operator/ibm-block-csi-operator-community and follow the installation instructions, once clicking the **Install** button. 
-**Note:** To ensure that the operator installs the driver, be sure to apply the yaml that is located as part of the ibm-block-csi-operator-community page mentioned above. +**Note:** To ensure that the operator installs the driver, be sure to apply the YAML file that is located as part of the ibm-block-csi-operator-community page mentioned above. diff --git a/docs/content/installation/csi_ug_requirements.md b/docs/content/installation/csi_ug_requirements.md index bb86dbfe3..53e8260ea 100644 --- a/docs/content/installation/csi_ug_requirements.md +++ b/docs/content/installation/csi_ug_requirements.md @@ -1,12 +1,12 @@ # Compatibility and requirements -For the complete and up-to-date information about the compatibility and requirements for using the IBM® block storage CSI driver, refer to its latest release notes. The release notes detail supported operating system and container platform versions, as well as microcode versions of the supported storage systems. +For the complete and up-to-date information about the compatibility and requirements for using the IBM® block storage CSI driver, refer to its latest release notes. The release notes detail supported operating system and container platform versions, and microcode versions of the supported storage systems. Before beginning the installation of the CSI (Container Storage Interface) driver, be sure to verify that you comply with the following prerequisites. For IBM Cloud® Satellite users, see [cloud.ibm.com/docs/satellite](https://cloud.ibm.com/docs/satellite) for full system requirements. -**Important:** When using Satellite, complete the following checks, configurations, and the installation process before assigning the hosts to your locations.
In addition, **do not** create a Kubernetes cluster. This is done through Satellite. +**Important:** When using Satellite, complete the following checks, configurations, and the installation process before assigning the hosts to your locations.
In addition, **do not** create a Kubernetes cluster. Creating the Kubernetes cluster is done through Satellite. - The CSI driver requires the following ports to be opened on the worker nodes OS firewall: - **For all iSCSI users** @@ -17,7 +17,7 @@ For IBM Cloud® Satellite users, see [cloud.ibm.com/docs/satellite](https://clou Port 7778 - - **IBM Spectrum® Virtualize Family includes IBM® SAN Volume Controller and IBM FlashSystem® family members built with IBM Spectrum® Virtualize (including FlashSystem 5xxx, 7200, 9100, 9200, 9200R)** + - **IBM Spectrum® Virtualize Family includes IBM® SAN Volume Controller and IBM FlashSystem® family members that are built with IBM Spectrum® Virtualize (including FlashSystem 5xxx, 7200, 9100, 9200, 9200R)** Port 22 @@ -27,17 +27,15 @@ For IBM Cloud® Satellite users, see [cloud.ibm.com/docs/satellite](https://clou - Be sure that multipathing is installed and running. -Perform these steps for each worker node in Kubernetes cluster to prepare your environment for installing the CSI (Container Storage Interface) driver. +Complete these steps for each worker node in Kubernetes cluster to prepare your environment for installing the CSI (Container Storage Interface) driver. -1. **For RHEL OS users:** Ensure iSCSI connectivity. If using RHCOS or if the packages are already installed, skip this step and continue to step 2. - -2. Configure Linux® multipath devices on the host. +1. Configure Linux® multipath devices on the host. **Important:** Be sure to configure each worker with storage connectivity according to your storage system instructions. For more information, find your storage system documentation in [IBM Documentation](http://www.ibm.com/docs/). **Additional configuration steps for OpenShift® Container Platform users (RHEL and RHCOS).** Other users can continue to step 3. 
- Download and save the following yaml file: + Download and save the following YAML file: ``` curl https://raw.githubusercontent.com/IBM/ibm-block-csi-operator/master/deploy/99-ibm-attach.yaml > 99-ibm-attach.yaml @@ -45,25 +43,53 @@ Perform these steps for each worker node in Kubernetes cluster to prepare your e This file can be used for both Fibre Channel and iSCSI configurations. To support iSCSI, uncomment the last two lines in the file. - **Important:** The 99-ibm-attach.yaml configuration file overrides any files that already exist on your system. Only use this file if the files mentioned are not already created.
If one or more have been created, edit this yaml file, as necessary. + **Important:** The `99-ibm-attach.yaml` configuration file overrides any files that exist on your system. Only use this file if the files mentioned are not already created.
If one or more were created, edit this YAML file, as necessary. - Apply the yaml file. + Apply the YAML file. `oc apply -f 99-ibm-attach.yaml` - -3. If needed, enable support for volume snapshots (FlashCopy® function) on your Kubernetes cluster. - For more information and instructions, see the Kubernetes blog post, [Kubernetes 1.20: Kubernetes Volume Snapshot Moves to GA](https://kubernetes.io/blog/2020/12/10/kubernetes-1.20-volume-snapshot-moves-to-ga/). +2. Configure storage system connectivity. - Install both the Snapshot CRDs and the Common Snapshot Controller once per cluster. + 1. Define the host of each Kubernetes node on the relevant storage systems with the valid WWPN (for Fibre Channel) or IQN (for iSCSI) of the node. - The instructions and relevant yaml files to enable volume snapshots can be found at: [https://github.com/kubernetes-csi/external-snapshotter#usage](https://github.com/kubernetes-csi/external-snapshotter#usage) + 2. For Fibre Channel, configure the relevant zoning from the storage to the host. -4. Configure storage system connectivity. + 3. Ensure proper connectivity. - 1. Define the host of each Kubernetes node on the relevant storage systems with the valid WWPN (for Fibre Channel) or IQN (for iSCSI) of the node. +3. **For RHEL OS users:** Ensure that the following packages are installed. - 2. For Fibre Channel, configure the relevant zoning from the storage to the host. + If using RHCOS or if the packages are already installed, this step may be skipped. + + - sg3_utils + - iscsi-initiator-utils + - device-mapper-multipath + - xfsprogs (if XFS file system is required) +4. (Optional) If planning on using volume snapshots (FlashCopy® function), enable support on your Kubernetes cluster. + + For more information and instructions, see the Kubernetes blog post, [Kubernetes 1.20: Kubernetes Volume Snapshot Moves to GA](https://kubernetes.io/blog/2020/12/10/kubernetes-1.20-volume-snapshot-moves-to-ga/). 
+ + Install both the Snapshot CRDs and the Common Snapshot Controller once per cluster. + + The instructions and relevant YAML files to enable volume snapshots can be found at: [https://github.com/kubernetes-csi/external-snapshotter#usage](https://github.com/kubernetes-csi/external-snapshotter#usage) + +5. (Optional) If planning on using volume replication (remote copy function), enable support on your orchestration platform cluster and storage system. + + 1. To enable support on your Kubernetes cluster, install the following replication CRDs once per cluster. + + ``` + curl -O https://raw.githubusercontent.com/csi-addons/volume-replication-operator/v0.1.0/config/crd/bases/replication.storage.openshift.io_volumereplicationclasses.yaml + kubectl apply -f ./replication.storage.openshift.io_volumereplicationclasses.yaml + + curl -O https://raw.githubusercontent.com/csi-addons/volume-replication-operator/v0.1.0/config/crd/bases/replication.storage.openshift.io_volumereplications.yaml + kubectl apply -f ./replication.storage.openshift.io_volumereplications.yaml + ``` + + 2. To enable support on your storage system, see the following section within your Spectrum Virtualize product documentation on [IBM Documentation](https://www.ibm.com/docs/en/): **Administering** > **Managing Copy Services** > **Managing remote-copy partnerships**. +6. (Optional) To use CSI Topology, at least one node in the cluster must have the label-prefix of `topology.block.csi.ibm.com` to introduce topology awareness: + + **Important:** This label-prefix must be found on the nodes in the cluster **before** installing the IBM® block storage CSI driver. If the nodes do not have the proper label-prefix before installation, CSI Topology cannot be used with the CSI driver. + For more information, see [Configuring for CSI Topology](../configuration/csi_ug_config_topology.md). 
\ No newline at end of file diff --git a/docs/content/installation/csi_ug_uninstall_github.md b/docs/content/installation/csi_ug_uninstall_github.md index 59a12e8b5..ecd0bb194 100644 --- a/docs/content/installation/csi_ug_uninstall_github.md +++ b/docs/content/installation/csi_ug_uninstall_github.md @@ -3,16 +3,16 @@ Use this information to uninstall the IBM® CSI (Container Storage Interface) operator and driver with GitHub. Perform the following steps in order to uninstall the CSI driver and operator. -1. Delete the IBMBlockCSI custom resource. +1. Delete the IBMBlockCSI custom resource. ``` - kubectl -n delete -f csi.ibm.com_v1_ibmblockcsi_cr.yaml + kubectl delete -f csi.ibm.com_v1_ibmblockcsi_cr.yaml ``` -2. Delete the operator. +2. Delete the operator. ``` - kubectl -n delete -f ibm-block-csi-operator.yaml + kubectl delete -f ibm-block-csi-operator.yaml ``` diff --git a/docs/content/installation/csi_ug_uninstall_operatorhub.md b/docs/content/installation/csi_ug_uninstall_operatorhub.md index 55b059144..122c4c0b4 100644 --- a/docs/content/installation/csi_ug_uninstall_operatorhub.md +++ b/docs/content/installation/csi_ug_uninstall_operatorhub.md @@ -2,6 +2,6 @@ Use this information to uninstall the IBM® CSI (Container Storage Interface) operator and driver with OperatorHub.io. -To uninstall the CSI driver with OperatorHub.io, use the `kubectl delete -f` command to delete the yaml files, one at a time, in the reverse order of the installation steps that are documented in https://operatorhub.io/operator/ibm-block-csi-operator-community. +To uninstall the CSI driver with OperatorHub.io, use the `kubectl delete -f` command to delete the YAML files, one at a time, in the reverse order of the installation steps that are documented in https://operatorhub.io/operator/ibm-block-csi-operator-community. **Note:** To see the installation steps, click **Install** on the OperatorHub.io webpage. 
\ No newline at end of file diff --git a/docs/content/installation/csi_ug_upgrade.md b/docs/content/installation/csi_ug_upgrade.md index 95a8330c1..85d7ee7fd 100644 --- a/docs/content/installation/csi_ug_upgrade.md +++ b/docs/content/installation/csi_ug_upgrade.md @@ -2,7 +2,7 @@ Use this information to upgrade the IBM® block storage CSI driver. -- The OpenShift web console and OperatorHub.io both automatically upgrade the the CSI (Container Storage Interface) driver when a new version is released. +- The OpenShift web console and OperatorHub.io both automatically upgrade the CSI (Container Storage Interface) driver when a new version is released. - With OpenShift web console, the **Approval Strategy** must be set to **Automatic**. To check if your operator is running at the latest release level, from the OpenShift web console, browse to **Operators** > **Installed Operators**. Check the status of the Operator for IBM block storage CSI driver. Ensure that the **Upgrade Status** is _Up to date_. diff --git a/docs/content/release_notes/csi_rn_changelog_1.5.1.md b/docs/content/release_notes/csi_rn_changelog_1.5.1.md new file mode 100644 index 000000000..53ce7a615 --- /dev/null +++ b/docs/content/release_notes/csi_rn_changelog_1.5.1.md @@ -0,0 +1,3 @@ +# 1.5.1 (July 2021) + +IBM® block storage CSI driver 1.5.1 was a maintenance release, adding improved Red Hat OpenShift 4.6 integration. \ No newline at end of file diff --git a/docs/content/release_notes/csi_rn_changelog_1.6.0.md b/docs/content/release_notes/csi_rn_changelog_1.6.0.md index fd01997ab..aa3d9b4f1 100644 --- a/docs/content/release_notes/csi_rn_changelog_1.6.0.md +++ b/docs/content/release_notes/csi_rn_changelog_1.6.0.md @@ -1,3 +1,3 @@ # 1.6.0 (June 2021) -IBM® block storage CSI driver 1.6.0 adds additional support for Kubernetes 1.21 and Red Hat® OpenShift® 4.8. \ No newline at end of file +IBM® block storage CSI driver 1.6.0 added additional support for Kubernetes 1.21 and Red Hat® OpenShift® 4.8. 
\ No newline at end of file diff --git a/docs/content/release_notes/csi_rn_changelog_1.7.0.md b/docs/content/release_notes/csi_rn_changelog_1.7.0.md new file mode 100644 index 000000000..c5df058a1 --- /dev/null +++ b/docs/content/release_notes/csi_rn_changelog_1.7.0.md @@ -0,0 +1,6 @@ +# 1.7.0 (September 2021) + +IBM® block storage CSI driver 1.7.0 adds new support and enhancements: +- Now supports the CSI Topology feature +- New volume replication (remote copy) support for IBM Spectrum Virtualize Family storage systems +- Additional support for Kubernetes 1.22 \ No newline at end of file diff --git a/docs/content/release_notes/csi_rn_compatibility.md b/docs/content/release_notes/csi_rn_compatibility.md index e7b79702e..2952aebb1 100644 --- a/docs/content/release_notes/csi_rn_compatibility.md +++ b/docs/content/release_notes/csi_rn_compatibility.md @@ -1,3 +1,3 @@ # Compatibility and requirements -This section specifies the compatibility and requirements of version 1.6.0 of IBM® block storage CSI driver. +This section specifies the compatibility and requirements of version 1.7.0 of IBM® block storage CSI driver. diff --git a/docs/content/release_notes/csi_rn_edition_notice.md b/docs/content/release_notes/csi_rn_edition_notice.md index fc22cb546..6d4b68466 100644 --- a/docs/content/release_notes/csi_rn_edition_notice.md +++ b/docs/content/release_notes/csi_rn_edition_notice.md @@ -1,4 +1,4 @@ -# First Edition (June 2021) +# First Edition (September 2021) -This edition applies to version 1.6.0 of the IBM® block storage CSI driver software package. Newer document editions may be issued for the same product version in order to add missing information, update information, or amend typographical errors. The edition is reset to 'First Edition' for every new product version. +This edition applies to version 1.7.0 of the IBM® block storage CSI driver software package. 
Newer document editions may be issued for the same product version in order to add missing information, update information, or amend typographical errors. The edition is reset to 'First Edition' for every new product version. diff --git a/docs/content/release_notes/csi_rn_knownissues.md b/docs/content/release_notes/csi_rn_knownissues.md index dd39b1bf4..1874de6c1 100644 --- a/docs/content/release_notes/csi_rn_knownissues.md +++ b/docs/content/release_notes/csi_rn_knownissues.md @@ -1,6 +1,6 @@ # Known issues -This section details the known issues in IBM® block storage CSI driver 1.6.0, along with possible solutions or workarounds (if available). +This section details the known issues in IBM® block storage CSI driver 1.7.0, along with possible solutions or workarounds (if available). The following severity levels apply to known issues: @@ -12,11 +12,12 @@ The following severity levels apply to known issues: **Important:** -- **The issues listed below apply to IBM block storage CSI driver 1.6.0**. As long as a newer version has not yet been released, a newer release notes edition for IBM block storage CSI driver 1.6.0 might be issued to provide a more updated list of known issues and workarounds. -- When a newer version is released for general availability, the release notes of this version will no longer be updated. Accordingly, check the release notes of the newer version to learn whether any newly discovered issues affect IBM block storage CSI driver 1.6.0 or whether the newer version resolves any of the issues listed below. +- **The issues listed below apply to IBM block storage CSI driver 1.7.0**. As long as a newer version has not yet been released, a newer release notes edition for IBM block storage CSI driver 1.7.0 might be issued to provide a more updated list of known issues and workarounds. +- When a newer version is released for general availability, the release notes of this version will no longer be updated. 
Accordingly, check the release notes of the newer version to learn whether any newly discovered issues affect IBM block storage CSI driver 1.7.0 or whether the newer version resolves any of the issues listed below. |Ticket ID|Severity|Description| |---------|--------|-----------| +|**CSI-3382**|Service|After CSI Topology label deletion, volume provisioning does not work, even when not using any topology-aware YAML files.
**Workaround:** To allow volume provisioning through the CSI driver, delete the operator pod.
After the deletion, a new operator pod is created and the controller pod is automatically restarted, allowing for volume provisioning.| |**CSI-2157**|Service|In extremely rare cases, too many Fibre Channel worker node connections may result in a failure when the CSI driver attempts to attach a pod. As a result, the `Host for node: {0} was not found, ensure all host ports are configured on storage` error message may be found in the IBM block storage CSI driver controller logs.
**Workaround:** Ensure that all host ports are properly configured on the storage system. If the issue continues and the CSI driver can still not attach a pod, contact IBM Support.| |**CSI-702**|Service|Modifying the controller or node **affinity** settings may not take effect.
**Workaround:** If needed, delete the controller StatefulSet and/or the DaemonSet node after modifying the **affinity** settings in the IBMBlockCSI custom resource.| diff --git a/docs/content/release_notes/csi_rn_limitations.md b/docs/content/release_notes/csi_rn_limitations.md index c769f8b15..b766a6f55 100644 --- a/docs/content/release_notes/csi_rn_limitations.md +++ b/docs/content/release_notes/csi_rn_limitations.md @@ -4,7 +4,7 @@ As opposed to known issues, limitations are functionality restrictions that are ## IBM® DS8000® usage limitations -When using the CSI (Container Storage Interface) driver with DS8000 Family products, connectivity limit on the storage side may be reached because of too many open connections. This occurs due to connection closing lag times from the storage side. +Connectivity limits on the storage side might be reached with DS8000 Family products due to too many open connections. This occurs due to connection closing lag times from the storage side. ## Volume snapshot limitations @@ -28,5 +28,17 @@ The following limitations apply when using volume clones with the IBM block stor The following limitations apply when expanding volumes with the IBM block storage CSI driver: - When using the CSI driver with IBM Spectrum Virtualize Family and IBM DS8000 Family products, during size expansion of a PersistentVolumeClaim (PVC), the size remains until all snapshots of the specific PVC are deleted. -- When expanding a PVC while not in use by a pod, the volume size immediately increases on the storage side. PVC size only increases, however, after a pod begins to use the PVC. -- When expanding a filesystem PVC for a volume that was previously formatted but is now no longer being used by a pod, any copy or replication operations performed on the PVC (such as snapshots or cloning, and so on) results in a copy with the newer, larger, size on the storage. However, its filesystem has the original, smaller, size. 
\ No newline at end of file +- When expanding a PVC while not in use by a pod, the volume size immediately increases on the storage side. However, PVC size only increases after a pod uses the PVC. +- When expanding a filesystem PVC for a volume that was previously formatted but is now no longer being used by a pod, any copy or replication operations performed on the PVC (such as snapshots or cloning) results in a copy with the newer, larger, size on the storage. However, its filesystem has the original, smaller, size. + +## Volume replication limitations + +When a role switch is conducted, this is not reflected within the other orchestration platform replication objects. + +**Important:** When using volume replication on volumes that were created with a driver version lower than 1.7.0: + + 1. Change the reclaim policy of the relevant PersistentVolumes to `Retain`. + 2. Delete the relevant PersistentVolumes. + 3. Import the volumes, by using the latest import procedure (version 1.7.0 or later) (see **CSI driver configuration** > **Advanced configuration** > **Importing an existing volume** in the user information). + + For more information, see the [Change the Reclaim Policy of a PersistentVolume](https://kubernetes.io/docs/tasks/administer-cluster/change-pv-reclaim-policy/) information in the Kubernetes documentation. 
\ No newline at end of file diff --git a/docs/content/release_notes/csi_rn_supported_orchestration.md b/docs/content/release_notes/csi_rn_supported_orchestration.md index 31b1d9c8c..1c783ef58 100644 --- a/docs/content/release_notes/csi_rn_supported_orchestration.md +++ b/docs/content/release_notes/csi_rn_supported_orchestration.md @@ -4,12 +4,13 @@ The following table details orchestration platforms suitable for deployment of t |Orchestration platform|Version|Architecture| |----------------------|-------|------------| -|Kubernetes|1.20|x86| |Kubernetes|1.21|x86| -|Red Hat® OpenShift®|4.7|x86, IBM Z®, IBM Power Systems™1| +|Kubernetes|1.22|x86| |Red Hat OpenShift|4.8|x86, IBM Z, IBM Power Systems1| -1IBM Power Systems architecture is only supported on Spectrum Virtualize Family storage systems. +1IBM Power Systems architecture is only supported on Spectrum Virtualize and DS8000 Family storage systems. -**Note:** As of this document's publication date, IBM Cloud® Satellite only supports RHEL 7 on x86 architecture for Red Hat OpenShift. For the latest support information, see [cloud.ibm.com/docs/satellite](https://cloud.ibm.com/docs/satellite). +**Note:** +- As of this document's publication date, IBM Cloud® Satellite only supports RHEL 7 on x86 architecture for Red Hat OpenShift. For the latest support information, see [cloud.ibm.com/docs/satellite](https://cloud.ibm.com/docs/satellite). +- For the latest orchestration platform support information, see the [Lifecycle and support matrix](https://www.ibm.com/docs/en/stg-block-csi-driver?topic=SSRQ8T/landing/csi_lifecycle_support_matrix.html). 
diff --git a/docs/content/release_notes/csi_rn_supported_os.md b/docs/content/release_notes/csi_rn_supported_os.md index 3d48795c6..0acdf260c 100644 --- a/docs/content/release_notes/csi_rn_supported_os.md +++ b/docs/content/release_notes/csi_rn_supported_os.md @@ -5,9 +5,10 @@ The following table lists operating systems required for deployment of the IBM® |Operating system|Architecture| |----------------|------------| |Red Hat® Enterprise Linux® (RHEL) 7.x|x86, IBM Z®| -|Red Hat Enterprise Linux CoreOS (RHCOS)|x86, IBM Z®2, IBM Power Systems™1| +|Red Hat Enterprise Linux CoreOS (RHCOS)|x86, IBM Z, IBM Power Systems™1| -1IBM Power Systems architecture is only supported on Spectrum Virtualize Family storage systems.
-2IBM Z and IBM Power Systems architectures are only supported using CLI installation. +1IBM Power Systems architecture is only supported on Spectrum Virtualize and DS8000 Family storage systems. + +**Note:** For the latest operating system support information, see the [Lifecycle and support matrix](https://www.ibm.com/docs/en/stg-block-csi-driver?topic=SSRQ8T/landing/csi_lifecycle_support_matrix.html). diff --git a/docs/content/release_notes/csi_rn_supported_storage.md b/docs/content/release_notes/csi_rn_supported_storage.md index db25f75bb..c65710854 100644 --- a/docs/content/release_notes/csi_rn_supported_storage.md +++ b/docs/content/release_notes/csi_rn_supported_storage.md @@ -1,18 +1,18 @@ # Supported storage systems -IBM® block storage CSI driver 1.6.0 supports different IBM storage systems as listed in the following table. +IBM® block storage CSI driver 1.7.0 supports different IBM storage systems as listed in the following table. |Storage system|Microcode version| |--------------|-----------------| -|IBM FlashSystem™ A9000|12.x| -|IBM FlashSystem A9000R|12.x| +|IBM FlashSystem™ A9000|12.3.0.a or later| +|IBM FlashSystem A9000R|12.3.0.a or later| |IBM Spectrum Virtualize™ Family including IBM SAN Volume Controller (SVC) and IBM FlashSystem® family members built with IBM Spectrum® Virtualize (including FlashSystem 5xxx, 7200, 9100, 9200, 9200R)|7.8 and above, 8.x| |IBM Spectrum Virtualize as software only|7.8 and above, 8.x| |IBM DS8000® Family|8.x and higher with same API interface| **Note:** -- Newer microcode versions may also be compatible. When a newer microcode version becomes available, contact IBM Support to inquire whether the new microcode version is compatible with the current version of the CSI driver. -- The IBM Spectrum Virtualize Family and IBM SAN Volume Controller storage systems run the IBM Spectrum Virtualize software. 
In addition, IBM Spectrum Virtualize package is available as a deployable solution that can be run on any compatible hardware. +- For the latest microcode storage support information, see the [Lifecycle and support matrix](https://www.ibm.com/docs/en/stg-block-csi-driver?topic=SSRQ8T/landing/csi_lifecycle_support_matrix.html). +- The IBM Spectrum Virtualize Family and IBM SAN Volume Controller storage systems run the IBM Spectrum Virtualize software. In addition, IBM Spectrum Virtualize package is available as a deployable solution that can be run on any compatible hardware. diff --git a/docs/content/release_notes/csi_rn_whatsnew.md b/docs/content/release_notes/csi_rn_whatsnew.md index 3335a6488..65e20822c 100644 --- a/docs/content/release_notes/csi_rn_whatsnew.md +++ b/docs/content/release_notes/csi_rn_whatsnew.md @@ -1,12 +1,22 @@ -# What's new in 1.6.0 +# What's new in 1.7.0 -IBM® block storage CSI driver 1.6.0 introduces the enhancements detailed in the following section. +IBM® block storage CSI driver 1.7.0 introduces the enhancements that are detailed in the following section. -**General availability date**: 18 June 2021 +**General availability date:** 30 September 2021 -## Additional supported orchestration platforms for deployment +## Now supports CSI Topology -This version adds support for orchestration platforms Kubernetes 1.21 and Red Hat OpenShift 4.8, suitable for deployment of the CSI (Container Storage Interface) driver. +IBM® block storage CSI driver 1.7.0 is now topology aware. Using this feature, volume access can be limited to a subset of nodes, based on regions and availability zones. Nodes can be located in various regions within an availability zone, or across the different availability zones. Using CSI Topology feature can ease volume provisioning for workloads within a multi-zone architecture. + +For more information, see [CSI Topology Feature](https://kubernetes-csi.github.io/docs/topology.html). 
+ +## New volume replication support for IBM Spectrum Virtualize Family storage systems + +When using IBM Spectrum Virtualize Family storage systems, the CSI driver now supports volume replication (remote copy). + +## Additional support for Kubernetes 1.22 orchestration platforms for deployment + +This version adds support for orchestration platform Kubernetes 1.22, suitable for deployment of the CSI (Container Storage Interface) driver. diff --git a/docs/content/troubleshooting/csi_ug_troubleshooting_detect_errors.md b/docs/content/troubleshooting/csi_ug_troubleshooting_detect_errors.md deleted file mode 100644 index f257cbc2d..000000000 --- a/docs/content/troubleshooting/csi_ug_troubleshooting_detect_errors.md +++ /dev/null @@ -1,21 +0,0 @@ -# Detecting errors - -Use this information to help pinpoint potential causes for stateful pod failure. - -This is an overview of actions that you can take to pinpoint a potential cause for a stateful pod failure. - -**Note:** This procedures is applicable for both Kubernetes and Red Hat® OpenShift®. For Red Hat OpenShift, replace `kubectl` with `oc` in all relevant commands. - -1. Verify that the CSI driver is running. (Make sure the `csi-controller` pod status is _Running_). - - ``` - $> kubectl get all -n -l csi - ``` - -2. If `pod/ibm-block-csi-controller-0` is not in a _Running_ state, run the following command: - - ``` - kubectl describe -n pod/ibm-block-csi-controller-0 - ``` - - View the logs (see [Log collection](csi_ug_troubleshooting_logs.md)). diff --git a/docs/content/troubleshooting/csi_ug_troubleshooting_logs.md b/docs/content/troubleshooting/csi_ug_troubleshooting_logs.md index 5e04c06de..fa19d2387 100644 --- a/docs/content/troubleshooting/csi_ug_troubleshooting_logs.md +++ b/docs/content/troubleshooting/csi_ug_troubleshooting_logs.md @@ -1,12 +1,12 @@ -# Log collection +# Log and status collection -Use the CSI (Container Storage Interface) driver logs for problem identification. 
+Use the CSI (Container Storage Interface) driver debug information for problem identification. **Note:** These procedures are applicable for both Kubernetes and Red Hat® OpenShift®. For Red Hat OpenShift, replace `kubectl` with `oc` in all relevant commands. -To collect and display logs, related to the different components of IBM® block storage CSI driver, use the following Kubernetes commands: +To collect and display status and logs related to the different components of IBM® block storage CSI driver, use the following Kubernetes commands: -## Log collection for CSI pods, daemonset, and StatefulSet +## Status collection for CSI pods, daemonset, and statefulset `kubectl get all -n -l csi` @@ -20,4 +20,22 @@ To collect and display logs, related to the different components of IBM® block ## Log collection for Operator for IBM block storage CSI driver -`kubectl log -f -n ibm-block-csi-operator- -c ibm-block-csi-operator` \ No newline at end of file +`kubectl log -f -n ibm-block-csi-operator- -c ibm-block-csi-operator` + +## Detecting errors + +To help pinpoint potential causes for stateful pod failure: + +1. Verify that all CSI pods are running. + + ``` + kubectl get pods -n -l csi + ``` + +2. If a pod is not in a _Running_ state, run the following command: + + ``` + kubectl describe -n pod/ + ``` + + View the logs. \ No newline at end of file diff --git a/docs/content/troubleshooting/csi_ug_troubleshooting_misc.md b/docs/content/troubleshooting/csi_ug_troubleshooting_misc.md index e30049806..b23bb2ec3 100644 --- a/docs/content/troubleshooting/csi_ug_troubleshooting_misc.md +++ b/docs/content/troubleshooting/csi_ug_troubleshooting_misc.md @@ -12,7 +12,7 @@ kubectl get -n csidriver,sa,clusterrole,clusterrolebinding,stateful ``` ## Error during pod creation -**Note:** This troubleshooting procedure is relevant for volumes using file system types only (not for volumes using raw block volume types). 
+**Note:** This troubleshooting procedure is relevant for volumes using file system volume mode only (not for volumes using raw block volume mode). If the following error occurs during stateful application pod creation (the pod status is _ContainerCreating_): diff --git a/docs/content/troubleshooting/csi_ug_troubleshooting_misc.md.dcsbackup b/docs/content/troubleshooting/csi_ug_troubleshooting_misc.md.dcsbackup deleted file mode 100644 index 7be9a50ac..000000000 --- a/docs/content/troubleshooting/csi_ug_troubleshooting_misc.md.dcsbackup +++ /dev/null @@ -1,46 +0,0 @@ -# Miscellaneous troubleshooting - -Use this information to help pinpoint potential causes for stateful pod failure. - -**Note:** These procedures are applicable for both Kubernetes and Red Hat® OpenShift®. For Red Hat OpenShift, replace `kubectl` with `oc` in all relevant commands. - -- [General troubleshooting](#general_troubleshooting) -- [Error during pod creation](#error_during_pod_creation) (for volumes using StatefulSet only) - -## General troubleshooting -Use the following command for general troubleshooting: - -``` -kubectl get -n csidriver,sa,clusterrole,clusterrolebinding,statefulset,pod,daemonset | grep ibm-block-csi -``` - -## Error during pod creation -**Note:** This troubleshooting procedure is relevant for volumes using file system types only (not for volumes using raw block volume types). - -If the following error occurs during stateful application pod creation (the pod status is _ContainerCreating_): - -```screen - -8e73-005056a49b44" : rpc error: code = Internal desc = 'fsck' found errors on device /dev/dm-26 but could not correct them: fsck from util-linux 2.23.2 - /dev/mapper/mpathym: One or more block group descriptor checksums are invalid. FIXED. - /dev/mapper/mpathym: Group descriptor 0 checksum is 0x0000, should be 0x3baa. - - /dev/mapper/mpathym: UNEXPECTED INCONSISTENCY; RUN fsck MANUALLY. - (i.e., without -a or -p options) -``` -1. 
Log in to the relevant worker node and run the `fsck` command to repair the filesystem manually. - - `fsck /dev/dm-` - - The pod should come up immediately. If the pod is still in a _ContainerCreating_ state, continue to the next step. - -2. Run the `# multipath -ll` command to see if there are faulty multipath devices. - - If there are faulty multipath devices: - - 1. Restart multipath daemon, using the `systemctl restart multipathd` command. - 2. Rescan any iSCSI devices, using the `rescan-scsi-bus.sh` command. - 3. Restart the multipath daemon again, using the `systemctl restart multipathd` command. - - The multipath devices should be running properly and the pod should come up immediately. - - diff --git a/docs/content/troubleshooting/csi_ug_troubleshooting_misc.md.tminfo b/docs/content/troubleshooting/csi_ug_troubleshooting_misc.md.tminfo deleted file mode 100644 index 0e8c1a902..000000000 --- a/docs/content/troubleshooting/csi_ug_troubleshooting_misc.md.tminfo +++ /dev/null @@ -1,10 +0,0 @@ -PROCESS START: Trademark Scanner 2127-20210504 -PROCESS START: 06-10-2021 11:58:18 -PROCESS: Scan Document -HOMEDIR: C:\Users\410341756\Documents\GitHub\ibm-block-csi-driver\docs -SETTING: Trademark list version is 20201214 -SCAN FILE: C:\Users\410341756\Documents\GitHub\ibm-block-csi-driver\docs\csi_ug_troubleshooting_misc.md -SCAN MESSAGE: Line #4: Trademarks were updated. 
[OpenShift] [Red Hat] -SCAN RESULT: Trademarks:2 List: [Red Hat](x2) [OpenShift](x2) -PROCESS END: 06-10-2021 11:58:18 -Scanning Complete diff --git a/docs/content/troubleshooting/csi_ug_troubleshooting_node_crash.md b/docs/content/troubleshooting/csi_ug_troubleshooting_node_crash.md index 1a7eba0f5..7f18f3cb4 100644 --- a/docs/content/troubleshooting/csi_ug_troubleshooting_node_crash.md +++ b/docs/content/troubleshooting/csi_ug_troubleshooting_node_crash.md @@ -9,15 +9,15 @@ When a worker node shuts down or crashes, all pods in a StatefulSet that reside For example: -```screen -$> kubectl get nodes -NAME STATUS ROLES AGE VERSION -k8s-master Ready master 6d -k8s-node1 Ready 6d -k8s-node3 NotReady 6d - -$> kubectl get pods --all-namespaces -o wide | grep default -default sanity-statefulset-0 1/1 Terminating 0 19m 10.244.2.37 k8s-node3 +``` +$>kubectl get nodes +NAME STATUS ROLES AGE VERSION +k8s-master Ready master 6d +k8s-node1 Ready 6d +k8s-node2 NotReady 6d + +$>kubectl get pods --all-namespaces -o wide | grep default +default sanity-statefulset-0 1/1 Terminating 0 19m 10.244.2.37 k8s-node2 ``` ## Recovering a crashed node @@ -45,32 +45,30 @@ Follow the following procedure to recover from a crashed node (see a [full examp kubectl delete pod --grace-period=0 --force ``` -5. Verify that the pod is now in a _Running_ state and that the pod has moved to worker-node1. +5. Verify that the pod is now in a _Running_ state and that the pod has moved to a _Ready_ node. 
For example: -```screen -$> kubectl get nodes -NAME STATUS ROLES AGE VERSION -k8s-master Ready master 6d -k8s-node1 Ready 6d -k8s-node3 NotReady 6d + $> kubectl get nodes + NAME STATUS ROLES AGE VERSION + k8s-master Ready master 6d + k8s-node1 Ready 6d + k8s-node2 NotReady 6d -$> kubectl get pods --all-namespaces -o wide | grep default -default sanity-statefulset-0 1/1 Terminating 0 19m 10.244.2.37 k8s-node3 + $> kubectl get pods --all-namespaces -o wide | grep default + default sanity-statefulset-0 1/1 Terminating 0 19m 10.244.2.37 k8s-node2 -$> kubectl get volumeattachment -NAME AGE -csi-5944e1c742d25e7858a8e48311cdc6cc85218f1156dd6598d4cf824fb1412143 10m + $> kubectl get volumeattachment + NAME AGE + csi-5944e1c742d25e7858a8e48311cdc6cc85218f1156dd6598d4cf824fb1412143 10m -$> kubectl delete volumeattachment csi-5944e1c742d25e7858a8e48311cdc6cc85218f1156dd6598d4cf824fb1412143 -volumeattachment.storage.k8s.io "csi-5944e1c742d25e7858a8e48311cdc6cc85218f1156dd6598d4cf824fb1412143" deleted + $> kubectl delete volumeattachment csi-5944e1c742d25e7858a8e48311cdc6cc85218f1156dd6598d4cf824fb1412143 + volumeattachment.storage.k8s.io "csi-5944e1c742d25e7858a8e48311cdc6cc85218f1156dd6598d4cf824fb1412143" deleted -$> kubectl delete pod sanity-statefulset-0 --grace-period=0 --force -warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. -pod "sanity-statefulset-0" deleted + $> kubectl delete pod sanity-statefulset-0 --grace-period=0 --force + warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. 
+ pod "sanity-statefulset-0" deleted -$> kubectl get pods --all-namespaces -o wide | grep default -default sanity-statefulset-0 1/1 Running 0 26s 10.244.1.210 k8s-node1 -``` + $> kubectl get pods --all-namespaces -o wide | grep default + default sanity-statefulset-0 1/1 Running 0 26s 10.244.1.210 k8s-node1 diff --git a/docs/content/using/csi_ug_using_sample.md b/docs/content/using/csi_ug_using_sample.md index ea6e5b731..44032b42a 100644 --- a/docs/content/using/csi_ug_using_sample.md +++ b/docs/content/using/csi_ug_using_sample.md @@ -2,38 +2,24 @@ You can use the CSI (Container Storage Interface) driver for running stateful containers with a storage volume provisioned from IBM® block storage systems. -These examples illustrate a basic configuration required for running a stateful container with volumes provisioned on an IBM Spectrum® Virtualize Family storage system. +These instructions illustrate the general flow for a basic configuration required for running a stateful container with volumes provisioned on storage system. -While these examples specify the use of IBM Spectrum Virtualize products, the same configuration is used on all supported storage system types. - -**Note:** The secret names given can be user specified. When giving secret names when managing different system storage types, be sure to give system type indicators to each name. - -The following are examples of different types of secret names that can be given per storage type. - -|Storage system name|Secret name| -|-------------------|-----------| -|IBM FlashSystem® A9000
IBM FlashSystem A9000R|a9000-array1| -|IBM Spectrum Virtualize Family including IBM SAN Volume Controller and
IBM FlashSystem family members built with IBM Spectrum Virtualize
(including FlashSystem 5xxx, 7200, 9100, 9200, 9200R)|storwize-array1| -|IBM DS8000® Family products|DS8000-array1| - -**Note:** This procedure is applicable for both Kubernetes and Red Hat® OpenShift®. For Red Hat OpenShift, replace `kubectl` with `oc` in all relevant commands. +**Note:** The secret names given are user specified. To implement order and help any debugging that may be required, provide system type indicators to each secret name when managing different system storage types. Use this information to run a stateful container on StatefulSet volumes using either file systems or raw block volumes. -1. Create an array secret, as described in [Creating a Secret](../configuration/csi_ug_config_create_secret.md). +1. Create an array secret, as described in [Creating a Secret](../configuration/csi_ug_config_create_secret.md). -2. Create a storage class, as described in [Creating a StorageClass](../configuration/csi_ug_config_create_storageclasses.md). +2. Create a storage class, as described in [Creating a StorageClass](../configuration/csi_ug_config_create_storageclasses.md). - **Remember:** The `SpaceEfficiency` values for Spectrum Virtualize Family are: thick, thin, compressed, or deduplicated. These values are not case specific. + **Remember:** The `SpaceEfficiency` values for Spectrum Virtualize Family are: `thick`, `thin`, `compressed`, or `deduplicated`. These values are not case specific. - For DS8000 Family systems, the default value is standard, but can be set to thin, if required. These values are not case specific. For more information, see [Creating a StorageClass](../configuration/csi_ug_config_create_storageclasses.md). + For DS8000 Family systems, the default value is `none`, but can be set to `thin`, if required. These values are not case specific. For more information, see [Creating a StorageClass](../configuration/csi_ug_config_create_storageclasses.md). This parameter is not applicable for IBM FlashSystem A9000 and A9000R systems. 
These systems always include deduplication and compression. -3. Create a PVC with the size of 1 Gb, as described in [Creating a PersistentVolumeClaim (PVC)](../configuration/csi_ug_config_create_pvc.md). - -4. Display the existing PVC and the created persistent volume (PV). - -5. Create a StatefulSet, as described in [Creating a StatefulSet](../configuration/csi_ug_config_create_statefulset.md). +3. Create a PVC with the size of 1 Gb, as described in [Creating a PersistentVolumeClaim (PVC)](../configuration/csi_ug_config_create_pvc.md). +4. (Optional) Display the existing PVC and the created persistent volume (PV). +5. Create a StatefulSet, as described in [Creating a StatefulSet](../configuration/csi_ug_config_create_statefulset.md). \ No newline at end of file diff --git a/node/pkg/driver/device_connectivity/device_connectivity_helper_scsigeneric.go b/node/pkg/driver/device_connectivity/device_connectivity_helper_scsigeneric.go index 8837190ca..b69a8106c 100644 --- a/node/pkg/driver/device_connectivity/device_connectivity_helper_scsigeneric.go +++ b/node/pkg/driver/device_connectivity/device_connectivity_helper_scsigeneric.go @@ -73,6 +73,7 @@ const ( multipathdCmd = "multipathd" multipathCmd = "multipath" VolumeIdDelimiter = ":" + VolumeStorageIdsDelimiter = ";" ) func NewOsDeviceConnectivityHelperScsiGeneric(executer executer.ExecuterInterface) OsDeviceConnectivityHelperScsiGenericInterface { @@ -132,12 +133,21 @@ func (r OsDeviceConnectivityHelperScsiGeneric) RescanDevices(lunId int, arrayIde logger.Debugf("Rescan : finish rescan lun on lun id : {%v}, with array identifiers : {%v}", lunId, arrayIdentifiers) return nil } +func getVolumeUuid(volumeId string) string { + volumeIdParts := strings.Split(volumeId, VolumeIdDelimiter) + idsPart := volumeIdParts[len(volumeIdParts)-1] + splittedIdsPart := strings.Split(idsPart, VolumeStorageIdsDelimiter) + if len(splittedIdsPart) == 2 { + return splittedIdsPart[1] + } else { + return splittedIdsPart[0] + } +} func (r 
OsDeviceConnectivityHelperScsiGeneric) GetMpathDevice(volumeId string) (string, error) { logger.Infof("GetMpathDevice: Searching multipath devices for volume : [%s] ", volumeId) - volumeIdParts := strings.Split(volumeId, VolumeIdDelimiter) - volumeUuid := volumeIdParts[len(volumeIdParts)-1] + volumeUuid := getVolumeUuid(volumeId) volumeUuidLower := strings.ToLower(volumeUuid) diff --git a/node/pkg/driver/node.go b/node/pkg/driver/node.go index bb558e27c..06e4cea53 100644 --- a/node/pkg/driver/node.go +++ b/node/pkg/driver/node.go @@ -19,6 +19,9 @@ package driver import ( "context" "fmt" + "path" + "strings" + "github.com/container-storage-interface/spec/lib/go/csi" "github.com/ibm/ibm-block-csi-driver/node/goid_info" "github.com/ibm/ibm-block-csi-driver/node/logger" @@ -27,8 +30,6 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/status" "k8s.io/utils/mount" - "path" - "strings" ) var ( @@ -711,7 +712,7 @@ func (d *NodeService) NodeGetInfo(ctx context.Context, req *csi.NodeGetInfoReque } logger.Debugf("discovered topology labels : %v", topologyLabels) - fcExists := d.NodeUtils.IsPathExists(FCPath) + fcExists := d.NodeUtils.IsFCExists() if fcExists { fcWWNs, err = d.NodeUtils.ParseFCPorts() if err != nil { diff --git a/node/pkg/driver/node_test.go b/node/pkg/driver/node_test.go index 542b66a93..70db3e0a4 100644 --- a/node/pkg/driver/node_test.go +++ b/node/pkg/driver/node_test.go @@ -20,16 +20,17 @@ import ( "context" "errors" "fmt" - "github.com/container-storage-interface/spec/lib/go/csi" - "github.com/golang/mock/gomock" - "github.com/ibm/ibm-block-csi-driver/node/mocks" - "github.com/ibm/ibm-block-csi-driver/node/pkg/driver/device_connectivity" "path" "path/filepath" "reflect" "strings" "testing" + "github.com/container-storage-interface/spec/lib/go/csi" + "github.com/golang/mock/gomock" + "github.com/ibm/ibm-block-csi-driver/node/mocks" + "github.com/ibm/ibm-block-csi-driver/node/pkg/driver/device_connectivity" + 
"github.com/ibm/ibm-block-csi-driver/node/pkg/driver" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" @@ -1134,7 +1135,7 @@ func TestNodeGetCapabilities(t *testing.T) { } func TestNodeGetInfo(t *testing.T) { - topologySegments := map[string]string{"topology.kubernetes.io/zone": "testZone"} + topologySegments := map[string]string{"topology.block.csi.ibm.com/zone": "testZone"} testCases := []struct { name string @@ -1217,7 +1218,7 @@ func TestNodeGetInfo(t *testing.T) { fake_nodeutils := mocks.NewMockNodeUtilsInterface(mockCtrl) d := newTestNodeService(fake_nodeutils, nil, nil) fake_nodeutils.EXPECT().GetTopologyLabels(context.TODO(), d.Hostname).Return(topologySegments, nil) - fake_nodeutils.EXPECT().IsPathExists(driver.FCPath).Return(tc.fcExists) + fake_nodeutils.EXPECT().IsFCExists().Return(tc.fcExists) if tc.fcExists { fake_nodeutils.EXPECT().ParseFCPorts().Return(tc.return_fcs, tc.return_fc_err) } diff --git a/node/pkg/driver/node_utils.go b/node/pkg/driver/node_utils.go index 4490df1ef..47aadaf3b 100644 --- a/node/pkg/driver/node_utils.go +++ b/node/pkg/driver/node_utils.go @@ -19,14 +19,16 @@ package driver import ( "context" "fmt" - "github.com/ibm/ibm-block-csi-driver/node/pkg/driver/device_connectivity" + "io" "io/ioutil" - "k8s.io/apimachinery/pkg/util/errors" "os" "path" "strconv" "strings" + "github.com/ibm/ibm-block-csi-driver/node/pkg/driver/device_connectivity" + "k8s.io/apimachinery/pkg/util/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" @@ -38,7 +40,7 @@ import ( var ( getOpts = metav1.GetOptions{} - topologyPrefixes = [...]string{"topology.kubernetes.io", "topology.block.csi.ibm.com"} + topologyPrefixes = [...]string{"topology.block.csi.ibm.com"} ) const ( @@ -52,6 +54,7 @@ const ( resizeFsTimeoutMilliseconds = 30 * 1000 TimeOutMultipathdCmd = 10 * 1000 multipathdCmd = "multipathd" + minFilesInNonEmptyDir = 1 ) //go:generate mockgen 
-destination=../../mocks/mock_node_utils.go -package=mocks github.com/ibm/ibm-block-csi-driver/node/pkg/driver NodeUtilsInterface @@ -67,6 +70,7 @@ type NodeUtilsInterface interface { ClearStageInfoFile(filePath string) error StageInfoFileIsExist(filePath string) bool IsPathExists(filePath string) bool + IsFCExists() bool IsDirectory(filePath string) bool RemoveFileOrDirectory(filePath string) error MakeDir(dirPath string) error @@ -239,6 +243,26 @@ func (n NodeUtils) ParseFCPorts() ([]string, error) { return fcPorts, nil } +func (n NodeUtils) IsFCExists() bool { + return n.IsPathExists(FCPath) && !n.isEmptyDir(FCPath) +} + +func (n NodeUtils) isEmptyDir(path string) bool { + f, _ := os.Open(path) + defer f.Close() + + _, err := f.Readdir(minFilesInNonEmptyDir) + + if err != nil { + if err != io.EOF { + logger.Warningf("Check is directory %s empty returned error %s", path, err.Error()) + } + return true + } + + return false +} + func (n NodeUtils) IsPathExists(path string) bool { _, err := os.Stat(path) if err != nil { diff --git a/node/pkg/driver/node_utils_test.go b/node/pkg/driver/node_utils_test.go index 9bb7d0b5b..e5e441bfe 100644 --- a/node/pkg/driver/node_utils_test.go +++ b/node/pkg/driver/node_utils_test.go @@ -19,16 +19,17 @@ package driver_test import ( "errors" "fmt" - gomock "github.com/golang/mock/gomock" - mocks "github.com/ibm/ibm-block-csi-driver/node/mocks" - driver "github.com/ibm/ibm-block-csi-driver/node/pkg/driver" - "github.com/ibm/ibm-block-csi-driver/node/pkg/driver/device_connectivity" - executer "github.com/ibm/ibm-block-csi-driver/node/pkg/driver/executer" "io/ioutil" "os" "reflect" "syscall" "testing" + + gomock "github.com/golang/mock/gomock" + mocks "github.com/ibm/ibm-block-csi-driver/node/mocks" + driver "github.com/ibm/ibm-block-csi-driver/node/pkg/driver" + "github.com/ibm/ibm-block-csi-driver/node/pkg/driver/device_connectivity" + executer "github.com/ibm/ibm-block-csi-driver/node/pkg/driver/executer" ) var ( diff --git 
a/node/pkg/driver/version_test.go b/node/pkg/driver/version_test.go index fe5001785..ffef8696c 100644 --- a/node/pkg/driver/version_test.go +++ b/node/pkg/driver/version_test.go @@ -46,7 +46,7 @@ func TestGetVersion(t *testing.T) { version, err := GetVersion(dir) expected := VersionInfo{ - DriverVersion: "1.6.0", + DriverVersion: "1.7.0", GitCommit: "", BuildDate: "", GoVersion: runtime.Version(), @@ -76,7 +76,7 @@ func TestGetVersionJSON(t *testing.T) { } expected := fmt.Sprintf(`{ - "driverVersion": "1.6.0", + "driverVersion": "1.7.0", "gitCommit": "", "buildDate": "", "goVersion": "%s", diff --git a/reusables/doc-resources.md b/reusables/doc-resources.md deleted file mode 100644 index e8a0029aa..000000000 --- a/reusables/doc-resources.md +++ /dev/null @@ -1,22 +0,0 @@ -IBM resources - -- [IBM SAN Volume Controller documentation](https://www.ibm.com/docs/en/sanvolumecontroller)\(ibm.com/docs/en/sanvolumecontroller) -- [IBM Spectrum Scale documentation](https://www.ibm.com/docs/en/spectrum-scale)\(ibm.com/docs/en/spectrum-scale\) -- [IBM FlashSystem® 5200, 5000, 5100, Storwize® V5100 and V5000E documentation](http://www.ibm.com/docs/en/f555sv-and-v)\(ibm.com/docs/en/f555sv-and-v\) -- [IBM FlashSystem™ 7200 and Storwize V7000 documentation](https://www.ibm.com/docs/en/flashsystem-7x00)\(ibm.com/docs/en/flashsystem-7x00\) -- [IBM Spectrum Virtualize as Software Only documentation](https://www.ibm.com/docs/en/spectrumvirtualsoftw)\(ibm.com/docs/en/spectrumvirtualsoftw\) -- [IBM FlashSystem 9200 and 9100 documentation](https://www.ibm.com/docs/en/flashsystem-9x00)\(ibm.com/docs/en/flashsystem-9x00\) -- [IBM FlashSystem A9000 documentation](https://www.ibm.com/docs/en/flashsystem-a9000)\(ibm.com/docs/en/flashsystem-a9000\) -- [IBM FlashSystem A9000R documentation](https://www.ibm.com/docs/en/flashsystem-a9000r)\(ibm.com/docs/en/flashsystem-a9000r\) -- [IBM DS8880 documentation](https://www.ibm.com/docs/en/ds8880) \(ibm.com/docs/en/ds8880\) -- [IBM DS8900 
documentation](https://www.ibm.com/docs/en/ds8900) \(ibm.com/docs/en/ds8900\) -- [IBM Spectrum® Access for IBM Cloud® Private Blueprint](https://www-01.ibm.com/common/ssi/cgi-bin/ssialias?htmlfid=TSW03569USEN&) \(ibm.com/downloads/cas/KK5PGD8E\) - - Used as the FlexVolume driver based solution for OpenShift® 3.11, using [IBM Storage Enabler for Containers](https://www.ibm.com/docs/en/stgenablercontainers)\(ibm.com/docs/en/stgenablercontainers\) - -- [IBM Storage for Red Hat® OpenShift Blueprint](http://www.redbooks.ibm.com/abstracts/redp5565.html?Open) \(http://www.redbooks.ibm.com/abstracts/redp5565.html?Open\) - -External resources -- [Persistent volumes on Kubernetes](https://kubernetes.io/docs/concepts/storage/volumes/) \(kubernetes.io/docs/concepts/storage/volumes\) -- [Kubernetes Documentation](https://kubernetes.io/docs/home/) \(kubernetes.io/docs/home/\) -- [Kubernetes Blog](https://kubernetes.io/blog/) \(kubernetes.io//blog\) diff --git a/scripts/ci/jenkins_pipeline_csi b/scripts/ci/Jenkinsfile similarity index 93% rename from scripts/ci/jenkins_pipeline_csi rename to scripts/ci/Jenkinsfile index 6689dbfc5..95c72e5bb 100644 --- a/scripts/ci/jenkins_pipeline_csi +++ b/scripts/ci/Jenkinsfile @@ -1,4 +1,9 @@ pipeline { + parameters { + string(name: 'IMAGE_VERSION', defaultValue: "1.7.0") + string(name: 'DOCKER_REGISTRY', defaultValue: DEFAULT_DOCKER_REGISTRY) + string(name: 'EMAIL_TO', defaultValue: "") + } environment { registryCredentialsID = 'csi_w3_user' } diff --git a/scripts/ci/jenkins_pipeline_community_csi_test b/scripts/ci/jenkins_pipeline_community_csi_test deleted file mode 100644 index 4f8cee84f..000000000 --- a/scripts/ci/jenkins_pipeline_community_csi_test +++ /dev/null @@ -1,83 +0,0 @@ -pipeline { - agent { - label 'docker-engine' - } - environment { - CONTROLLER_LOGS = "csi_controller_logs" - } - - stages { - stage('Environment Setup') { - agent { - label 'ansible_rhel73' - } - steps { - script{ - echo "checking out XAVI" - if 
(env.XAVILIB_BRANCH == null) { - env.XAVILIB_BRANCH = 'develop' - } - // Just bring XAVI repo (use it in different stage) - xaviCheckOutScm(path: 'testing/', name: 'xavi', branch: "${env.XAVILIB_BRANCH}") - - - // Generate the new storage conf yaml with relevant envs - env.pwd = sh(returnStdout: true, script: 'pwd').trim() - echo " env.pwd ${env.pwd}" - - env.new_conf_yaml_name = "${env.pwd}/scripts/ci/storage_conf_new.yaml" - sh 'echo new conf yaml ${new_conf_yaml_name}' - - env.full_storage_conf_yaml_path = "${env.pwd}/scripts/ci/storage_conf.yaml" - echo "full storage conf yaml path : ${env.full_storage_conf_yaml_path}" - - echo "replacing username and password in storage-conf file" - // this will replace the username and password env vars in the yaml file. - sh ''' - ( echo "cat < ${new_conf_yaml_name}"; - cat ${full_storage_conf_yaml_path}; - echo "EOF"; - ) > ${new_conf_yaml_name} - . ${new_conf_yaml_name} - cat ${new_conf_yaml_name} - ''' - - echo "getting pool name from yaml file" - env.POOL_NAME = sh(returnStdout: true, script: 'cat ${full_storage_conf_yaml_path} | grep " pools:" -A 4 | grep name | cut -d ":" -f2').trim() - echo "pool name ${POOL_NAME}" - - } - } - } - stage('Configure Storage') { - agent { - label 'ansible_rhel73' - } - steps { - echo "found storage yaml so running ansible to configure storage using yaml file : ${env.new_conf_yaml_name}" - script { - configureStorage(storage_arrays: "${env.STORAGE_ARRAYS}", vars_file: "${env.new_conf_yaml_name}") - } - } - } - - stage ('CSI-controller: build and start controller server and csi sanity tests') { - steps { - sh './scripts/ci/run_community_csi_test.sh' - } - } - } - - post { - always { - sh './scripts/ci/community_csi_test_cleanup.sh csi-controller' - sh './scripts/ci/community_csi_test_cleanup.sh csi-sanity-test' - archiveArtifacts "${env.CONTROLLER_LOGS}, ${env.CONTROLLER_LOGS}_node" - sh 'ls build/reports' - junit 'build/reports/*.xml' - sh '[ -d build/reports ] && rm -rf build/reports' - 
sh '[ -f `${env.CONTROLLER_LOGS}` ] && rm -f csi_controller_logs' - - } - } -} diff --git a/scripts/ci/run_csi_test_client.sh b/scripts/ci/run_csi_test_client.sh index c21879751..7629e40f2 100755 --- a/scripts/ci/run_csi_test_client.sh +++ b/scripts/ci/run_csi_test_client.sh @@ -7,6 +7,12 @@ if [ $3 = 'community_svc' ] ; then else csi_params='csi_params' fi + +common_tests_to_skip_file="scripts/csi_test/common_csi_tests_to_skip" +array_specific_tests_to_skip_file="scripts/csi_test/$3_csi_tests_to_skip" +tests_to_skip_file="scripts/csi_test/csi_tests_to_skip" +cat $common_tests_to_skip_file $array_specific_tests_to_skip_file > $tests_to_skip_file + #/tmp/k8s_dir is the directory of the csi grpc\unix socket that shared between csi server and csi-test docker -docker build -f Dockerfile-csi-test --build-arg CSI_PARAMS=${csi_params} -t csi-sanity-test . && docker run --user=root -e STORAGE_ARRAYS=${STORAGE_ARRAYS} -e USERNAME=${USERNAME} -e PASSWORD=${PASSWORD} -e POOL_NAME=${POOL_NAME} -v /tmp/k8s_dir:/tmp/k8s_dir:rw -v$2:/tmp/test_results:rw --rm --name $1 csi-sanity-test +docker build -f Dockerfile-csi-test --build-arg CSI_PARAMS=${csi_params} -t csi-sanity-test . 
&& docker run --user=root -e STORAGE_ARRAYS=${STORAGE_ARRAYS} -e USERNAME=${USERNAME} -e PASSWORD=${PASSWORD} -e POOL_NAME=${POOL_NAME} -v /tmp:/tmp:rw -v$2:/tmp/test_results:rw --rm --name $1 csi-sanity-test diff --git a/scripts/csi_test/common_csi_tests_to_skip b/scripts/csi_test/common_csi_tests_to_skip new file mode 100644 index 000000000..691004e40 --- /dev/null +++ b/scripts/csi_test/common_csi_tests_to_skip @@ -0,0 +1,5 @@ +GetCapacity +ListVolumes +volume lifecycle +ListSnapshots +NodeGetVolumeStats diff --git a/scripts/csi_test/community_a9k_csi_tests_to_skip b/scripts/csi_test/community_a9k_csi_tests_to_skip new file mode 100644 index 000000000..e69de29bb diff --git a/scripts/csi_test/community_ds8k_csi_tests_to_skip b/scripts/csi_test/community_ds8k_csi_tests_to_skip new file mode 100644 index 000000000..a48deba96 --- /dev/null +++ b/scripts/csi_test/community_ds8k_csi_tests_to_skip @@ -0,0 +1 @@ +should create volume from an existing source snapshot diff --git a/scripts/csi_test/community_svc_csi_tests_to_skip b/scripts/csi_test/community_svc_csi_tests_to_skip new file mode 100644 index 000000000..572a33b98 --- /dev/null +++ b/scripts/csi_test/community_svc_csi_tests_to_skip @@ -0,0 +1,4 @@ +should create volume from an existing source snapshot +should create volume from an existing source volume +should succeed when requesting to create a snapshot with already existing name and same source volume ID +should fail when requesting to create a snapshot with already existing name and different source volume ID diff --git a/scripts/csi_test/csi_params b/scripts/csi_test/csi_params index b8bd87cf6..b524c8581 100644 --- a/scripts/csi_test/csi_params +++ b/scripts/csi_test/csi_params @@ -1,3 +1,3 @@ #SpaceEfficiency: Compression pool: POOL_NAME -#volume_name_prefix: v_olga +#volume_name_prefix: vol_ diff --git a/scripts/csi_test/csi_tests_to_run b/scripts/csi_test/csi_tests_to_run deleted file mode 100644 index 8cb29b064..000000000 --- 
a/scripts/csi_test/csi_tests_to_run +++ /dev/null @@ -1,18 +0,0 @@ -ControllerGetCapabilities -CreateVolume -DeleteVolume -ExpandVolume -NodeGetCapabilities -NodeGetInfo -CreateSnapshot -DeleteSnapshot -NodePublishVolume -NodeUnpublishVolume -NodeStageVolume -NodeUnstageVolume -GetPluginCapabilities -GetPluginInfo -Probe -ControllerPublishVolume -ControllerUnpublishVolume -Node Service diff --git a/scripts/csi_test/entrypoint-csi-tests.sh b/scripts/csi_test/entrypoint-csi-tests.sh index 7b1544b69..a75b0f72c 100755 --- a/scripts/csi_test/entrypoint-csi-tests.sh +++ b/scripts/csi_test/entrypoint-csi-tests.sh @@ -8,8 +8,8 @@ sed -i -e "s/PASSWORD/${PASSWORD}/g" ${SECRET_FILE} echo "update params file" sed -i -e "s/POOL_NAME/${POOL_NAME}/g" ${PARAM_FILE} -# get tests to run -TESTS=`cat ${TESTS_TO_RUN_FILE}| sed -Ez '$ s/\n+$//' | tr '\n' "|"` +# get tests to skip +TESTS=`cat ${TESTS_TO_SKIP_FILE}| sed -Ez '$ s/\n+$//' | tr '\n' "|"` /usr/local/go/src/github.com/kubernetes-csi/csi-test/cmd/csi-sanity/csi-sanity \ --csi.endpoint ${ENDPOINT} \ @@ -19,5 +19,5 @@ TESTS=`cat ${TESTS_TO_RUN_FILE}| sed -Ez '$ s/\n+$//' | tr '\n' "|"` --csi.junitfile ${JUNIT_OUTPUT} \ --ginkgo.v \ --ginkgo.debug \ ---ginkgo.focus "${TESTS}" +--ginkgo.skip "${TESTS}"