diff --git a/CHANGELOG.md b/CHANGELOG.md index 6f1b20886..f5854a7dc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,7 +2,13 @@ [Releases](https://github.com/NetApp/trident/releases) -## Changes since v23.01.0 +## Changes since v23.04.0 + +**Fixes:** + +- Fixed ONTAP ZAPI request to ensure the LUN serial number is queried when getting LUN attributes. + +## v23.04.0 - **IMPORTANT**: Force volume detach for ONTAP-SAN-* volumes is only supported with Kubernetes versions that have enabled the Non-Graceful Node Shutdown feature gate. Force detach must be enabled at install time via the `--enable-force-detach` Trident installer flag. diff --git a/Dockerfile b/Dockerfile index 864f37755..7c75fea56 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,6 +1,6 @@ ARG ARCH=amd64 -FROM --platform=linux/${ARCH} gcr.io/distroless/static@sha256:a01d47d4036cae5a67a9619e3d06fa14a6811a2247b4da72b4233ece4efebd57 +FROM --platform=linux/${ARCH} gcr.io/distroless/static@sha256:7198a357ff3a8ef750b041324873960cf2153c11cc50abb9d8d5f8bb089f6b4e LABEL maintainers="The NetApp Trident Team" \ app="trident.netapp.io" \ diff --git a/Makefile b/Makefile index 7c8ffc014..73550b496 100644 --- a/Makefile +++ b/Makefile @@ -323,20 +323,24 @@ ifeq ($(BUILD_CLI),$(DOCKER_BUILDX_BUILD_CLI)) -@$(call buildx_create_instance,$(BUILDX_CONFIG_FILE)) endif @$(call build_images_for_platforms,$(call all_image_platforms,$(PLATFORMS)),$(BUILD_CLI),$(TRIDENT_TAG),$(BUILDX_OUTPUT)) -# if a single image platform is specified, retag image without platform +# if a single image platform is specified and the BUILD_CLI places images in the default context, retag image without platform ifeq (1,$(words $(call all_image_platforms,$(PLATFORMS)))) +ifneq (,$(if $(findstring $(DOCKER_BUILDX_BUILD_CLI),$(BUILD_CLI)),$(findstring load,$(BUILDX_OUTPUT)),true)) @$(DOCKER_CLI) tag $(call image_tag,$(TRIDENT_TAG),$(call all_image_platforms,$(PLATFORMS))) $(MANIFEST_TAG) endif +endif operator_images: operator_binaries ifeq ($(BUILD_CLI),$(DOCKER_BUILDX_BUILD_CLI)) -@$(call buildx_create_instance,$(BUILDX_CONFIG_FILE)) endif @$(call build_operator_images_for_platforms,$(call operator_image_platforms,$(PLATFORMS)),$(BUILD_CLI),$(OPERATOR_TAG),$(BUILDX_OUTPUT)) -# if a single operator image platform is specified, retag image without platform +# if a single operator image platform is specified and the BUILD_CLI places images in the default context, retag image without platform ifeq (1,$(words $(call operator_image_platforms,$(PLATFORMS)))) +ifneq (,$(if $(findstring $(DOCKER_BUILDX_BUILD_CLI),$(BUILD_CLI)),$(findstring load,$(BUILDX_OUTPUT)),true)) @$(DOCKER_CLI) tag $(call image_tag,$(OPERATOR_TAG),$(call operator_image_platforms,$(PLATFORMS))) $(OPERATOR_MANIFEST_TAG) endif +endif # creates multi-platform image manifest manifest: images diff --git a/cli/k8s_client/client_factory.go b/cli/k8s_client/client_factory.go index 37965337f..03cb8649b 100644 --- a/cli/k8s_client/client_factory.go +++ b/cli/k8s_client/client_factory.go @@ -39,6 +39,8 @@ type Clients struct { const ( k8sTimeout = 30 * time.Second defaultNamespace = "default" + QPS = 50 + burstTime = 100 ) var cachedClients *Clients @@ -198,6 +200,8 @@ func createK8SClientsExCluster( } // Create the CLI-based Kubernetes client + restConfig.QPS = QPS + restConfig.Burst = burstTime k8sClient, err := NewKubeClient(restConfig, namespace, k8sTimeout) if err != nil { return nil, fmt.Errorf("could not initialize Kubernetes client; %v", err) } @@ -220,6 +224,8 @@ func createK8SClientsInCluster(ctx context.Context,
overrideNamespace string) (* if err != nil { return nil, err } + restConfig.QPS = QPS + restConfig.Burst = burstTime // when running in a pod, we use the Trident pod's namespace namespaceBytes, err := os.ReadFile(config.NamespaceFile) diff --git a/cli/k8s_client/yaml_factory.go b/cli/k8s_client/yaml_factory.go index 961d84075..4d7bd5a6b 100644 --- a/cli/k8s_client/yaml_factory.go +++ b/cli/k8s_client/yaml_factory.go @@ -587,7 +587,7 @@ spec: - name: asup-dir mountPath: /asup - name: csi-provisioner - image: {CSI_SIDECAR_REGISTRY}/csi-provisioner:v3.4.1 + image: {CSI_SIDECAR_REGISTRY}/csi-provisioner:v3.5.0 imagePullPolicy: {IMAGE_PULL_POLICY} securityContext: capabilities: @@ -607,7 +607,7 @@ spec: - name: socket-dir mountPath: /var/lib/csi/sockets/pluginproxy/ - name: csi-attacher - image: {CSI_SIDECAR_REGISTRY}/csi-attacher:v4.2.0 + image: {CSI_SIDECAR_REGISTRY}/csi-attacher:v4.3.0 imagePullPolicy: {IMAGE_PULL_POLICY} securityContext: capabilities: @@ -625,7 +625,7 @@ spec: - name: socket-dir mountPath: /var/lib/csi/sockets/pluginproxy/ - name: csi-resizer - image: {CSI_SIDECAR_REGISTRY}/csi-resizer:v1.7.0 + image: {CSI_SIDECAR_REGISTRY}/csi-resizer:v1.8.0 imagePullPolicy: {IMAGE_PULL_POLICY} args: - "--v={SIDECAR_LOG_LEVEL}" @@ -638,7 +638,7 @@ spec: - name: socket-dir mountPath: /var/lib/csi/sockets/pluginproxy/ - name: csi-snapshotter - image: {CSI_SIDECAR_REGISTRY}/csi-snapshotter:v6.2.1 + image: {CSI_SIDECAR_REGISTRY}/csi-snapshotter:v6.2.2 imagePullPolicy: {IMAGE_PULL_POLICY} securityContext: capabilities: @@ -955,7 +955,7 @@ spec: mountPath: /certs readOnly: true - name: driver-registrar - image: {CSI_SIDECAR_REGISTRY}/csi-node-driver-registrar:v2.7.0 + image: {CSI_SIDECAR_REGISTRY}/csi-node-driver-registrar:v2.8.0 imagePullPolicy: {IMAGE_PULL_POLICY} args: - "--v={SIDECAR_LOG_LEVEL}" @@ -1157,7 +1157,7 @@ spec: cpu: 10m memory: 20Mi - name: node-driver-registrar - image: {CSI_SIDECAR_REGISTRY}/csi-node-driver-registrar:v2.7.0 + image: {CSI_SIDECAR_REGISTRY}/csi-node-driver-registrar:v2.8.0 imagePullPolicy: {IMAGE_PULL_POLICY} args: - --v=2 @@ -1377,6 +1377,8 @@ allowHostPID: true allowHostPorts: false allowPrivilegeEscalation: true allowPrivilegedContainer: true +allowedCapabilities: +- SYS_ADMIN allowedUnsafeSysctls: null defaultAddCapabilities: null fsGroup: diff --git a/cli/k8s_client/yaml_factory_test.go b/cli/k8s_client/yaml_factory_test.go index 56daf8c4f..50d3b6a49 100644 --- a/cli/k8s_client/yaml_factory_test.go +++ b/cli/k8s_client/yaml_factory_test.go @@ -1016,7 +1016,10 @@ func TestGetOpenShiftSCCYAML(t *testing.T) { AllowHostPorts: false, AllowPrivilegeEscalation: &allowPrivilegeEscalation, AllowPrivilegedContainer: true, - DefaultAddCapabilities: nil, + AllowedCapabilities: []v1.Capability{ + "SYS_ADMIN", + }, + DefaultAddCapabilities: nil, FSGroup: scc.FSGroupStrategyOptions{ Type: "RunAsAny", }, diff --git a/config/config.go b/config/config.go index c68dcce67..c28429721 100644 --- a/config/config.go +++ b/config/config.go @@ -39,7 +39,7 @@ const ( OrchestratorName = "trident" OrchestratorClientName = OrchestratorName + "ctl" OrchestratorAPIVersion = "1" - DefaultOrchestratorVersion = "23.04.0" + DefaultOrchestratorVersion = "23.07.0" PersistentStoreBootstrapAttempts = 30 PersistentStoreBootstrapTimeout = PersistentStoreBootstrapAttempts * time.Second PersistentStoreTimeout = 10 * time.Second @@ -158,7 +158,7 @@ const ( Darwin = "darwin" // Minimum and maximum supported Kubernetes versions - KubernetesVersionMin = "v1.21" + KubernetesVersionMin = "v1.22" 
KubernetesVersionMax = "v1.27" // KubernetesCSISidecarRegistry is where the CSI sidecar images are hosted @@ -176,7 +176,7 @@ const ( /* Kubernetes operator constants */ OperatorContainerName = "trident-operator" - DefaultAutosupportImage = "docker.io/netapp/trident-autosupport:23.01" + DefaultAutosupportImage = "docker.io/netapp/trident-autosupport:23.04" // IscsiSelfHealingInterval is an interval with which the iSCSI self-healing thread is called periodically IscsiSelfHealingInterval = 300 * time.Second @@ -248,6 +248,10 @@ var ( 6: "SINGLE_NODE_SINGLE_WRITER", 7: "SINGLE_NODE_MULTI_WRITER", } + + // DisableExtraFeatures disables a subset of Trident features. + // This can be removed when ACP replaces feature-gating. + DisableExtraFeatures = true ) func IsValidProtocol(p Protocol) bool { diff --git a/contrib/docker/plugin/Dockerfile b/contrib/docker/plugin/Dockerfile index 731cd6844..00b408d1b 100644 --- a/contrib/docker/plugin/Dockerfile +++ b/contrib/docker/plugin/Dockerfile @@ -1,6 +1,6 @@ FROM busybox:uclibc as busybox -FROM gcr.io/distroless/static:b3e0897b507e86f0dab5bb99861e297d53891e84 +FROM gcr.io/distroless/static@sha256:7198a357ff3a8ef750b041324873960cf2153c11cc50abb9d8d5f8bb089f6b4e LABEL maintainers="The NetApp Trident Team" \ app="trident.netapp.io" \ diff --git a/core/orchestrator_core.go b/core/orchestrator_core.go index 91b7f6eed..416c3bccb 100644 --- a/core/orchestrator_core.go +++ b/core/orchestrator_core.go @@ -3271,9 +3271,16 @@ func (o *TridentOrchestrator) unpublishVolume(ctx context.Context, volumeName, n return fmt.Errorf(msg) } + // Get node attributes from the node ID + nodeInfo, err := o.GetNode(ctx, nodeName) + if err != nil { + Logc(ctx).WithError(err).WithField("node", nodeName).Error("Node info not found.") + return err + } publishInfo := &utils.VolumePublishInfo{ HostName: nodeName, TridentUUID: o.uuid, + HostNQN: nodeInfo.NQN, } volume, ok := o.subordinateVolumes[volumeName] @@ -3434,8 +3441,9 @@ func (o *TridentOrchestrator) AttachVolume( return utils.MountDevice(ctx, loopDeviceName, mountpoint, publishInfo.SubvolumeMountOptions, isRawBlock) } } else { - return utils.AttachISCSIVolumeRetry(ctx, volumeName, mountpoint, publishInfo, map[string]string{}, + _, err := utils.AttachISCSIVolumeRetry(ctx, volumeName, mountpoint, publishInfo, map[string]string{}, AttachISCSIVolumeTimeoutLong) + return err } } @@ -3945,7 +3953,7 @@ func (o *TridentOrchestrator) ImportSnapshot( // Complete the snapshot config. snapshotConfig.VolumeInternalName = volume.Config.InternalName snapshotConfig.LUKSPassphraseNames = volume.Config.LUKSPassphraseNames - snapshotConfig.ImportNotManaged = true // All imported snapshots are not managed. + snapshotConfig.ImportNotManaged = volume.Config.ImportNotManaged // Snapshots inherit the managed state of their volume. // Query the storage backend for the snapshot.
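// Both configs are passed so the driver can locate the snapshot by its internal name on the correct volume.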
snapshot, err := backend.GetSnapshot(ctx, snapshotConfig, volume.Config) diff --git a/core/orchestrator_core_test.go b/core/orchestrator_core_test.go index d89a11f70..14aa39fc1 100644 --- a/core/orchestrator_core_test.go +++ b/core/orchestrator_core_test.go @@ -6963,6 +6963,69 @@ func TestImportSnapshot(t *testing.T) { } snapName := "snapshot-import" snapInternalName := "snap.2023-05-23_175116" + snapConfig := &storage.SnapshotConfig{ + Version: "1", + Name: snapName, + VolumeName: volumeName, + InternalName: snapInternalName, + VolumeInternalName: volumeInternalName, + ImportNotManaged: false, + } + snapshot := &storage.Snapshot{ + Config: snapConfig, + Created: "2023-05-15T17:04:09Z", + SizeBytes: 1024, + } + + // Initialize mocks. + mockCtrl := gomock.NewController(t) + mockBackend := mockstorage.NewMockBackend(mockCtrl) + mockStore := mockpersistentstore.NewMockStoreClient(mockCtrl) + + // Set up common mock expectations between test cases. + mockBackend.EXPECT().GetDriverName().Return(backendUUID).AnyTimes() + mockBackend.EXPECT().Name().Return(backendUUID).AnyTimes() + mockBackend.EXPECT().State().Return(storage.Online).AnyTimes() + mockBackend.EXPECT().BackendUUID().Return(backendUUID).AnyTimes() + + // Set up test case specific mock expectations and inject mocks into core. + mockBackend.EXPECT().GetSnapshot( + gomock.Any(), snapConfig, volume.Config, + ).Return(snapshot, nil) + mockStore.EXPECT().AddSnapshot(gomock.Any(), snapshot).Return(nil) + + o.storeClient = mockStore + o.backends[volume.BackendUUID] = mockBackend + o.volumes[snapConfig.VolumeName] = volume + + // Call method under test and make assertions. + importedSnap, err := o.ImportSnapshot(ctx(), snapConfig) + assert.NoError(t, err) + assert.NotNil(t, importedSnap) + assert.EqualValues(t, snapshot.ConstructExternal(), importedSnap) +} + +func TestImportSnapshot_VolumeNotManaged(t *testing.T) { + o := getOrchestrator(t, false) + + // Initialize variables used in all subtests. 
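+ // The volume below is marked ImportNotManaged, so the imported snapshot's config is expected to inherit that unmanaged state.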
+ backendUUID := "test-backend" + volumeName := "pvc-e9748b6b-8240-4fd8-97bc-868bf064ecd4" + volumeInternalName := "trident_pvc_e9748b6b_8240_4fd8_97bc_868bf064ecd4" + volume := &storage.Volume{ + Config: &storage.VolumeConfig{ + Version: "", + Name: volumeName, + InternalName: volumeInternalName, + ImportOriginalName: "import-" + volumeName, + ImportBackendUUID: "import-" + backendUUID, + ImportNotManaged: true, + LUKSPassphraseNames: nil, + }, + BackendUUID: backendUUID, + } + snapName := "snapshot-import" + snapInternalName := "snap.2023-05-23_175116" snapConfig := &storage.SnapshotConfig{ Version: "1", Name: snapName, diff --git a/deploy/bundle_post_1_25.yaml b/deploy/bundle_post_1_25.yaml index fc3a51a74..3da3af0ad 100644 --- a/deploy/bundle_post_1_25.yaml +++ b/deploy/bundle_post_1_25.yaml @@ -454,7 +454,7 @@ spec: fieldPath: metadata.name - name: OPERATOR_NAME value: trident-operator - image: docker.io/netapp/trident-operator:23.04.0 + image: docker.io/netapp/trident-operator:23.07.0 imagePullPolicy: IfNotPresent name: trident-operator securityContext: diff --git a/deploy/bundle_pre_1_25.yaml b/deploy/bundle_pre_1_25.yaml index 00cb3a0d7..57fff3fb4 100644 --- a/deploy/bundle_pre_1_25.yaml +++ b/deploy/bundle_pre_1_25.yaml @@ -457,7 +457,7 @@ spec: fieldPath: metadata.name - name: OPERATOR_NAME value: trident-operator - image: docker.io/netapp/trident-operator:23.04.0 + image: docker.io/netapp/trident-operator:23.07.0 imagePullPolicy: IfNotPresent name: trident-operator securityContext: diff --git a/deploy/crds/tridentorchestrator_cr_autosupport.yaml b/deploy/crds/tridentorchestrator_cr_autosupport.yaml index 164de9e6b..d963cab57 100644 --- a/deploy/crds/tridentorchestrator_cr_autosupport.yaml +++ b/deploy/crds/tridentorchestrator_cr_autosupport.yaml @@ -6,5 +6,5 @@ spec: debug: true namespace: trident silenceAutosupport: false - autosupportImage: "netapp/trident-autosupport:23.01" + autosupportImage: "netapp/trident-autosupport:23.04" autosupportProxy: "http://proxy.example.com:8888" diff --git a/deploy/crds/tridentorchestrator_cr_customimage.yaml b/deploy/crds/tridentorchestrator_cr_customimage.yaml index d19516db0..c7465dab6 100644 --- a/deploy/crds/tridentorchestrator_cr_customimage.yaml +++ b/deploy/crds/tridentorchestrator_cr_customimage.yaml @@ -5,4 +5,4 @@ metadata: spec: debug: true namespace: trident - tridentImage: localhost:5000/netapp/trident:23.01 + tridentImage: localhost:5000/netapp/trident:23.04 diff --git a/deploy/crds/tridentorchestrator_cr_imagepullsecrets.yaml b/deploy/crds/tridentorchestrator_cr_imagepullsecrets.yaml index af314778c..c3dad1b69 100644 --- a/deploy/crds/tridentorchestrator_cr_imagepullsecrets.yaml +++ b/deploy/crds/tridentorchestrator_cr_imagepullsecrets.yaml @@ -5,6 +5,6 @@ metadata: spec: debug: true namespace: trident - tridentImage: netapp/trident:23.04.0 + tridentImage: netapp/trident:23.07.0 imagePullSecrets: - thisisasecret diff --git a/deploy/operator.yaml b/deploy/operator.yaml index 2e74d7e8e..486c1b380 100644 --- a/deploy/operator.yaml +++ b/deploy/operator.yaml @@ -22,7 +22,7 @@ spec: serviceAccountName: trident-operator containers: - name: trident-operator - image: docker.io/netapp/trident-operator:23.04.0 + image: docker.io/netapp/trident-operator:23.07.0 command: - "/trident-operator" - "--debug" diff --git a/frontend/crd/snapshot_restore.go b/frontend/crd/snapshot_restore.go index 714a73d81..c33e8f9f8 100644 --- a/frontend/crd/snapshot_restore.go +++ b/frontend/crd/snapshot_restore.go @@ -11,6 +11,7 @@ import ( metav1 
"k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/tools/cache" + "github.com/netapp/trident/config" . "github.com/netapp/trident/logging" netappv1 "github.com/netapp/trident/persistent_store/crd/apis/netapp/v1" "github.com/netapp/trident/storage" @@ -61,6 +62,10 @@ func (c *TridentCrdController) handleActionSnapshotRestore(keyItem *KeyItem) (re } }() + if config.DisableExtraFeatures { + return errors.UnsupportedError("snapshot restore is not enabled") + } + // Detect a CR that is in progress but is not a retry from the workqueue. This can only happen // if Trident restarted while processing a CR, in which case we move the CR directly to Failed. if actionCR.InProgress() && !keyItem.isRetry { diff --git a/frontend/crd/snapshot_restore_test.go b/frontend/crd/snapshot_restore_test.go index 4b8c0f580..3a72498fe 100644 --- a/frontend/crd/snapshot_restore_test.go +++ b/frontend/crd/snapshot_restore_test.go @@ -14,6 +14,7 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "github.com/netapp/trident/config" mockcore "github.com/netapp/trident/mocks/mock_core" netappv1 "github.com/netapp/trident/persistent_store/crd/apis/netapp/v1" "github.com/netapp/trident/utils" @@ -141,6 +142,9 @@ func fakeTASR(name, namespace, pvcName, vsName string) *netappv1.TridentActionSn } func TestHandleActionSnapshotRestore(t *testing.T) { + defer func() { config.DisableExtraFeatures = true }() + config.DisableExtraFeatures = false + mockCtrl := gomock.NewController(t) orchestrator := mockcore.NewMockOrchestrator(mockCtrl) @@ -237,7 +241,78 @@ func TestHandleActionSnapshotRestore(t *testing.T) { assert.True(t, apierrors.IsNotFound(err), "TASR should not have been found") } +func TestHandleActionSnapshotRestore_Disabled(t *testing.T) { + mockCtrl := gomock.NewController(t) + orchestrator := mockcore.NewMockOrchestrator(mockCtrl) + + tridentNamespace := "trident" + kubeClient := GetTestKubernetesClientset() + snapClient := GetTestSnapshotClientset() + crdClient := GetTestCrdClientset() + crdController, err := newTridentCrdControllerImpl(orchestrator, tridentNamespace, kubeClient, snapClient, crdClient) + if err != nil { + t.Fatalf("cannot create Trident CRD controller frontend; %v", err) + } + + // Activate the CRD controller and start monitoring + if err = crdController.Activate(); err != nil { + t.Fatalf("error while activating; %v", err) + } + time.Sleep(250 * time.Millisecond) + + pvc := fakeSnapRestorePVC(snapRestorePVC1, namespace1, snapRestorePV1) + _, _ = kubeClient.CoreV1().PersistentVolumeClaims(namespace1).Create(ctx(), pvc, createOpts) + + pv := fakePV(snapRestorePVC1, namespace1, snapRestorePV1) + _, _ = kubeClient.CoreV1().PersistentVolumes().Create(ctx(), pv, createOpts) + + vs1Time := time.Now() + vs2Time := vs1Time.Add(1 * time.Second) + vs3Time := vs2Time.Add(1 * time.Second) + + vs1 := fakeVS(snapRestoreSnap1, namespace1, snapRestoreVSC1, snapRestorePVC1, vs1Time) + _, _ = snapClient.SnapshotV1().VolumeSnapshots(namespace1).Create(ctx(), vs1, createOpts) + + vsc1 := fakeVSC(snapRestoreSnap1, namespace1, snapRestoreVSC1, snapRestoreSnapHandle1, vs1Time) + _, _ = snapClient.SnapshotV1().VolumeSnapshotContents().Create(ctx(), vsc1, createOpts) + + vs2 := fakeVS(snapRestoreSnap2, namespace1, snapRestoreVSC2, snapRestorePVC1, vs2Time) + _, _ = snapClient.SnapshotV1().VolumeSnapshots(namespace1).Create(ctx(), vs2, createOpts) + + vsc2 := fakeVSC(snapRestoreSnap2, namespace1, snapRestoreVSC2, snapRestoreSnapHandle2, vs2Time) + _, _ = 
snapClient.SnapshotV1().VolumeSnapshotContents().Create(ctx(), vsc2, createOpts) + + vs3 := fakeVS(snapRestoreSnap3, namespace1, snapRestoreVSC3, snapRestorePVC1, vs3Time) + _, _ = snapClient.SnapshotV1().VolumeSnapshots(namespace1).Create(ctx(), vs3, createOpts) + + vsc3 := fakeVSC(snapRestoreSnap3, namespace1, snapRestoreVSC3, snapRestoreSnapHandle3, vs3Time) + _, _ = snapClient.SnapshotV1().VolumeSnapshotContents().Create(ctx(), vsc3, createOpts) + + tasr := fakeTASR(tasr1, namespace1, snapRestorePVC1, snapRestoreSnap3) + _, _ = crdClient.TridentV1().TridentActionSnapshotRestores(namespace1).Create(ctx(), tasr, createOpts) + + // Wait until the operation completes + for i := 0; i < 20; i++ { + time.Sleep(250 * time.Millisecond) + + tasr, err = crdClient.TridentV1().TridentActionSnapshotRestores(namespace1).Get(ctx(), tasr1, getOpts) + if err != nil { + if apierrors.IsNotFound(err) { + continue + } + break + } else if tasr.IsComplete() { + break + } + } + + assert.True(t, tasr.Failed(), "TASR operation did not fail") +} + func TestHandleActionSnapshotRestore_InProgressError(t *testing.T) { + defer func() { config.DisableExtraFeatures = true }() + config.DisableExtraFeatures = false + mockCtrl := gomock.NewController(t) orchestrator := mockcore.NewMockOrchestrator(mockCtrl) diff --git a/frontend/crd/trident_action_mirror_update.go b/frontend/crd/trident_action_mirror_update.go index 69e14665b..63e2de33d 100644 --- a/frontend/crd/trident_action_mirror_update.go +++ b/frontend/crd/trident_action_mirror_update.go @@ -11,6 +11,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/tools/cache" + "github.com/netapp/trident/config" . "github.com/netapp/trident/logging" netappv1 "github.com/netapp/trident/persistent_store/crd/apis/netapp/v1" "github.com/netapp/trident/storage" @@ -62,6 +63,10 @@ func (c *TridentCrdController) handleActionMirrorUpdate(keyItem *KeyItem) (updat } }() + if config.DisableExtraFeatures { + return errors.UnsupportedError("mirror update is not enabled") + } + // Detect a CR that is in progress but is not a retry from the workqueue. // This can only happen if Trident restarted while processing a CR, in which case we move the CR directly to Failed. 
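// When the DisableExtraFeatures gate above is closed, the returned UnsupportedError likewise moves the CR to Failed, as exercised by TestHandleActionMirrorUpdate_InProgress_Disabled.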
if actionCR.InProgress() && !keyItem.isRetry { diff --git a/frontend/crd/trident_action_mirror_update_test.go b/frontend/crd/trident_action_mirror_update_test.go index a996ea8c9..32f9fba8e 100644 --- a/frontend/crd/trident_action_mirror_update_test.go +++ b/frontend/crd/trident_action_mirror_update_test.go @@ -13,6 +13,7 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "github.com/netapp/trident/config" mockcore "github.com/netapp/trident/mocks/mock_core" netappv1 "github.com/netapp/trident/persistent_store/crd/apis/netapp/v1" "github.com/netapp/trident/utils" @@ -107,6 +108,9 @@ func fakeTAMU(name, namespace, tmrName, snapshotHandle string) *netappv1.Trident } func TestHandleActionMirrorUpdate(t *testing.T) { + defer func() { config.DisableExtraFeatures = true }() + config.DisableExtraFeatures = false + mockCtrl := gomock.NewController(t) orchestrator := mockcore.NewMockOrchestrator(mockCtrl) @@ -258,6 +262,9 @@ func TestHandleActionMirrorUpdate_ValidateFailure(t *testing.T) { } func TestHandleActionMirrorUpdate_InProgress(t *testing.T) { + defer func() { config.DisableExtraFeatures = true }() + config.DisableExtraFeatures = false + mockCtrl := gomock.NewController(t) orchestrator := mockcore.NewMockOrchestrator(mockCtrl) @@ -320,7 +327,56 @@ func TestHandleActionMirrorUpdate_InProgress(t *testing.T) { assert.True(t, tamu.Succeeded(), "TAMU operation failed") } +func TestHandleActionMirrorUpdate_InProgress_Disabled(t *testing.T) { + mockCtrl := gomock.NewController(t) + orchestrator := mockcore.NewMockOrchestrator(mockCtrl) + + tridentNamespace := "trident" + kubeClient := GetTestKubernetesClientset() + snapClient := GetTestSnapshotClientset() + crdClient := GetTestCrdClientset() + crdController, err := newTridentCrdControllerImpl(orchestrator, tridentNamespace, kubeClient, snapClient, crdClient) + if err != nil { + t.Fatalf("cannot create Trident CRD controller frontend, error: %v", err.Error()) + } + + // Activate the CRD controller and start monitoring + if err = crdController.Activate(); err != nil { + t.Fatalf("error while activating: %v", err.Error()) + } + delaySeconds(1) + + pvc := fakePVC(pvc1, namespace1, pv1) + _, _ = kubeClient.CoreV1().PersistentVolumeClaims(namespace1).Create(ctx(), pvc, createOpts) + + tmr := fakeTMR(tmrName1, namespace1, pvc1) + _, _ = crdClient.TridentV1().TridentMirrorRelationships(namespace1).Create(ctx(), tmr, createOpts) + + tamu := fakeTAMU(tamu1, namespace1, tmrName1, snapHandle1) + _, _ = crdClient.TridentV1().TridentActionMirrorUpdates(namespace1).Create(ctx(), tamu, createOpts) + + // Wait until the operation completes + for i := 0; i < 5; i++ { + time.Sleep(250 * time.Millisecond) + + tamu, err = crdClient.TridentV1().TridentActionMirrorUpdates(namespace1).Get(ctx(), tamu1, getOpts) + if err != nil { + if apierrors.IsNotFound(err) { + continue + } + break + } else if tamu.IsComplete() { + break + } + } + + assert.True(t, tamu.Failed(), "TAMU operation was not disabled") +} + func TestHandleActionMirrorUpdate_InProgressAtStartup(t *testing.T) { + defer func() { config.DisableExtraFeatures = true }() + config.DisableExtraFeatures = false + mockCtrl := gomock.NewController(t) orchestrator := mockcore.NewMockOrchestrator(mockCtrl) @@ -362,6 +418,9 @@ func TestHandleActionMirrorUpdate_InProgressAtStartup(t *testing.T) { } func TestUpdateActionMirrorUpdateCRInProgress(t *testing.T) { + defer func() { config.DisableExtraFeatures = true }() + config.DisableExtraFeatures = false + mockCtrl := 
gomock.NewController(t) orchestrator := mockcore.NewMockOrchestrator(mockCtrl) transferTime, _ := time.Parse(utils.TimestampFormat, previousTransferTime) @@ -397,6 +456,9 @@ func TestUpdateActionMirrorUpdateCRInProgress(t *testing.T) { } func TestUpdateActionMirrorUpdateCRComplete_Succeeded(t *testing.T) { + defer func() { config.DisableExtraFeatures = true }() + config.DisableExtraFeatures = false + mockCtrl := gomock.NewController(t) orchestrator := mockcore.NewMockOrchestrator(mockCtrl) @@ -430,6 +492,9 @@ func TestUpdateActionMirrorUpdateCRComplete_Succeeded(t *testing.T) { } func TestUpdateActionMirrorUpdateCRComplete_Failed(t *testing.T) { + defer func() { config.DisableExtraFeatures = true }() + config.DisableExtraFeatures = false + mockCtrl := gomock.NewController(t) orchestrator := mockcore.NewMockOrchestrator(mockCtrl) diff --git a/frontend/csi/controller_api/rest.go b/frontend/csi/controller_api/rest.go index 3fc8a5509..b3c781e69 100644 --- a/frontend/csi/controller_api/rest.go +++ b/frontend/csi/controller_api/rest.go @@ -127,10 +127,20 @@ func (c *ControllerRestClient) CreateNode(ctx context.Context, node *utils.Node) if err != nil { return CreateNodeResponse{}, fmt.Errorf("error parsing create node request; %v", err) } - resp, respBody, err := c.InvokeAPI(ctx, nodeData, "PUT", config.NodeURL+"/"+node.Name, false, false) + + createRequest := func() (*http.Response, []byte, error) { + resp, respBody, err := c.InvokeAPI(ctx, nodeData, "PUT", config.NodeURL+"/"+node.Name, false, false) + if err != nil { + return resp, respBody, fmt.Errorf("could not log into the Trident CSI Controller: %v", err) + } + return resp, respBody, nil + } + + resp, respBody, err := c.requestAndRetry(ctx, createRequest) if err != nil { - return CreateNodeResponse{}, fmt.Errorf("could not log into the Trident CSI Controller: %v", err) + return CreateNodeResponse{}, fmt.Errorf("failed during retry for CreateNode: %v", err) } + createResponse := CreateNodeResponse{} if err := json.Unmarshal(respBody, &createResponse); err != nil { return createResponse, fmt.Errorf("could not parse node : %s; %v", string(respBody), err) diff --git a/frontend/csi/node_server.go b/frontend/csi/node_server.go index 1f5997163..4d6134dd6 100644 --- a/frontend/csi/node_server.go +++ b/frontend/csi/node_server.go @@ -422,7 +422,8 @@ func (p *Plugin) NodeExpandVolume( }).Warn("Received something other than the expected stagingTargetPath.") } - err = p.nodeExpandVolume(ctx, &trackingInfo.VolumePublishInfo, requiredBytes, stagingTargetPath, volumeId, req.GetSecrets()) + err = p.nodeExpandVolume(ctx, &trackingInfo.VolumePublishInfo, requiredBytes, stagingTargetPath, volumeId, + req.GetSecrets()) if err != nil { return nil, err } @@ -695,6 +696,8 @@ func (p *Plugin) nodeGetInfo(ctx context.Context) *utils.Node { nvmeNQN, err = p.nvmeHandler.GetHostNqn(ctx) if err != nil { Logc(ctx).WithError(err).Warn("Problem getting Host NQN.") + } else { + Logc(ctx).WithField("NQN", nvmeNQN).Debug("Discovered NQN.") } } else { Logc(ctx).Info("NVMe is not active on this host.") @@ -1015,11 +1018,12 @@ func (p *Plugin) populatePublishedSessions(ctx context.Context) { volumeIDs := utils.GetAllVolumeIDs(ctx, tridentDeviceInfoPath) for _, volumeID := range volumeIDs { trackingInfo, err := p.nodeHelper.ReadTrackingInfo(ctx, volumeID) - if err != nil { + if err != nil || trackingInfo == nil { Logc(ctx).WithFields(LogFields{ - "VolumeID": volumeID, - "Error": err.Error(), - }).Error("Volume tracking file info not found.") + "volumeID": volumeID, + "error": 
err, + "isEmpty": trackingInfo == nil, + }).Error("Volume tracking file info not found or is empty.") continue } @@ -1035,6 +1039,26 @@ } } +func (p *Plugin) readAllTrackingFiles(ctx context.Context) []utils.VolumePublishInfo { + publishInfos := make([]utils.VolumePublishInfo, 0) + volumeIDs := utils.GetAllVolumeIDs(ctx, tridentDeviceInfoPath) + for _, volumeID := range volumeIDs { + trackingInfo, err := p.nodeHelper.ReadTrackingInfo(ctx, volumeID) + if err != nil || trackingInfo == nil { + Logc(ctx).WithError(err).WithFields(LogFields{ + "volumeID": volumeID, + "isEmpty": trackingInfo == nil, + }).Error("Volume tracking file info not found or is empty.") + + continue + } + + publishInfos = append(publishInfos, trackingInfo.VolumePublishInfo) + } + + return publishInfos +} + func (p *Plugin) nodeStageISCSIVolume( ctx context.Context, req *csi.NodeStageVolumeRequest, publishInfo *utils.VolumePublishInfo, ) error { @@ -1096,7 +1120,8 @@ } } - if err = p.ensureAttachISCSIVolume(ctx, req, "", publishInfo, AttachISCSIVolumeTimeoutShort); err != nil { + mpathSize, err := p.ensureAttachISCSIVolume(ctx, req, "", publishInfo, AttachISCSIVolumeTimeoutShort) + if err != nil { return err } @@ -1105,7 +1130,8 @@ return err } if isLUKS { - luksDevice, err := utils.NewLUKSDeviceFromMappingPath(ctx, publishInfo.DevicePath, req.VolumeContext["internalName"]) + luksDevice, err := utils.NewLUKSDeviceFromMappingPath(ctx, publishInfo.DevicePath, + req.VolumeContext["internalName"]) if err != nil { return err } @@ -1116,6 +1142,20 @@ } } + if mpathSize > 0 { + Logc(ctx).Warn("Multipath device size may not be correct, performing gratuitous resize.") + + err = p.nodeExpandVolume(ctx, publishInfo, mpathSize, stagingTargetPath, volumeId, req.GetSecrets()) + if err != nil { + Logc(ctx).WithFields(LogFields{ + "multipathDevice": publishInfo.DevicePath, + "volumeID": volumeId, + "size": mpathSize, + "err": err, + }).Warn("Attempt to perform gratuitous resize failed.") + } + } + volTrackingInfo := &utils.VolumeTrackingInfo{ VolumePublishInfo: *publishInfo, StagingTargetPath: stagingTargetPath, @@ -1137,28 +1177,32 @@ func (p *Plugin) ensureAttachISCSIVolume( ctx context.Context, req *csi.NodeStageVolumeRequest, mountpoint string, publishInfo *utils.VolumePublishInfo, attachTimeout time.Duration, -) error { +) (int64, error) { + var err error + var mpathSize int64 + // Perform the login/rescan/discovery/(optionally)format, mount & get the device back in the publish info - if err := utils.AttachISCSIVolumeRetry(ctx, req.VolumeContext["internalName"], mountpoint, publishInfo, + if mpathSize, err = utils.AttachISCSIVolumeRetry(ctx, req.VolumeContext["internalName"], mountpoint, publishInfo, req.GetSecrets(), attachTimeout); err != nil { // Did we fail to log in? if errors.IsAuthError(err) { // Update CHAP info from the controller and try one more time.
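// The controller is the source of truth for CHAP credentials, so refresh them before the single retry below.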
Logc(ctx).Warn("iSCSI login failed; will retrieve CHAP credentials from Trident controller and try again.") if err = p.updateChapInfoFromController(ctx, req, publishInfo); err != nil { - return status.Error(codes.Internal, err.Error()) + return mpathSize, status.Error(codes.Internal, err.Error()) } - if err = utils.AttachISCSIVolumeRetry(ctx, req.VolumeContext["internalName"], mountpoint, publishInfo, + if mpathSize, err = utils.AttachISCSIVolumeRetry(ctx, req.VolumeContext["internalName"], mountpoint, + publishInfo, req.GetSecrets(), attachTimeout); err != nil { // Bail out no matter what as we've now tried with updated credentials - return status.Error(codes.Internal, err.Error()) + return mpathSize, status.Error(codes.Internal, err.Error()) } } else { - return status.Error(codes.Internal, fmt.Sprintf("failed to stage volume: %v", err)) + return mpathSize, status.Error(codes.Internal, fmt.Sprintf("failed to stage volume: %v", err)) } } - return nil + return mpathSize, nil } func (p *Plugin) updateChapInfoFromController( @@ -1190,18 +1234,41 @@ func (p *Plugin) nodeUnstageISCSIVolume( return fmt.Errorf("could not parse LUKSEncryption into a bool, got %v", publishInfo.LUKSEncryption) } if isLUKS { - err := utils.EnsureLUKSDeviceClosed(ctx, publishInfo.DevicePath) + // Before closing the device, get the corresponding DM device. + publishedLUKsDevice, err := utils.GetUnderlyingDevicePathForLUKSDevice(ctx, publishInfo.DevicePath) + if err != nil { + // Not fatal; log the failure and continue unstaging. + Logc(ctx).WithFields(LogFields{ + "devicePath": publishInfo.DevicePath, + "LUN": publishInfo.IscsiLunNumber, + "err": err, + }).Error("Failed to verify the multipath device, could not determine" + + " underlying device for LUKS mapping.") + } + + err = utils.EnsureLUKSDeviceClosed(ctx, publishInfo.DevicePath) if err != nil { return err } + + // The LUKS device path is of no use to the remaining steps; from here on it should be + // the underlying DM device, or empty if that could not be determined. + publishInfo.DevicePath = publishedLUKsDevice } } // Delete the device from the host. - unmappedMpathDevice, err := utils.PrepareDeviceForRemoval(ctx, int(publishInfo.IscsiLunNumber), - publishInfo.IscsiTargetIQN, p.unsafeDetach, force) - if nil != err && !p.unsafeDetach { - return status.Error(codes.Internal, err.Error()) + unmappedMpathDevice, err := utils.PrepareDeviceForRemoval(ctx, publishInfo, nil, p.unsafeDetach, force) + if err != nil { + if errors.IsISCSISameLunNumberError(err) { + // Retry, this time passing the publish info from every tracking file on the node. + unmappedMpathDevice, err = utils.PrepareDeviceForRemoval(ctx, publishInfo, p.readAllTrackingFiles(ctx), + p.unsafeDetach, force) + } + + if err != nil && !p.unsafeDetach { + return status.Error(codes.Internal, err.Error()) + } } // Get map of hosts and sessions for given Target IQN.
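The unstage path above follows a try-cheap-first pattern: PrepareDeviceForRemoval is first called with nil publish infos, and only when it reports a same-LUN-number conflict does the node pay the cost of reading every tracking file via readAllTrackingFiles, keeping the common unstage path to a single tracking-file read. A minimal, self-contained sketch of that fallback shape, with illustrative names (removeDevice, errSameLunNumber) rather than Trident's actual utils signatures:

package main

import (
	"errors"
	"fmt"
)

// errSameLunNumber stands in for the condition errors.IsISCSISameLunNumberError
// detects: another volume shares this LUN number, so removal needs the publish
// info of every tracked volume on the node.
var errSameLunNumber = errors.New("same LUN number in use")

// removeDevice is a hypothetical removal routine; here it always demands the
// extra per-volume context, purely to exercise the fallback.
func removeDevice(lun int, allInfos []string) error {
	if allInfos == nil {
		return errSameLunNumber
	}
	fmt.Printf("removed LUN %d using %d tracking files\n", lun, len(allInfos))
	return nil
}

// removeWithFallback tries the cheap call first and gathers tracking files
// only when the sentinel error demands it.
func removeWithFallback(lun int, loadAll func() []string) error {
	err := removeDevice(lun, nil)
	if errors.Is(err, errSameLunNumber) {
		err = removeDevice(lun, loadAll())
	}
	return err
}

func main() {
	loadAll := func() []string { return []string{"vol-a", "vol-b"} }
	if err := removeWithFallback(0, loadAll); err != nil {
		fmt.Println("removal failed:", err)
	}
}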
@@ -1323,7 +1390,8 @@ func (p *Plugin) nodePublishISCSIVolume( } if isLUKS { // Rotate the LUKS passphrase if needed, on failure, log and continue to publish - luksDevice, err := utils.NewLUKSDeviceFromMappingPath(ctx, publishInfo.DevicePath, req.VolumeContext["internalName"]) + luksDevice, err := utils.NewLUKSDeviceFromMappingPath(ctx, publishInfo.DevicePath, + req.VolumeContext["internalName"]) if err != nil { return nil, status.Error(codes.Internal, err.Error()) } @@ -1887,7 +1955,7 @@ func (p *Plugin) selfHealingRectifySession(ctx context.Context, portal string, a publishedCHAPCredentials := publishInfo.IscsiChapInfo - if err = p.ensureAttachISCSIVolume(ctx, req, "", &publishInfo, iSCSILoginTimeout); err != nil { + if _, err = p.ensureAttachISCSIVolume(ctx, req, "", &publishInfo, iSCSILoginTimeout); err != nil { return fmt.Errorf("failed to login to the target") } @@ -2121,7 +2189,8 @@ func (p *Plugin) nodeStageNVMeVolume( publishInfo.NVMeTargetIPs = strings.Split(req.PublishContext["nvmeTargetIPs"], ",") publishInfo.SANType = req.PublishContext["SANType"] - if err := utils.AttachNVMeVolumeRetry(ctx, req.VolumeContext["internalName"], "", publishInfo, nil, nvmeAttachTimeout); err != nil { + if err := utils.AttachNVMeVolumeRetry(ctx, req.VolumeContext["internalName"], "", publishInfo, nil, + nvmeAttachTimeout); err != nil { return err } @@ -2131,7 +2200,8 @@ func (p *Plugin) nodeStageNVMeVolume( } if isLUKS { - luksDevice, err := utils.NewLUKSDeviceFromMappingPath(ctx, publishInfo.DevicePath, req.VolumeContext["internalName"]) + luksDevice, err := utils.NewLUKSDeviceFromMappingPath(ctx, publishInfo.DevicePath, + req.VolumeContext["internalName"]) if err != nil { return err } @@ -2243,7 +2313,8 @@ func (p *Plugin) nodePublishNVMeVolume( } if isLUKS { // Rotate the LUKS passphrase if needed, on failure, log and continue to publish - luksDevice, err := utils.NewLUKSDeviceFromMappingPath(ctx, publishInfo.DevicePath, req.VolumeContext["internalName"]) + luksDevice, err := utils.NewLUKSDeviceFromMappingPath(ctx, publishInfo.DevicePath, + req.VolumeContext["internalName"]) if err != nil { return nil, status.Error(codes.Internal, err.Error()) } diff --git a/frontend/rest/controller_routes.go b/frontend/rest/controller_routes.go index 051b52d42..bcfe6d97a 100644 --- a/frontend/rest/controller_routes.go +++ b/frontend/rest/controller_routes.go @@ -23,10 +23,12 @@ type Routes []Route const ( // arbitrarily large number to limit maximum routines waiting for global lock - updateNodeRateLimit = 10000.0 // requests per second - updateNodeBurst = 10000 // maximum request burst - getNodeRateLimit = 10000.0 // requests per second - getNodeBurst = 10000 // maximum request burst + updateNodeRateLimit = 10000.0 // requests per second + updateNodeBurst = 10000 // maximum request burst + getNodeRateLimit = 10000.0 // requests per second + getNodeBurst = 10000 // maximum request burst + addOrUpdateNodeRateLimit = 50.0 // requests per second + addOrUpdateNodeBurst = 100 // maximum request burst ) var controllerRoutes = Routes{ @@ -160,7 +162,9 @@ var controllerRoutes = Routes{ "AddOrUpdateNode", "PUT", config.NodeURL + "/{node}", - nil, + []mux.MiddlewareFunc{ + rateLimiterMiddleware(addOrUpdateNodeRateLimit, addOrUpdateNodeBurst), + }, AddNode, }, Route{ diff --git a/go.mod b/go.mod index 35d4a2afe..00af5f0b1 100755 --- a/go.mod +++ b/go.mod @@ -3,23 +3,23 @@ module github.com/netapp/trident go 1.20 require ( - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.1 + github.com/Azure/azure-sdk-for-go/sdk/azcore 
v1.7.0 github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/netapp/armnetapp/v4 v4.0.0 - github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resourcegraph/armresourcegraph v0.7.0 + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resourcegraph/armresourcegraph v0.7.1 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armfeatures v1.1.0 - github.com/RoaringBitmap/roaring v1.2.3 - github.com/cenkalti/backoff/v4 v4.2.0 + github.com/RoaringBitmap/roaring v1.3.0 + github.com/cenkalti/backoff/v4 v4.2.1 github.com/container-storage-interface/spec v1.8.0 github.com/docker/go-plugins-helpers v0.0.0-20211224144127-6eecb7beb651 github.com/dustin/go-humanize v1.0.2-0.20230319011938-bd1b3e1a20a1 - github.com/elastic/go-sysinfo v1.10.0 + github.com/elastic/go-sysinfo v1.11.0 github.com/evanphx/json-patch/v5 v5.6.0 github.com/ghodss/yaml v1.0.1-0.20220118164431-d8423dcdf344 // 1/18/2022 - github.com/go-openapi/errors v0.20.3 - github.com/go-openapi/runtime v0.25.0 + github.com/go-openapi/errors v0.20.4 + github.com/go-openapi/runtime v0.26.0 github.com/go-openapi/strfmt v0.21.7 - github.com/go-openapi/swag v0.22.3 + github.com/go-openapi/swag v0.22.4 github.com/go-openapi/validate v0.22.1 github.com/golang/mock v1.6.0 github.com/golang/protobuf v1.5.3 @@ -27,47 +27,51 @@ require ( github.com/google/uuid v1.3.0 github.com/gorilla/mux v1.8.0 github.com/kr/secureheader v0.2.0 - github.com/kubernetes-csi/csi-lib-utils v0.13.0 + github.com/kubernetes-csi/csi-lib-utils v0.14.0 github.com/kubernetes-csi/csi-proxy/client v1.1.2 github.com/kubernetes-csi/external-snapshotter/client/v6 v6.2.0 - github.com/mattermost/xml-roundtrip-validator v0.1.1-0.20211207080247-460296229913 + github.com/mattermost/xml-roundtrip-validator v0.1.1-0.20230502164821-3079e7b80fca github.com/mitchellh/copystructure v1.2.0 github.com/mitchellh/hashstructure/v2 v2.0.2 - github.com/olekukonko/tablewriter v0.0.6-0.20210304033056-74c60be0ef68 - github.com/openshift/api v0.0.0-20230406152840-ce21e3fe5da2 - github.com/prometheus/client_golang v1.14.0 - github.com/sirupsen/logrus v1.9.0 + github.com/olekukonko/tablewriter v0.0.6-0.20230422125635-f6b4e4ae60d8 + github.com/openshift/api v0.0.0-20230711095040-ca06f4a23b64 + github.com/prometheus/client_golang v1.16.0 + github.com/sirupsen/logrus v1.9.3 github.com/spf13/afero v1.9.5 github.com/spf13/cobra v1.7.0 github.com/stretchr/testify v1.8.4 github.com/vishvananda/netlink v1.1.0 - github.com/zcalusic/sysinfo v0.9.6-0.20220805135214-99e836ba64f2 + github.com/zcalusic/sysinfo v1.0.1 go.uber.org/multierr v1.11.0 // github.com/uber-go/multierr - golang.org/x/crypto v0.10.0 // github.com/golang/crypto - golang.org/x/net v0.10.0 // github.com/golang/net - golang.org/x/oauth2 v0.7.0 // github.com/golang/oauth2 - golang.org/x/sys v0.9.0 // github.com/golang/sys - golang.org/x/text v0.10.0 // github.com/golang/text + golang.org/x/crypto v0.11.0 // github.com/golang/crypto + golang.org/x/net v0.12.0 // github.com/golang/net + golang.org/x/oauth2 v0.10.0 // github.com/golang/oauth2 + golang.org/x/sys v0.10.0 // github.com/golang/sys + golang.org/x/text v0.11.0 // github.com/golang/text golang.org/x/time v0.3.0 // github.com/golang/time - google.golang.org/grpc v1.54.0 // github.com/grpc/grpc-go - k8s.io/api v0.27.1 // github.com/kubernetes/api - k8s.io/apiextensions-apiserver v0.26.3 // github.com/kubernetes/apiextensions-apiserver - k8s.io/apimachinery v0.27.1 // github.com/kubernetes/apimachinery - 
k8s.io/client-go v0.27.1 // github.com/kubernetes/client-go - k8s.io/mount-utils v0.26.3 // github.com/kubernetes/mount-utils + google.golang.org/grpc v1.56.2 // github.com/grpc/grpc-go + k8s.io/api v0.27.3 // github.com/kubernetes/api + k8s.io/apiextensions-apiserver v0.27.3 // github.com/kubernetes/apiextensions-apiserver + k8s.io/apimachinery v0.27.3 // github.com/kubernetes/apimachinery + k8s.io/client-go v0.27.3 // github.com/kubernetes/client-go + k8s.io/mount-utils v0.27.3 // github.com/kubernetes/mount-utils ) require ( github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v2 v2.2.1 - sigs.k8s.io/cloud-provider-azure v1.27.5 - sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.0-20230614113418-76168d52887e + sigs.k8s.io/cloud-provider-azure v1.27.6 + sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.0-20230723234811-915dd11ba556 ) require ( - cloud.google.com/go/compute v1.15.1 // indirect + cloud.google.com/go/compute v1.20.1 // indirect cloud.google.com/go/compute/metadata v0.2.3 // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 // indirect - github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v4 v4.2.1 // indirect + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.0.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4 v4.0.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v3 v3.0.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/privatedns/armprivatedns v1.1.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.1.1 // indirect github.com/Azure/go-armbalancer v0.0.2 // indirect github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 // indirect github.com/Microsoft/go-winio v0.6.0 // indirect @@ -84,11 +88,11 @@ require ( github.com/evanphx/json-patch v5.6.0+incompatible // indirect github.com/go-logr/logr v1.2.4 // indirect github.com/go-logr/stdr v1.2.2 // indirect - github.com/go-openapi/analysis v0.21.2 // indirect + github.com/go-openapi/analysis v0.21.4 // indirect github.com/go-openapi/jsonpointer v0.19.6 // indirect github.com/go-openapi/jsonreference v0.20.1 // indirect - github.com/go-openapi/loads v0.21.1 // indirect - github.com/go-openapi/spec v0.20.4 // indirect + github.com/go-openapi/loads v0.21.2 // indirect + github.com/go-openapi/spec v0.20.8 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang-jwt/jwt/v4 v4.5.0 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect @@ -102,7 +106,7 @@ require ( github.com/kylelemons/godebug v1.1.0 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-runewidth v0.0.10 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.2 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/moby/spdystream v0.2.0 // indirect @@ -117,8 +121,8 @@ require ( github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/client_model v0.3.0 // indirect - github.com/prometheus/common v0.37.0 // indirect - github.com/prometheus/procfs v0.8.0 // indirect + github.com/prometheus/common v0.42.0 // indirect + github.com/prometheus/procfs v0.10.1 // indirect github.com/rivo/uniseg v0.1.0 // indirect github.com/spf13/pflag 
v1.0.5 // indirect github.com/stretchr/objx v0.5.0 // indirect @@ -128,18 +132,18 @@ require ( go.opentelemetry.io/otel/metric v1.16.0 // indirect go.opentelemetry.io/otel/trace v1.16.0 // indirect golang.org/x/mod v0.10.0 // indirect - golang.org/x/term v0.9.0 // indirect + golang.org/x/term v0.10.0 // indirect golang.org/x/tools v0.9.3 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f // indirect - google.golang.org/protobuf v1.30.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc // indirect + google.golang.org/protobuf v1.31.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect howett.net/plist v0.0.0-20181124034731-591f970eefbb // indirect - k8s.io/component-base v0.27.1 // indirect + k8s.io/component-base v0.27.3 // indirect k8s.io/klog/v2 v2.90.1 // indirect - k8s.io/kube-openapi v0.0.0-20230308215209-15aac26d736a // indirect + k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f // indirect k8s.io/utils v0.0.0-20230505201702-9f6742963106 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect diff --git a/go.sum b/go.sum index d41b7ffc4..de1ebdd64 100755 --- a/go.sum +++ b/go.sum @@ -23,8 +23,8 @@ cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvf cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/compute v1.15.1 h1:7UGq3QknM33pw5xATlpzeoomNxsacIVvTqTTvbfajmE= -cloud.google.com/go/compute v1.15.1/go.mod h1:bjjoF/NtFUrkD/urWfdHaKuOPDR5nWIs63rR+SXhcpA= +cloud.google.com/go/compute v1.20.1 h1:6aKEtlUiwEpJzM001l0yFkpXmUVXaN8W+fbkb2AZNbg= +cloud.google.com/go/compute v1.20.1/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= @@ -41,25 +41,32 @@ cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9 cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.1 h1:SEy2xmstIphdPwNBUi7uhvjyjhVKISfwjfOJmuy7kg4= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.1/go.mod h1:bjGvMhVMb+EEm3VRNQawDMUyMMjo+S5ewNjflkep/0Q= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.0 h1:8q4SaHjFsClSvuVne0ID/5Ka8u3fcIHyqkLjcFpNRHQ= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.0/go.mod h1:bjGvMhVMb+EEm3VRNQawDMUyMMjo+S5ewNjflkep/0Q= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0 h1:vcYCAze6p19qBW7MhZybIsqD8sMV8js0NyQM8JDnVtg= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0/go.mod h1:OQeznEEkTZ9OrhHJoDD8ZDq51FHgXjqtP9z6bEwBq9U= github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 h1:sXr+ck84g/ZlZUOZiNELInmMgOsuGwdjjVkEIde0OtY= 
github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v4 v4.2.1 h1:UPeCRD+XY7QlaGQte2EVI2iOcWvUYA2XY8w5T/8v0NQ= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v4 v4.2.1/go.mod h1:oGV6NlB0cvi1ZbYRR2UN44QHxWFyGk+iylgD0qaMXjA= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.0.0 h1:zpMyM8MoI8ZR/KNcfTothBjV5oTm6QVpuPwz/9TXQ1Q= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.0.0/go.mod h1:mXdzU0jht34j8BVO6q+sns1M1CYmHdq1AA9mRHeFvv0= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4 v4.0.0 h1:PcCx8mii9UPb0ztRpw8JF4/pKEfxXeXtL1GETENP4pU= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4 v4.0.0/go.mod h1:FoPQz7wDNpmE619+efw24epeh69HYI6c3jbwz8jgPMw= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal v1.1.2 h1:mLY+pNLjCUeKhgnAJWAKhEUQM+RJQo2H1fuGSw1Ky1E= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/managementgroups/armmanagementgroups v1.0.0 h1:pPvTJ1dY0sA35JOeFq6TsY2xj6Z85Yo23Pj4wCCvu4o= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/netapp/armnetapp/v4 v4.0.0 h1:yNyKx1DKBWXs6EP6WaaVgRuX9ilmOj8emmAyKfqHBYA= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/netapp/armnetapp/v4 v4.0.0/go.mod h1:CLToNi36LmwVMgHuqOgfG8M0ph7VQaEUoqpO35/1wqU= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork v1.0.0 h1:nBy98uKOIfun5z6wx6jwWLrULcM0+cjBalBFZlEZ7CA= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v2 v2.2.1 h1:bWh0Z2rOEDfB/ywv/l0iHN1JgyazE6kW/aIA89+CEK0= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v2 v2.2.1/go.mod h1:Bzf34hhAE9NSxailk8xVeLEZbUjOXcC+GnU1mMKdhLw= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resourcegraph/armresourcegraph v0.7.0 h1:ht6xbz1wlfgLAwho2Fv4nKqq2ev/PCDCoX8H0MXf6q4= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resourcegraph/armresourcegraph v0.7.0/go.mod h1:21rlzm+SuYrS9ARS92XEGxcHQeLVDcaY2YV30rHjSd4= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v3 v3.0.0 h1:C4GK2KFbzM07iThUKZccqt3kB6MpT9mHAw147uSlOYE= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v3 v3.0.0/go.mod h1:npZqEMqFcvlOus6RnbX4vH2Wr/noDYsp/oCdX/4bWso= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/privatedns/armprivatedns v1.1.0 h1:rR8ZW79lE/ppfXTfiYSnMFv5EzmVuY4pfZWIkscIJ64= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/privatedns/armprivatedns v1.1.0/go.mod h1:y2zXtLSMM/X5Mfawq0lOftpWn3f4V6OCsRdINsvWBPI= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resourcegraph/armresourcegraph v0.7.1 h1:eoQrCw9DMThzbJ32fHXZtISnURk6r0TozXiWuTsay5s= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resourcegraph/armresourcegraph v0.7.1/go.mod h1:21rlzm+SuYrS9ARS92XEGxcHQeLVDcaY2YV30rHjSd4= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armfeatures v1.1.0 h1:NhvID5juwkPxMUD8hdV3no0nugxk9QM8d5OSLskjOLM= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armfeatures v1.1.0/go.mod h1:hDdPReNCfyh7kmZm6uKm3uH3OQkGn8gbeb1c/JkmEdE= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.1.1 h1:7CBQ+Ei8SP2c6ydQTGCCrS35bDxgTMfoP2miAwK++OU= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.1.1/go.mod h1:c/wcGeGx5FUPbM/JltUYHZcKmigwyVLJlDq+4HdtXaw= 
github.com/Azure/go-armbalancer v0.0.2 h1:NVnxsTWHI5/fEzL6k6TjxPUfcB/3Si3+HFOZXOu0QtA= github.com/Azure/go-armbalancer v0.0.2/go.mod h1:yTg7MA/8YnfKQc9o97tzAJ7fbdVkod1xGsIvKmhYPRE= github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 h1:OBhqkivkhkMqLPymWEppkm7vgPQY2XsHoEkaMQ0AdZY= @@ -72,20 +79,13 @@ github.com/Microsoft/go-winio v0.6.0/go.mod h1:cTAf44im0RAYeL23bpB+fzCyDH2MJiz2B github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/RoaringBitmap/roaring v1.2.3 h1:yqreLINqIrX22ErkKI0vY47/ivtJr6n+kMhVOVmhWBY= -github.com/RoaringBitmap/roaring v1.2.3/go.mod h1:plvDsJQpxOC5bw8LRteu/MLWHsHez/3y6cubLI4/1yE= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/RoaringBitmap/roaring v1.3.0 h1:aQmu9zQxDU0uhwR8SXOH/OrqEf+X8A0LQmwW3JX8Lcg= +github.com/RoaringBitmap/roaring v1.3.0/go.mod h1:plvDsJQpxOC5bw8LRteu/MLWHsHez/3y6cubLI4/1yE= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bits-and-blooms/bitset v1.2.0 h1:Kn4yilvwNtMACtf1eYDlG8H77R07mZSPbMjLyS07ChA= @@ -93,12 +93,10 @@ github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edY github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= -github.com/cenkalti/backoff/v4 v4.2.0 h1:HN5dHm3WBOgndBH6E8V0q2jIYIR3s9yglV8k/+MN3u4= -github.com/cenkalti/backoff/v4 v4.2.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= +github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash 
v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= @@ -119,8 +117,8 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= -github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68= -github.com/docker/docker v20.10.24+incompatible h1:Ugvxm7a8+Gz6vqQYQQ2W7GYq5EUPaAiuPgIfVyI3dYE= +github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8= +github.com/docker/docker v23.0.3+incompatible h1:9GhVsShNWz1hO//9BNg/dpMnZW25KydO4wtVxWAIbho= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-plugins-helpers v0.0.0-20211224144127-6eecb7beb651 h1:YcvzLmdrP/b8kLAGJ8GT7bdncgCAiWxJZIlt84D+RJg= @@ -129,8 +127,8 @@ github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4 github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/dustin/go-humanize v1.0.2-0.20230319011938-bd1b3e1a20a1 h1:xWuCuGTxFJBS1aR92jQcf67YS3N6DozQ9xhlM421MPI= github.com/dustin/go-humanize v1.0.2-0.20230319011938-bd1b3e1a20a1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= -github.com/elastic/go-sysinfo v1.10.0 h1:8mhFXJrWFLpeskULp0sGq+jt5DA0AaPU+RfGDOJQPUA= -github.com/elastic/go-sysinfo v1.10.0/go.mod h1:RgpZTzVQX1UUNtbCnTYE5xzUaZ9+UU4ydR2ZXyzjkBg= +github.com/elastic/go-sysinfo v1.11.0 h1:QW+6BF1oxBoAprH3w2yephF7xLkrrSXj7gl2xC2BM4w= +github.com/elastic/go-sysinfo v1.11.0/go.mod h1:6KQb31j0QeWBDF88jIdWSxE8cwoOB9tO4Y4osN7Q70E= github.com/elastic/go-windows v1.0.0 h1:qLURgZFkkrYyTTkvYpsZIgf83AUsdIHfvlJaqaZ7aSY= github.com/elastic/go-windows v1.0.0/go.mod h1:TsU0Nrp7/y3+VwE82FoZF8gC/XFg/Elz6CcloAxnPgU= github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE= @@ -154,48 +152,48 @@ github.com/ghodss/yaml v1.0.1-0.20220118164431-d8423dcdf344/go.mod h1:GIjDIg/heH github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= -github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= 
-github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= -github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/go-openapi/analysis v0.21.2 h1:hXFrOYFHUAMQdu6zwAiKKJHJQ8kqZs1ux/ru1P1wLJU= github.com/go-openapi/analysis v0.21.2/go.mod h1:HZwRk4RRisyG8vx2Oe6aqeSQcoxRp47Xkp3+K6q+LdY= +github.com/go-openapi/analysis v0.21.4 h1:ZDFLvSNxpDaomuCueM0BlSXxpANBlFYiBvr+GXrvIHc= +github.com/go-openapi/analysis v0.21.4/go.mod h1:4zQ35W4neeZTqh3ol0rv/O8JBbka9QyAgQRPp9y3pfo= github.com/go-openapi/errors v0.19.8/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= github.com/go-openapi/errors v0.19.9/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= -github.com/go-openapi/errors v0.20.3 h1:rz6kiC84sqNQoqrtulzaL/VERgkoCyB6WdEkc2ujzUc= -github.com/go-openapi/errors v0.20.3/go.mod h1:Z3FlZ4I8jEGxjUK+bugx3on2mIAk4txuAOhlsB1FSgk= +github.com/go-openapi/errors v0.20.2/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= +github.com/go-openapi/errors v0.20.4 h1:unTcVm6PispJsMECE3zWgvG4xTiKda1LIR5rCRWLG6M= +github.com/go-openapi/errors v0.20.4/go.mod h1:Z3FlZ4I8jEGxjUK+bugx3on2mIAk4txuAOhlsB1FSgk= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= github.com/go-openapi/jsonreference v0.19.6/go.mod h1:diGHMEHg2IqXZGKxqyvWdfWU/aim5Dprw5bqpKkTvns= +github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo= github.com/go-openapi/jsonreference v0.20.1 h1:FBLnyygC4/IZZr893oiomc9XaghoveYTrLC1F86HID8= github.com/go-openapi/jsonreference v0.20.1/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= -github.com/go-openapi/loads v0.21.1 h1:Wb3nVZpdEzDTcly8S4HMkey6fjARRzb7iEaySimlDW0= github.com/go-openapi/loads v0.21.1/go.mod h1:/DtAMXXneXFjbQMGEtbamCZb+4x7eGwkvZCvBmwUG+g= -github.com/go-openapi/runtime v0.25.0 h1:7yQTCdRbWhX8vnIjdzU8S00tBYf7Sg71EBeorlPHvhc= -github.com/go-openapi/runtime v0.25.0/go.mod h1:Ux6fikcHXyyob6LNWxtE96hWwjBPYF0DXgVFuMTneOs= -github.com/go-openapi/spec v0.20.4 h1:O8hJrt0UMnhHcluhIdUgCLRWyM2x7QkBXRvOs7m+O1M= +github.com/go-openapi/loads v0.21.2 h1:r2a/xFIYeZ4Qd2TnGpWDIQNcP80dIaZgf704za8enro= +github.com/go-openapi/loads v0.21.2/go.mod h1:Jq58Os6SSGz0rzh62ptiu8Z31I+OTHqmULx5e/gJbNw= +github.com/go-openapi/runtime v0.26.0 h1:HYOFtG00FM1UvqrcxbEJg/SwvDRvYLQKGhw2zaQjTcc= +github.com/go-openapi/runtime v0.26.0/go.mod h1:QgRGeZwrUcSHdeh4Ka9Glvo0ug1LC5WyE+EV88plZrQ= github.com/go-openapi/spec v0.20.4/go.mod h1:faYFR1CvsJZ0mNsmsphTMSoRrNV3TEDoAM7FOEWeq8I= +github.com/go-openapi/spec v0.20.6/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= +github.com/go-openapi/spec 
v0.20.8 h1:ubHmXNY3FCIOinT8RNrrPfGc9t7I1qhPtdOGoG2AxRU= +github.com/go-openapi/spec v0.20.8/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= github.com/go-openapi/strfmt v0.21.0/go.mod h1:ZRQ409bWMj+SOgXofQAGTIo2Ebu72Gs+WaRADcS5iNg= github.com/go-openapi/strfmt v0.21.1/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k= +github.com/go-openapi/strfmt v0.21.3/go.mod h1:k+RzNO0Da+k3FrrynSNN8F7n/peCmQQqbbXjtDfvmGg= github.com/go-openapi/strfmt v0.21.7 h1:rspiXgNWgeUzhjo1YU01do6qsahtJNByjLVbPLNHb8k= github.com/go-openapi/strfmt v0.21.7/go.mod h1:adeGTkxE44sPyLk0JV235VQAO/ZXUr8KAzYjclFs3ew= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= -github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU= +github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= github.com/go-openapi/validate v0.22.1 h1:G+c2ub6q47kfX1sOBLwIQwzBVt8qmOAARyo/9Fqs9NU= github.com/go-openapi/validate v0.22.1/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= @@ -224,7 +222,6 @@ github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWe github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ= github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0= github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= @@ -323,16 +320,10 @@ github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901/go.mod h1:Z86h9 github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= -github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= -github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/julienschmidt/httprouter v1.3.0/go.mod 
h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4= github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= @@ -340,9 +331,7 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= @@ -353,8 +342,8 @@ github.com/kr/secureheader v0.2.0/go.mod h1:PfvbGMMfqBg6z+vxKGKbSJRcmASZc4klL5Di github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/kubernetes-csi/csi-lib-utils v0.13.0 h1:QrTdZVZbHlaSUBN9ReayBPnnF1N0edFIpUKBwVIBW3w= -github.com/kubernetes-csi/csi-lib-utils v0.13.0/go.mod h1:JS9eDIZmSjx4F9o0bLTVK/qfhIIOifdjEfVXzxWapfE= +github.com/kubernetes-csi/csi-lib-utils v0.14.0 h1:pusB32LkSd7GhuT8Z6cyRFqByujc28ygWV97ndaT19s= +github.com/kubernetes-csi/csi-lib-utils v0.14.0/go.mod h1:uX8xidqxGJOLXtsfCCVsxWtZl/9NiLyd2DD3Nb+KoP4= github.com/kubernetes-csi/csi-proxy/client v1.1.2 h1:zRZOv9RXAd9d/46RIiVkzyssIw5tAK7IJlYIk3gn9FU= github.com/kubernetes-csi/csi-proxy/client v1.1.2/go.mod h1:SfK4HVKQdMH5KrffivddAWgX5hl3P5KmnuOTBbDNboU= github.com/kubernetes-csi/external-snapshotter/client/v6 v6.2.0 h1:cMM5AB37e9aRGjErygVT6EuBPB6s5a+l95OPERmSlVM= @@ -368,13 +357,12 @@ github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0 github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= -github.com/mattermost/xml-roundtrip-validator v0.1.1-0.20211207080247-460296229913 h1:4QObo/l+9iqntBhKIEt5ZIs9r8MEZR8Q9fq93AVi9Bw= -github.com/mattermost/xml-roundtrip-validator v0.1.1-0.20211207080247-460296229913/go.mod h1:qccnGMcpgwcNaBnxqpJpWWUiPNr5H3O8eDgGV9gT5To= +github.com/mattermost/xml-roundtrip-validator v0.1.1-0.20230502164821-3079e7b80fca h1:TsdNYsfVbY0KKLQPNWupAj/+8getyMQd/5X3haqHvt4= +github.com/mattermost/xml-roundtrip-validator v0.1.1-0.20230502164821-3079e7b80fca/go.mod h1:qccnGMcpgwcNaBnxqpJpWWUiPNr5H3O8eDgGV9gT5To= github.com/mattn/go-runewidth v0.0.10 h1:CoZ3S2P7pvtP45xOtBw+/mDL2z0RKI576gSkzRRpdGg= github.com/mattn/go-runewidth v0.0.10/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod 
h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/matttproud/golang_protobuf_extensions v1.0.2 h1:hAHbPm5IJGijwng3PWk09JkG9WeqChjprR5s9bBZ+OM= -github.com/matttproud/golang_protobuf_extensions v1.0.2/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= +github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/hashstructure/v2 v2.0.2 h1:vGKWl0YJqUNxE8d+h8f6NJLcCJrgbhC4NcD46KavDd4= @@ -392,8 +380,6 @@ github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGp github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= @@ -401,19 +387,17 @@ github.com/mschoch/smat v0.2.0 h1:8imxQsjDm8yFEAVBe7azKmKSgzSkZXDuKkSq9374khM= github.com/mschoch/smat v0.2.0/go.mod h1:kc9mz7DoBKqDyiRL7VZN8KvXQMWeTaVnttLRXOlotKw= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/olekukonko/tablewriter v0.0.6-0.20210304033056-74c60be0ef68 h1:sB6FDvBA1aVDINTWnVSrcJ95fV/QkN6fTJgksZOT8vY= -github.com/olekukonko/tablewriter v0.0.6-0.20210304033056-74c60be0ef68/go.mod h1:8Hf+pH6thup1sPZPD+NLg7d6vbpsdilu9CPIeikvgMQ= -github.com/onsi/ginkgo/v2 v2.10.0 h1:sfUl4qgLdvkChZrWCYndY2EAu9BRIw1YphNAzy1VNWs= -github.com/onsi/gomega v1.27.8 h1:gegWiwZjBsf2DgiSbf5hpokZ98JVDMcWkUiigk6/KXc= +github.com/olekukonko/tablewriter v0.0.6-0.20230422125635-f6b4e4ae60d8 h1:eZ1u2pOgYpOBuhRmW9qo8C7tXKtqSRNI1U3PHcpcObQ= +github.com/olekukonko/tablewriter v0.0.6-0.20230422125635-f6b4e4ae60d8/go.mod h1:8Hf+pH6thup1sPZPD+NLg7d6vbpsdilu9CPIeikvgMQ= +github.com/onsi/ginkgo/v2 v2.11.0 h1:WgqUCUt/lT6yXoQ8Wef0fsNn5cAuMK7+KT9UFRz2tcU= +github.com/onsi/gomega v1.27.9 h1:qIyVWbOsvQEye2QCqLsNSeH/5L1RS9vS382erEWfT3o= github.com/opencontainers/go-digest v1.0.0 
h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM= -github.com/openshift/api v0.0.0-20230406152840-ce21e3fe5da2 h1:lpKBKpI8or60mSEEKrpS67cevp8XaW8vfmXSwCZXKd0= -github.com/openshift/api v0.0.0-20230406152840-ce21e3fe5da2/go.mod h1:ctXNyWanKEjGj8sss1KjjHQ3ENKFm33FFnS5BKaIPh4= +github.com/openshift/api v0.0.0-20230711095040-ca06f4a23b64 h1:j7LIIr4Vrdy4Dpd4bw2j53UXUSjA1eXXC0x89g9kyAI= +github.com/openshift/api v0.0.0-20230711095040-ca06f4a23b64/go.mod h1:yimSGmjsI+XF1mr+AKBs2//fSXIOhhetHGbMlBEfXbs= github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= @@ -426,32 +410,15 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= -github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= -github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw= -github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= +github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= -github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= -github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE= -github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= 
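For context: github.com/prometheus/client_golang moves from v1.14.0 to v1.16.0 here, with matching client_model, common, and procfs pins just below. The registration API is stable across that range; a minimal sketch with a hypothetical metric name (not a metric from this repository):

package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

// opsTotal is a hypothetical counter used only for illustration.
var opsTotal = prometheus.NewCounter(prometheus.CounterOpts{
	Name: "example_ops_total",
	Help: "Total operations processed.",
})

func main() {
	prometheus.MustRegister(opsTotal) // panics on duplicate registration
	opsTotal.Inc()

	// Expose all registered metrics on /metrics.
	http.Handle("/metrics", promhttp.Handler())
	_ = http.ListenAndServe(":8080", nil)
}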
-github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo= -github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= +github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM= +github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc= +github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg= +github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= github.com/rivo/uniseg v0.1.0 h1:+2KBaVoUmb9XzDsrx/Ct0W/EYOSFf/nWTauy++DprtY= github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= @@ -460,13 +427,11 @@ github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= -github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= -github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.9.5 h1:stMpOSZFs//0Lv29HduCmli3GUfpFoF3Y1Q/aXj/wVM= github.com/spf13/afero v1.9.5/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ= @@ -513,10 +478,11 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/zcalusic/sysinfo v0.9.6-0.20220805135214-99e836ba64f2 h1:kbjgNu2XjGjB8nvV/BHZS1J8FK8ONWrFQX3uHMDp2Lc= -github.com/zcalusic/sysinfo v0.9.6-0.20220805135214-99e836ba64f2/go.mod h1:30ZyzePdcgO8cQgyXtuPpg1FPCaHAv4kTap0HE8wBjo= +github.com/zcalusic/sysinfo v1.0.1 h1:cVh8q3codjh43AGRTa54dJ2Zq+qPejv8n2VWpxKViwc= +github.com/zcalusic/sysinfo v1.0.1/go.mod h1:LxwKwtQdbTIQc65drhjQzYzt0o7jfB80LrrZm7SWn8o= go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg= go.mongodb.org/mongo-driver v1.7.5/go.mod h1:VXEWRZ6URJIkUq2SCAyapmhH0ZLRBP+FT4xhp5Zvxng= +go.mongodb.org/mongo-driver v1.10.0/go.mod 
h1:wsihk0Kdgv8Kqu1Anit4sfK+22vSFbUrAVEYRhCXrA8= go.mongodb.org/mongo-driver v1.11.3 h1:Ql6K6qYHEzB6xvu4+AU0BoRoqf9vFPcc4o7MUIdPW8Y= go.mongodb.org/mongo-driver v1.11.3/go.mod h1:PTSz5yu21bkT/wXpkS7WR5f0ddqw5quethTUn9WM+2g= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= @@ -529,7 +495,7 @@ go.opentelemetry.io/otel v1.16.0 h1:Z7GVAX/UkAXPKsy94IU+i6thsQS4nb7LviLpnaNeW8s= go.opentelemetry.io/otel v1.16.0/go.mod h1:vl0h9NUa1D5s1nv3A5vZOYWn8av4K8Ml6JDeHrT/bx4= go.opentelemetry.io/otel/metric v1.16.0 h1:RbrpwVG1Hfv85LgnZ7+txXioPDoh6EdbZHo26Q3hqOo= go.opentelemetry.io/otel/metric v1.16.0/go.mod h1:QE47cpOmkwipPiefDwo2wDzwJrlfxxNYodqc4xnGCo4= -go.opentelemetry.io/otel/sdk v1.11.1 h1:F7KmQgoHljhUuJyA+9BiU+EkJfyX5nVVF4wyzWZpKxs= +go.opentelemetry.io/otel/sdk v1.14.0 h1:PDCppFRDq8A1jL9v6KMI6dYesaq+DFcDZvjsoGvxGzY= go.opentelemetry.io/otel/trace v1.16.0 h1:8JRpaObFoW0pxuVPapkgH8UhHQj+bJW8jJsCZEu5MQs= go.opentelemetry.io/otel/trace v1.16.0/go.mod h1:Yt9vYq1SdNz3xdjZZK7wcXv1qv2pwLkqr2QVwea0ef0= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= @@ -546,8 +512,8 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.10.0 h1:LKqV2xt9+kDzSTfOhx4FrkEBcMrAgHSYgzywV9zcGmM= -golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45I= +golang.org/x/crypto v0.11.0 h1:6Ewdq3tDic1mg5xRO4milcWCfMVQhI4NkqWWvqejpuA= +golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -586,7 +552,6 @@ golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk= golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -594,7 +559,6 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -619,13 +583,10 @@ golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM= -golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M= -golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.12.0 h1:cfawfvKITfUsFCeJIHJrbSxpeu/E81khclypR0GVT50= +golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -635,10 +596,8 @@ golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g= -golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= +golang.org/x/oauth2 v0.10.0 h1:zHCpF2Khkwy4mMB4bv0U37YtJdTGW8jI0glAApi0Kh8= +golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -651,10 +610,9 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 
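For context: the golang.org/x/* updates in this stretch (x/net v0.10.0 to v0.12.0, x/oauth2 v0.7.0 to v0.10.0, and x/sync v0.2.0 to v0.3.0 just below) are routine minor bumps. A sketch of errgroup, the most commonly used piece of x/sync:

package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	g, ctx := errgroup.WithContext(context.Background())
	for i := 0; i < 3; i++ {
		i := i // capture the loop variable (pre-Go 1.22 semantics)
		g.Go(func() error {
			select {
			case <-ctx.Done():
				return ctx.Err() // another goroutine already failed
			default:
				fmt.Println("worker", i)
				return nil
			}
		})
	}
	// Wait returns the first non-nil error from any goroutine, if any.
	if err := g.Wait(); err != nil {
		fmt.Println("error:", err)
	}
}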
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI= +golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -672,7 +630,6 @@ golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -685,8 +642,6 @@ golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -694,26 +649,21 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.9.0 h1:KS/R3tvhPqvJvwcKfnBHJwwthS11LRhmM5D59eEXa0s= -golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA= +golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.9.0 h1:GRRCnKYhdQrD8kfRAdQ6Zcw1P0OcELxGLKJvtjVMZ28= -golang.org/x/term v0.9.0/go.mod h1:M6DEAAIenWoTxdKrOltXcmDY3rSplQUkrvaDU5FcQyo= +golang.org/x/term v0.10.0 h1:3R7pNqamzBraeqj/Tj8qt1aQ2HpmlC+Cx/qL/7hn4/c= +golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -723,8 +673,8 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.10.0 h1:UpjohKhiEgNc0CSauXmwYftY1+LlaC75SJwh0SgCX58= -golang.org/x/text v0.10.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4= +golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -855,8 +805,8 @@ google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod 
h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f h1:BWUVssLB0HVOSY78gIdvk1dTVYtT1y8SBWtPYuTJ/6w= -google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc h1:XSJ8Vk1SWuNr8S18z1NZSziL0CPIXLCCMDOEFtHBOFc= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -877,8 +827,8 @@ google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAG google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.54.0 h1:EhTqbhiYeixwWQtAEZAxmV9MGqcjEU2mFx52xCzNyag= -google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g= +google.golang.org/grpc v1.56.2 h1:fVRFRnXvU+x6C4IlHZewvJOVHoOv1TUuQyoRsYnB4bI= +google.golang.org/grpc v1.56.2/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -892,9 +842,8 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= -google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= +google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -908,8 +857,6 @@ gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.5/go.mod 
h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= @@ -929,31 +876,31 @@ honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9 honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= howett.net/plist v0.0.0-20181124034731-591f970eefbb h1:jhnBjNi9UFpfpl8YZhA9CrOqpnJdvzuiHsl/dnxl11M= howett.net/plist v0.0.0-20181124034731-591f970eefbb/go.mod h1:vMygbs4qMhSZSc4lCUl2OEE+rDiIIJAIdR4m7MiMcm0= -k8s.io/api v0.27.1 h1:Z6zUGQ1Vd10tJ+gHcNNNgkV5emCyW+v2XTmn+CLjSd0= -k8s.io/api v0.27.1/go.mod h1:z5g/BpAiD+f6AArpqNjkY+cji8ueZDU/WV1jcj5Jk4E= -k8s.io/apiextensions-apiserver v0.26.3 h1:5PGMm3oEzdB1W/FTMgGIDmm100vn7IaUP5er36dB+YE= -k8s.io/apiextensions-apiserver v0.26.3/go.mod h1:jdA5MdjNWGP+njw1EKMZc64xAT5fIhN6VJrElV3sfpQ= -k8s.io/apimachinery v0.27.1 h1:EGuZiLI95UQQcClhanryclaQE6xjg1Bts6/L3cD7zyc= -k8s.io/apimachinery v0.27.1/go.mod h1:5ikh59fK3AJ287GUvpUsryoMFtH9zj/ARfWCo3AyXTM= -k8s.io/client-go v0.27.1 h1:oXsfhW/qncM1wDmWBIuDzRHNS2tLhK3BZv512Nc59W8= -k8s.io/client-go v0.27.1/go.mod h1:f8LHMUkVb3b9N8bWturc+EDtVVVwZ7ueTVquFAJb2vA= -k8s.io/component-base v0.27.1 h1:kEB8p8lzi4gCs5f2SPU242vOumHJ6EOsOnDM3tTuDTM= -k8s.io/component-base v0.27.1/go.mod h1:UGEd8+gxE4YWoigz5/lb3af3Q24w98pDseXcXZjw+E0= +k8s.io/api v0.27.3 h1:yR6oQXXnUEBWEWcvPWS0jQL575KoAboQPfJAuKNrw5Y= +k8s.io/api v0.27.3/go.mod h1:C4BNvZnQOF7JA/0Xed2S+aUyJSfTGkGFxLXz9MnpIpg= +k8s.io/apiextensions-apiserver v0.27.3 h1:xAwC1iYabi+TDfpRhxh4Eapl14Hs2OftM2DN5MpgKX4= +k8s.io/apiextensions-apiserver v0.27.3/go.mod h1:BH3wJ5NsB9XE1w+R6SSVpKmYNyIiyIz9xAmBl8Mb+84= +k8s.io/apimachinery v0.27.3 h1:Ubye8oBufD04l9QnNtW05idcOe9Z3GQN8+7PqmuVcUM= +k8s.io/apimachinery v0.27.3/go.mod h1:XNfZ6xklnMCOGGFNqXG7bUrQCoR04dh/E7FprV6pb+E= +k8s.io/client-go v0.27.3 h1:7dnEGHZEJld3lYwxvLl7WoehK6lAq7GvgjxpA3nv1E8= +k8s.io/client-go v0.27.3/go.mod h1:2MBEKuTo6V1lbKy3z1euEGnhPfGZLKTS9tiJ2xodM48= +k8s.io/component-base v0.27.3 h1:g078YmdcdTfrCE4fFobt7qmVXwS8J/3cI1XxRi/2+6k= +k8s.io/component-base v0.27.3/go.mod h1:JNiKYcGImpQ44iwSYs6dysxzR9SxIIgQalk4HaCNVUY= k8s.io/klog/v2 v2.90.1 h1:m4bYOKall2MmOiRaR1J+We67Do7vm9KiQVlT96lnHUw= k8s.io/klog/v2 v2.90.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kube-openapi v0.0.0-20230308215209-15aac26d736a h1:gmovKNur38vgoWfGtP5QOGNOA7ki4n6qNYoFAgMlNvg= -k8s.io/kube-openapi v0.0.0-20230308215209-15aac26d736a/go.mod h1:y5VtZWM9sHHc2ZodIH/6SHzXj+TPU5USoA8lcIeKEKY= -k8s.io/mount-utils v0.26.3 h1:FxMDiPLCkrYgonfSaKHWltLNkyTg3Q/Xrwn94uwhd8k= -k8s.io/mount-utils v0.26.3/go.mod h1:95yx9K6N37y8YZ0/lUh9U6ITosMODNaW0/v4wvaa0Xw= +k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f h1:2kWPakN3i/k81b0gvD5C5FJ2kxm1WrQFanWchyKuqGg= +k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f/go.mod h1:byini6yhqGC14c3ebc/QwanvYwhuMWF6yz2F8uwW8eg= +k8s.io/mount-utils v0.27.3 h1:oubkDKLTZUneW27wgyOmp8a1AAZj04vGmtq+YW8wdvY= +k8s.io/mount-utils v0.27.3/go.mod h1:vmcjYdi2Vg1VTWY7KkhvwJVY6WDHxb/QQhiQKkR8iNs= k8s.io/utils v0.0.0-20230505201702-9f6742963106 h1:EObNQ3TW2D+WptiYXlApGNLVy0zm/JIBVY9i+M4wpAU= k8s.io/utils v0.0.0-20230505201702-9f6742963106/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= 
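For context: the k8s.io/* modules above move from 0.27.1 to 0.27.3, a patch-level bump of the Kubernetes client libraries. A minimal in-cluster sketch of the client-go API these modules provide (standard usage, not code from this diff):

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func main() {
	// InClusterConfig only works inside a pod with a mounted service account.
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	clientset, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	pods, err := clientset.CoreV1().Pods("default").List(context.Background(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println("pods in default namespace:", len(pods.Items))
}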
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/cloud-provider-azure v1.27.5 h1:D/xONxiecBrWw+45YiygUryntb6MAoMNUsHYH/Lg1hc= -sigs.k8s.io/cloud-provider-azure v1.27.5/go.mod h1:kpbVqvl78SN95q4sE3AAj0SW96JxtCDm0Z4+8Q1lBOY= -sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.0-20230614113418-76168d52887e h1:6w35nHkuAkSgjBEwznlgutHfpgXe7hO93acuxvxeTYg= -sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.0-20230614113418-76168d52887e/go.mod h1:tntSHYpEsHw8gv5HVh5lPpDkhfXsPzwqSoihLhJCgZw= +sigs.k8s.io/cloud-provider-azure v1.27.6 h1:HRbzXj4ZTU3I1hkRFtYYer8oq2LRSnGqyO0j9bojh+Q= +sigs.k8s.io/cloud-provider-azure v1.27.6/go.mod h1:SHkILxyphjhYTSVGHebkhjT7C4OuhVGULCJPEGHimig= +sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.0-20230723234811-915dd11ba556 h1:lFPa+p1LkeQIDX0BW565eLMv7lWXX6c1NlFIg5pv3P4= +sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.0-20230723234811-915dd11ba556/go.mod h1:Dy+UN94aM6/T751gS4rQPqTITiBKbQCigX79SllyFxs= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= diff --git a/hack/VERSION b/hack/VERSION index 92f023e6e..942d403ae 100644 --- a/hack/VERSION +++ b/hack/VERSION @@ -1 +1 @@ -23.04.0 +23.07.0 diff --git a/helm/trident-operator/Chart.yaml b/helm/trident-operator/Chart.yaml index 52ac2109f..6c1604ab8 100644 --- a/helm/trident-operator/Chart.yaml +++ b/helm/trident-operator/Chart.yaml @@ -11,4 +11,4 @@ icon: "https://raw.githubusercontent.com/NetApp/trident/master/logo/trident.png" # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. -appVersion: 23.04.0 +appVersion: 23.07.0 diff --git a/helm/trident-operator/values.yaml b/helm/trident-operator/values.yaml index 3bcd2b148..9ae3645fc 100644 --- a/helm/trident-operator/values.yaml +++ b/helm/trident-operator/values.yaml @@ -77,7 +77,7 @@ tridentSilenceAutosupport: false tridentAutosupportImage: "" # tridentAutosupportImageTag allows overriding the tag of the image for Trident's Autosupport container. -tridentAutosupportImageTag: "23.01" +tridentAutosupportImageTag: "23.04" # tridentAutosupportProxy allows Trident's autosupport container to phone home via an HTTP proxy. tridentAutosupportProxy: "" diff --git a/mocks/mock_storage_drivers/mock_ontap/mock_api.go b/mocks/mock_storage_drivers/mock_ontap/mock_api.go index 9e456fdf3..5dc1a05b1 100644 --- a/mocks/mock_storage_drivers/mock_ontap/mock_api.go +++ b/mocks/mock_storage_drivers/mock_ontap/mock_api.go @@ -992,17 +992,18 @@ func (mr *MockOntapAPIMockRecorder) NVMeEnsureNamespaceMapped(arg0, arg1, arg2 i } // NVMeEnsureNamespaceUnmapped mocks base method. 
-func (m *MockOntapAPI) NVMeEnsureNamespaceUnmapped(arg0 context.Context, arg1, arg2 string) error { +func (m *MockOntapAPI) NVMeEnsureNamespaceUnmapped(arg0 context.Context, arg1, arg2, arg3 string) (bool, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "NVMeEnsureNamespaceUnmapped", arg0, arg1, arg2) - ret0, _ := ret[0].(error) - return ret0 + ret := m.ctrl.Call(m, "NVMeEnsureNamespaceUnmapped", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 } // NVMeEnsureNamespaceUnmapped indicates an expected call of NVMeEnsureNamespaceUnmapped. -func (mr *MockOntapAPIMockRecorder) NVMeEnsureNamespaceUnmapped(arg0, arg1, arg2 interface{}) *gomock.Call { +func (mr *MockOntapAPIMockRecorder) NVMeEnsureNamespaceUnmapped(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NVMeEnsureNamespaceUnmapped", reflect.TypeOf((*MockOntapAPI)(nil).NVMeEnsureNamespaceUnmapped), arg0, arg1, arg2) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NVMeEnsureNamespaceUnmapped", reflect.TypeOf((*MockOntapAPI)(nil).NVMeEnsureNamespaceUnmapped), arg0, arg1, arg2, arg3) } // NVMeIsNamespaceMapped mocks base method. @@ -1094,6 +1095,20 @@ func (mr *MockOntapAPIMockRecorder) NVMeNamespaceSetSize(arg0, arg1, arg2 interf return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NVMeNamespaceSetSize", reflect.TypeOf((*MockOntapAPI)(nil).NVMeNamespaceSetSize), arg0, arg1, arg2) } +// NVMeRemoveHostFromSubsystem mocks base method. +func (m *MockOntapAPI) NVMeRemoveHostFromSubsystem(arg0 context.Context, arg1, arg2 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NVMeRemoveHostFromSubsystem", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// NVMeRemoveHostFromSubsystem indicates an expected call of NVMeRemoveHostFromSubsystem. +func (mr *MockOntapAPIMockRecorder) NVMeRemoveHostFromSubsystem(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NVMeRemoveHostFromSubsystem", reflect.TypeOf((*MockOntapAPI)(nil).NVMeRemoveHostFromSubsystem), arg0, arg1, arg2) +} + // NVMeSubsystemAddNamespace mocks base method. func (m *MockOntapAPI) NVMeSubsystemAddNamespace(arg0 context.Context, arg1, arg2 string) error { m.ctrl.T.Helper() diff --git a/mocks/mock_storage_drivers/mock_ontap/mock_ontap_rest_interface.go b/mocks/mock_storage_drivers/mock_ontap/mock_ontap_rest_interface.go index 39c134de6..f0057935c 100644 --- a/mocks/mock_storage_drivers/mock_ontap/mock_ontap_rest_interface.go +++ b/mocks/mock_storage_drivers/mock_ontap/mock_ontap_rest_interface.go @@ -1156,6 +1156,20 @@ func (mr *MockRestClientInterfaceMockRecorder) NVMeNamespaceSize(arg0, arg1 inte return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NVMeNamespaceSize", reflect.TypeOf((*MockRestClientInterface)(nil).NVMeNamespaceSize), arg0, arg1) } +// NVMeRemoveHostFromSubsystem mocks base method. +func (m *MockRestClientInterface) NVMeRemoveHostFromSubsystem(arg0 context.Context, arg1, arg2 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NVMeRemoveHostFromSubsystem", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// NVMeRemoveHostFromSubsystem indicates an expected call of NVMeRemoveHostFromSubsystem. 
+func (mr *MockRestClientInterfaceMockRecorder) NVMeRemoveHostFromSubsystem(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NVMeRemoveHostFromSubsystem", reflect.TypeOf((*MockRestClientInterface)(nil).NVMeRemoveHostFromSubsystem), arg0, arg1, arg2) +} + // NVMeSubsystemAddNamespace mocks base method. func (m *MockRestClientInterface) NVMeSubsystemAddNamespace(arg0 context.Context, arg1, arg2 string) error { m.ctrl.T.Helper() diff --git a/mocks/mock_utils/mock_iscsi_utils.go b/mocks/mock_utils/mock_iscsi_utils.go index 0c8e96b20..c38b43504 100644 --- a/mocks/mock_utils/mock_iscsi_utils.go +++ b/mocks/mock_utils/mock_iscsi_utils.go @@ -64,6 +64,51 @@ func (mr *MockIscsiReconcileUtilsMockRecorder) GetISCSIHostSessionMapForTarget(a return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetISCSIHostSessionMapForTarget", reflect.TypeOf((*MockIscsiReconcileUtils)(nil).GetISCSIHostSessionMapForTarget), arg0, arg1) } +// GetMultipathDeviceBySerial mocks base method. +func (m *MockIscsiReconcileUtils) GetMultipathDeviceBySerial(arg0 context.Context, arg1 string) (string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetMultipathDeviceBySerial", arg0, arg1) + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetMultipathDeviceBySerial indicates an expected call of GetMultipathDeviceBySerial. +func (mr *MockIscsiReconcileUtilsMockRecorder) GetMultipathDeviceBySerial(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMultipathDeviceBySerial", reflect.TypeOf((*MockIscsiReconcileUtils)(nil).GetMultipathDeviceBySerial), arg0, arg1) +} + +// GetMultipathDeviceDisks mocks base method. +func (m *MockIscsiReconcileUtils) GetMultipathDeviceDisks(arg0 context.Context, arg1 string) ([]string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetMultipathDeviceDisks", arg0, arg1) + ret0, _ := ret[0].([]string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetMultipathDeviceDisks indicates an expected call of GetMultipathDeviceDisks. +func (mr *MockIscsiReconcileUtilsMockRecorder) GetMultipathDeviceDisks(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMultipathDeviceDisks", reflect.TypeOf((*MockIscsiReconcileUtils)(nil).GetMultipathDeviceDisks), arg0, arg1) +} + +// GetMultipathDeviceUUID mocks base method. +func (m *MockIscsiReconcileUtils) GetMultipathDeviceUUID(arg0 string) (string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetMultipathDeviceUUID", arg0) + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetMultipathDeviceUUID indicates an expected call of GetMultipathDeviceUUID. +func (mr *MockIscsiReconcileUtilsMockRecorder) GetMultipathDeviceUUID(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMultipathDeviceUUID", reflect.TypeOf((*MockIscsiReconcileUtils)(nil).GetMultipathDeviceUUID), arg0) +} + // GetSysfsBlockDirsForLUN mocks base method. 
func (m *MockIscsiReconcileUtils) GetSysfsBlockDirsForLUN(arg0 int, arg1 map[int]int) []string { m.ctrl.T.Helper() diff --git a/operator/Dockerfile b/operator/Dockerfile index cc1ac32e1..bb935dd6c 100644 --- a/operator/Dockerfile +++ b/operator/Dockerfile @@ -1,6 +1,6 @@ ARG ARCH=amd64 -FROM --platform=linux/${ARCH} gcr.io/distroless/static@sha256:a01d47d4036cae5a67a9619e3d06fa14a6811a2247b4da72b4233ece4efebd57 +FROM --platform=linux/${ARCH} gcr.io/distroless/static@sha256:7198a357ff3a8ef750b041324873960cf2153c11cc50abb9d8d5f8bb089f6b4e LABEL maintainers="The NetApp Trident Team" \ app="trident-operator.netapp.io" description="Trident Operator" diff --git a/operator/controllers/orchestrator/installer/installer_test.go b/operator/controllers/orchestrator/installer/installer_test.go index fcc151d31..86d7d2c14 100644 --- a/operator/controllers/orchestrator/installer/installer_test.go +++ b/operator/controllers/orchestrator/installer/installer_test.go @@ -55,7 +55,7 @@ func createTestLabels() map[string]string { labels := make(map[string]string) labels[appLabelKey] = appLabelValue labels[K8sVersionLabelKey] = "v1.21.8" - labels[TridentVersionLabelKey] = "v23.04.0" + labels[TridentVersionLabelKey] = "v23.07.0" return labels } diff --git a/persistent_store/crd/apis/netapp/v1/node.go b/persistent_store/crd/apis/netapp/v1/node.go index 3ec951f66..c62112c09 100644 --- a/persistent_store/crd/apis/netapp/v1/node.go +++ b/persistent_store/crd/apis/netapp/v1/node.go @@ -39,6 +39,7 @@ func (in *TridentNode) Apply(persistent *utils.Node) error { in.Name = persistent.Name in.IQN = persistent.IQN + in.NQN = persistent.NQN in.IPs = persistent.IPs in.Deleted = persistent.Deleted in.PublicationState = string(persistent.PublicationState) @@ -68,6 +69,7 @@ func (in *TridentNode) Persistent() (*utils.Node, error) { persistent := &utils.Node{ Name: in.Name, IQN: in.IQN, + NQN: in.NQN, IPs: in.IPs, NodePrep: &utils.NodePrep{}, HostInfo: &utils.HostSystem{}, diff --git a/persistent_store/crd/apis/netapp/v1/types.go b/persistent_store/crd/apis/netapp/v1/types.go index 31a673483..3d105fe92 100644 --- a/persistent_store/crd/apis/netapp/v1/types.go +++ b/persistent_store/crd/apis/netapp/v1/types.go @@ -362,6 +362,8 @@ type TridentNode struct { NodeName string `json:"name"` // IQN is the iqn of the node IQN string `json:"iqn,omitempty"` + // NQN is the nqn of the node + NQN string `json:"nqn,omitempty"` // IPs is a list of IP addresses for the TridentNode IPs []string `json:"ips,omitempty"` // NodePrep is the current status of node preparation for this node diff --git a/storage/backend.go b/storage/backend.go index 994e0813e..9c4acce3c 100644 --- a/storage/backend.go +++ b/storage/backend.go @@ -419,6 +419,12 @@ func (b *StorageBackend) CloneVolume( "cloneVolumeInternal": cloneVolConfig.InternalName, }).Debug("Attempting volume clone.") + if cloneVolConfig.ReadOnlyClone { + if tridentconfig.DisableExtraFeatures { + return nil, errors.UnsupportedError("read only clone is not supported") + } + } + // Ensure volume is managed if cloneVolConfig.ImportNotManaged { return nil, errors.NotManagedError("volume %s is not managed by Trident", cloneVolConfig.InternalName) @@ -834,16 +840,16 @@ func (b *StorageBackend) DeleteSnapshot( "snapshotName": snapConfig.Name, }).Debug("Attempting snapshot delete.") + // Ensure snapshot is managed + if snapConfig.ImportNotManaged { + return errors.NotManagedError("snapshot %s is not managed by Trident", snapConfig.InternalName) + } + // Ensure volume is managed if volConfig.ImportNotManaged { 
return errors.NotManagedError("source volume %s is not managed by Trident", volConfig.InternalName) } - // Ensure snapshot is managed - if snapConfig.ImportNotManaged { - return errors.NotManagedError("source volume %s is not managed by Trident", snapConfig.InternalName) - } - // Ensure backend is ready if err := b.ensureOnlineOrDeleting(ctx); err != nil { return err @@ -1034,6 +1040,7 @@ func (b *StorageBackend) ConstructExternal(ctx context.Context) *BackendExternal for volName := range b.volumes { backendExternal.Volumes = append(backendExternal.Volumes, volName) } + return &backendExternal } diff --git a/storage/backend_test.go b/storage/backend_test.go index e0279851e..24fab5576 100644 --- a/storage/backend_test.go +++ b/storage/backend_test.go @@ -3,9 +3,13 @@ package storage import ( + "context" "testing" "github.com/stretchr/testify/assert" + + tridentconfig "github.com/netapp/trident/config" + "github.com/netapp/trident/utils/errors" ) func TestBackendState(t *testing.T) { @@ -66,3 +70,137 @@ func TestBackendState(t *testing.T) { ) } } + +func TestDeleteSnapshot_BackendOffline(t *testing.T) { + volumeName := "pvc-e9748b6b-8240-4fd8-97bc-868bf064ecd4" + volumeInternalName := "trident_pvc_e9748b6b_8240_4fd8_97bc_868bf064ecd4" + volumeConfig := &VolumeConfig{ + Version: "", + Name: volumeName, + InternalName: volumeInternalName, + } + snapName := "snapshot" + snapInternalName := "snap.2023-05-23_175116" + snapConfig := &SnapshotConfig{ + Version: "1", + Name: snapName, + VolumeName: volumeName, + InternalName: snapInternalName, + VolumeInternalName: volumeInternalName, + } + + backend := &StorageBackend{ + state: Offline, + } + + // Both volume and snapshot not managed + err := backend.DeleteSnapshot(context.Background(), snapConfig, volumeConfig) + + assert.Errorf(t, err, "expected err") +} + +func TestDeleteSnapshot_NotManaged(t *testing.T) { + backendUUID := "test-backend" + volumeName := "pvc-e9748b6b-8240-4fd8-97bc-868bf064ecd4" + volumeInternalName := "trident_pvc_e9748b6b_8240_4fd8_97bc_868bf064ecd4" + volumeConfig := &VolumeConfig{ + Version: "", + Name: volumeName, + InternalName: volumeInternalName, + ImportOriginalName: "import-" + volumeName, + ImportBackendUUID: "import-" + backendUUID, + ImportNotManaged: true, + LUKSPassphraseNames: nil, + } + snapName := "snapshot-import" + snapInternalName := "snap.2023-05-23_175116" + snapConfig := &SnapshotConfig{ + Version: "1", + Name: snapName, + VolumeName: volumeName, + InternalName: snapInternalName, + VolumeInternalName: volumeInternalName, + ImportNotManaged: true, + } + + backend := &StorageBackend{ + state: Online, + } + + // Both volume and snapshot not managed + err := backend.DeleteSnapshot(context.Background(), snapConfig, volumeConfig) + + assert.Errorf(t, err, "expected err") + + // Volume not managed + volumeConfig.ImportNotManaged = true + snapConfig.ImportNotManaged = false + err = backend.DeleteSnapshot(context.Background(), snapConfig, volumeConfig) + + assert.Errorf(t, err, "expected err") + + // Snapshot not managed + volumeConfig.ImportNotManaged = false + snapConfig.ImportNotManaged = true + err = backend.DeleteSnapshot(context.Background(), snapConfig, volumeConfig) + + assert.Errorf(t, err, "expected err") +} + +func TestCloneVolume_FeatureDisabled(t *testing.T) { + volumeName := "pvc-e9748b6b-8240-4fd8-97bc-868bf064ecd4" + volumeInternalName := "trident_pvc_e9748b6b_8240_4fd8_97bc_868bf064ecd4" + volumeConfig := &VolumeConfig{ + Version: "", + Name: volumeName, + InternalName: volumeInternalName, + } + 
volumeConfigDest := &VolumeConfig{
+		Version:       "",
+		Name:          "pvc-deadbeef-8240-4fd8-97bc-868bf064ecd4",
+		InternalName:  "trident_pvc_deadbeef_8240_4fd8_97bc_868bf064ecd4",
+		ReadOnlyClone: true,
+	}
+
+	backend := &StorageBackend{
+		state: Offline,
+	}
+	pool := NewStoragePool(nil, "test-pool1")
+
+	// Attempt a read-only clone; expect it to be rejected as unsupported
+	_, err := backend.CloneVolume(context.Background(), volumeConfig, volumeConfigDest, pool, false)
+
+	assert.Error(t, err, "expected err")
+	assert.True(t, errors.IsUnsupportedError(err))
+}
+
+func TestCloneVolume_BackendOffline(t *testing.T) {
+	volumeName := "pvc-e9748b6b-8240-4fd8-97bc-868bf064ecd4"
+	volumeInternalName := "trident_pvc_e9748b6b_8240_4fd8_97bc_868bf064ecd4"
+	volumeConfig := &VolumeConfig{
+		Version:       "",
+		Name:          volumeName,
+		InternalName:  volumeInternalName,
+		ReadOnlyClone: true,
+	}
+	volumeConfigDest := &VolumeConfig{
+		Version:       "",
+		Name:          "pvc-deadbeef-8240-4fd8-97bc-868bf064ecd4",
+		InternalName:  "trident_pvc_deadbeef_8240_4fd8_97bc_868bf064ecd4",
+		ReadOnlyClone: false,
+	}
+
+	backend := &StorageBackend{
+		state: Offline,
+		name:  "test-backend",
+	}
+	pool := NewStoragePool(nil, "test-pool1")
+
+	tridentconfig.DisableExtraFeatures = false
+
+	// Attempt the clone against an offline backend
+	_, err := backend.CloneVolume(context.Background(), volumeConfig, volumeConfigDest, pool, false)
+
+	assert.Errorf(t, err, "expected err")
+	assert.Equal(t, err.Error(), "backend test-backend is not Online")
+}
diff --git a/storage_attribute/common_attributes.go b/storage_attribute/common_attributes.go
index ddf80790c..f3c99ded1 100644
--- a/storage_attribute/common_attributes.go
+++ b/storage_attribute/common_attributes.go
@@ -48,6 +48,9 @@ const (
 	ISCSI = "iscsi"
 	NVMe  = "nvme"
 
+	// NVMeTransport is used to get NVMe TCP dataLIFs.
+	NVMeTransport = "nvme_tcp"
+
 	RequiredStorage        = "requiredStorage" // deprecated, use additionalStoragePools
 	StoragePools           = "storagePools"
 	AdditionalStoragePools = "additionalStoragePools"
diff --git a/storage_drivers/azure/azure_anf_subvolume.go b/storage_drivers/azure/azure_anf_subvolume.go
index fe293b4a2..3074afcf5 100644
--- a/storage_drivers/azure/azure_anf_subvolume.go
+++ b/storage_drivers/azure/azure_anf_subvolume.go
@@ -1615,7 +1615,7 @@ func (d *NASBlockStorageDriver) DeleteSnapshot(
 	Logd(ctx, d.Name(), d.Config.DebugTraceFlags["method"]).WithFields(fields).Trace(">>>> DeleteSnapshot")
 	defer Logd(ctx, d.Name(), d.Config.DebugTraceFlags["method"]).WithFields(fields).Trace("<<<< DeleteSnapshot")
 
-	creationToken := d.helper.GetSnapshotInternalName(snapConfig.VolumeName, snapName)
+	creationToken := snapConfig.InternalName
 
 	subscriptionID, resourceGroup, _, netappAccount, cPoolName, volumeName, _, err := api.ParseSubvolumeID(volConfig.InternalID)
diff --git a/storage_drivers/azure/azure_anf_subvolume_test.go b/storage_drivers/azure/azure_anf_subvolume_test.go
index 82d835abc..21a3598a3 100644
--- a/storage_drivers/azure/azure_anf_subvolume_test.go
+++ b/storage_drivers/azure/azure_anf_subvolume_test.go
@@ -2421,7 +2421,7 @@ func getStructsForSubvolumeCreateSnapshot() (
 	snapConfig := &storage.SnapshotConfig{
 		Version:            "1",
 		Name:               "testSnap",
-		InternalName:       "testSnap",
+		InternalName:       "trident-testSnap--ce20c",
 		VolumeName:         "pvc-ce20c6cf-0a75-4b27-b9bd-3f53bf520f4f",
 		VolumeInternalName: "trident-pvc-ce20c6cf-0a75-4b27-b9bd-3f53bf520f4f-file-0",
 	}
diff --git a/storage_drivers/common.go b/storage_drivers/common.go
index 923fdf47f..94b70803d 100644
--- a/storage_drivers/common.go
+++ b/storage_drivers/common.go
@@ -274,3 +274,55 @@ func ensureJoinedStringContainsElem(joined, elem, sep string) string {
 	}
 	return joined + sep + elem
 }
+
+// EncodeStorageBackendPools serializes and base64 encodes backend storage pools within the driver's backend;
+// it is shared by all storage drivers.
+func EncodeStorageBackendPools[P StorageBackendPool](
+	ctx context.Context, config *CommonStorageDriverConfig, backendPools []P,
+) ([]string, error) {
+	fields := LogFields{"Method": "EncodeStorageBackendPools", "Type": config.StorageDriverName}
+	Logd(ctx, config.StorageDriverName,
+		config.DebugTraceFlags["method"]).WithFields(fields).Debug(">>>> EncodeStorageBackendPools")
+	defer Logd(ctx, config.StorageDriverName,
+		config.DebugTraceFlags["method"]).WithFields(fields).Debug("<<<< EncodeStorageBackendPools")
+
+	if len(backendPools) == 0 {
+		return nil, fmt.Errorf("failed to encode backend pools; no storage backend pools supplied")
+	}
+
+	encodedPools := make([]string, 0)
+	for _, pool := range backendPools {
+		encodedPool, err := utils.EncodeObjectToBase64String(pool)
+		if err != nil {
+			return nil, err
+		}
+		encodedPools = append(encodedPools, encodedPool)
+	}
+	return encodedPools, nil
+}
+
+// DecodeStorageBackendPools deserializes and decodes base64 encoded pools into driver-specific backend storage pools.
+func DecodeStorageBackendPools[P StorageBackendPool]( + ctx context.Context, config *CommonStorageDriverConfig, encodedPools []string, +) ([]P, error) { + fields := LogFields{"Method": "DecodeStorageBackendPools", "Type": config.StorageDriverName} + Logd(ctx, config.StorageDriverName, + config.DebugTraceFlags["method"]).WithFields(fields).Debug(">>>> DecodeStorageBackendPools") + defer Logd(ctx, config.StorageDriverName, + config.DebugTraceFlags["method"]).WithFields(fields).Debug("<<<< DecodeStorageBackendPools") + + if len(encodedPools) == 0 { + return nil, fmt.Errorf("failed to decode backend pools; no encoded backend pools supplied") + } + + backendPools := make([]P, 0) + for _, pool := range encodedPools { + var backendPool P + err := utils.DecodeBase64StringToObject(pool, &backendPool) + if err != nil { + return nil, err + } + backendPools = append(backendPools, backendPool) + } + return backendPools, nil +} diff --git a/storage_drivers/common_test.go b/storage_drivers/common_test.go index e3c704606..d15f88164 100644 --- a/storage_drivers/common_test.go +++ b/storage_drivers/common_test.go @@ -574,3 +574,106 @@ func TestEnsureJoinedStringContainsElem(t *testing.T) { }) } } + +func TestEncodeAndDecode_OntapFlexGroupStorageBackendPools(t *testing.T) { + ctx := context.Background() + config := &CommonStorageDriverConfig{ + StorageDriverName: "test storage driver", + DebugTraceFlags: map[string]bool{"method": true}, + } + backendPools := []OntapFlexGroupStorageBackendPool{{SvmUUID: "svm0"}} + + encoded, err := EncodeStorageBackendPools[OntapFlexGroupStorageBackendPool](ctx, config, backendPools) + assert.NoError(t, err) + assert.True(t, len(backendPools) == len(encoded)) + + // Passing the type of the backend pools is required for DecodeStorageBackendPools. + decoded, err := DecodeStorageBackendPools[OntapFlexGroupStorageBackendPool](ctx, config, encoded) + assert.NoError(t, err) + assert.EqualValues(t, backendPools, decoded) +} + +func TestEncodeAndDecode_OntapStorageBackendPools(t *testing.T) { + ctx := context.Background() + config := &CommonStorageDriverConfig{ + StorageDriverName: "test storage driver", + DebugTraceFlags: map[string]bool{"method": true}, + } + + backendPools := []OntapStorageBackendPool{ + {SvmUUID: "svm0", Aggregate: "aggr0"}, + {SvmUUID: "svm0", Aggregate: "aggr1"}, + } + + encoded, err := EncodeStorageBackendPools[OntapStorageBackendPool](ctx, config, backendPools) + assert.NoError(t, err) + assert.True(t, len(backendPools) == len(encoded)) + + // Passing the type of the backend pools is required for DecodeStorageBackendPools. + decoded, err := DecodeStorageBackendPools[OntapStorageBackendPool](ctx, config, encoded) + assert.NoError(t, err) + assert.EqualValues(t, backendPools, decoded) +} + +func TestEncodeAndDecode_OntapEconomyStorageBackendPools(t *testing.T) { + ctx := context.Background() + config := &CommonStorageDriverConfig{ + StorageDriverName: "test storage driver", + DebugTraceFlags: map[string]bool{"method": true}, + } + + backendPools := []OntapEconomyStorageBackendPool{ + {SvmUUID: "svm0", Aggregate: "aggr0", FlexVolPrefix: "trident_qtree_pool_test_"}, + {SvmUUID: "svm0", Aggregate: "aggr1", FlexVolPrefix: "trident_qtree_pool_test_"}, + } + + encoded, err := EncodeStorageBackendPools[OntapEconomyStorageBackendPool](ctx, config, backendPools) + assert.NoError(t, err) + assert.True(t, len(backendPools) == len(encoded)) + + // Passing the type of the backend pools is required for DecodeStorageBackendPools. 
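Aside: EncodeStorageBackendPools and DecodeStorageBackendPools form a generic round-trip keyed by the pool type parameter. The diff does not show utils.EncodeObjectToBase64String itself, so this is a standalone sketch of one plausible implementation (JSON marshal, then base64), for orientation only:

package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
)

// pool stands in for a driver-specific backend pool type such as OntapStorageBackendPool.
type pool struct {
	SvmUUID   string `json:"svmUUID"`
	Aggregate string `json:"aggregate"`
}

func encode(p pool) (string, error) {
	raw, err := json.Marshal(p)
	if err != nil {
		return "", err
	}
	return base64.StdEncoding.EncodeToString(raw), nil
}

func decode(s string) (pool, error) {
	var p pool
	raw, err := base64.StdEncoding.DecodeString(s)
	if err != nil {
		return p, err
	}
	return p, json.Unmarshal(raw, &p)
}

func main() {
	enc, _ := encode(pool{SvmUUID: "svm0", Aggregate: "aggr0"})
	dec, _ := decode(enc)
	fmt.Println(enc, dec) // opaque base64 string, then {svm0 aggr0}
}

Any stable serialization works here; the property the tests in this file assert is simply that decode(encode(p)) reproduces p for each pool type.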
+ decoded, err := DecodeStorageBackendPools[OntapEconomyStorageBackendPool](ctx, config, encoded) + assert.NoError(t, err) + assert.EqualValues(t, backendPools, decoded) +} + +func TestEncodeStorageBackendPools_FailsWithInvalidBackendPools(t *testing.T) { + ctx := context.Background() + config := &CommonStorageDriverConfig{ + StorageDriverName: "test storage driver", + DebugTraceFlags: map[string]bool{"method": true}, + } + + // Backend pools are nil. + encodedPools, err := EncodeStorageBackendPools[OntapStorageBackendPool](ctx, config, nil) + assert.Error(t, err) + assert.Nil(t, encodedPools) + + // Backend pools are empty. + encodedPools, err = EncodeStorageBackendPools[OntapStorageBackendPool](ctx, config, []OntapStorageBackendPool{}) + assert.Error(t, err) + assert.Nil(t, encodedPools) +} + +func TestDecodeStorageBackendPools_FailsWithInvalidEncodedPools(t *testing.T) { + ctx := context.Background() + config := &CommonStorageDriverConfig{ + StorageDriverName: "test storage driver", + DebugTraceFlags: map[string]bool{"method": true}, + } + + // Backend pools are nil. + backendPools, err := DecodeStorageBackendPools[OntapStorageBackendPool](ctx, config, nil) + assert.Error(t, err) + assert.Nil(t, backendPools) + + // Backend pools are empty. + backendPools, err = DecodeStorageBackendPools[OntapStorageBackendPool](ctx, config, []string{}) + assert.Error(t, err) + assert.Nil(t, backendPools) + + // Backend pools specified are not valid base64 encoded strings. + backendPools, err = DecodeStorageBackendPools[OntapStorageBackendPool](ctx, config, []string{"test", ""}) + assert.Error(t, err) + assert.Nil(t, backendPools) +} diff --git a/storage_drivers/ontap/api/abstraction.go b/storage_drivers/ontap/api/abstraction.go index 7bf4afc48..f748b397a 100644 --- a/storage_drivers/ontap/api/abstraction.go +++ b/storage_drivers/ontap/api/abstraction.go @@ -230,10 +230,11 @@ type OntapAPI interface { NVMeSubsystemAddNamespace(ctx context.Context, subsystemUUID, nsUUID string) error NVMeSubsystemRemoveNamespace(ctx context.Context, subsysUUID, nsUUID string) error NVMeAddHostToSubsystem(ctx context.Context, hostNQN, subsUUID string) error + NVMeRemoveHostFromSubsystem(ctx context.Context, hostNQN, subsUUID string) error NVMeSubsystemGetNamespaceCount(ctx context.Context, subsysUUID string) (int64, error) NVMeIsNamespaceMapped(ctx context.Context, subsysUUID, nsUUID string) (bool, error) NVMeEnsureNamespaceMapped(ctx context.Context, subsystemUUID, nsUUID string) error - NVMeEnsureNamespaceUnmapped(ctx context.Context, subsytemUUID, nsUUID string) error + NVMeEnsureNamespaceUnmapped(ctx context.Context, hostNQN, subsytemUUID, nsUUID string) (bool, error) } type AggregateSpace interface { diff --git a/storage_drivers/ontap/api/abstraction_rest.go b/storage_drivers/ontap/api/abstraction_rest.go index 1f22ccfaa..ba9f14420 100644 --- a/storage_drivers/ontap/api/abstraction_rest.go +++ b/storage_drivers/ontap/api/abstraction_rest.go @@ -143,7 +143,7 @@ func (d OntapAPIREST) ValidateAPIVersion(ctx context.Context) error { // Make sure we're using a valid ONTAP version ontapVersion, err := d.APIVersion(ctx) if err != nil { - return fmt.Errorf("could not determine Data ONTAP version: %v", err) + return fmt.Errorf("could not determine Data ONTAP version; %v", err) } Logc(ctx).WithField("ontapVersion", ontapVersion).Debug("ONTAP version.") @@ -395,6 +395,11 @@ func lunInfoFromRestAttrsHelper(lunGetResponse *models.Lun) (*Lun, error) { state = *lunGetResponse.Status.State } + osType := "" + if lunGetResponse.OsType 
!= nil { + osType = *lunGetResponse.OsType + } + lunInfo := &Lun{ Comment: responseComment, CreateTime: responseCreateTime, @@ -408,6 +413,7 @@ func lunInfoFromRestAttrsHelper(lunGetResponse *models.Lun) (*Lun, error) { SerialNumber: serialNumber, State: state, VolumeName: responseVolName, + OsType: osType, } return lunInfo, nil } @@ -534,14 +540,14 @@ func (d OntapAPIREST) FlexgroupCreate(ctx context.Context, volume Volume) error func (d OntapAPIREST) FlexgroupCloneSplitStart(ctx context.Context, cloneName string) error { if err := d.api.FlexgroupCloneSplitStart(ctx, cloneName); err != nil { - return fmt.Errorf("error splitting clone: %v", err) + return fmt.Errorf("error splitting clone; %v", err) } return nil } func (d OntapAPIREST) FlexgroupDisableSnapshotDirectoryAccess(ctx context.Context, volumeName string) error { if err := d.api.FlexGroupVolumeDisableSnapshotDirectoryAccess(ctx, volumeName); err != nil { - return fmt.Errorf("error disabling snapshot directory access: %v", err) + return fmt.Errorf("error disabling snapshot directory access; %v", err) } return nil @@ -571,7 +577,7 @@ func (d OntapAPIREST) FlexgroupSetComment( ctx context.Context, volumeNameInternal, volumeNameExternal, comment string, ) error { if err := d.api.FlexGroupSetComment(ctx, volumeNameInternal, comment); err != nil { - Logc(ctx).WithField("originalName", volumeNameExternal).Errorf("Modifying comment failed: %v", err) + Logc(ctx).WithField("originalName", volumeNameExternal).Errorf("Modifying comment failed; %v", err) return fmt.Errorf("volume %s modify failed: %v", volumeNameExternal, err) } return nil @@ -579,7 +585,7 @@ func (d OntapAPIREST) FlexgroupSetComment( func (d OntapAPIREST) FlexgroupSetQosPolicyGroupName(ctx context.Context, name string, qos QosPolicyGroup) error { if err := d.api.FlexgroupSetQosPolicyGroupName(ctx, name, qos); err != nil { - return fmt.Errorf("error setting quality of service policy: %v", err) + return fmt.Errorf("error setting quality of service policy; %v", err) } return nil @@ -588,14 +594,14 @@ func (d OntapAPIREST) FlexgroupSetQosPolicyGroupName(ctx context.Context, name s func (d OntapAPIREST) FlexgroupSnapshotCreate(ctx context.Context, snapshotName, sourceVolume string) error { volume, err := d.FlexgroupInfo(ctx, sourceVolume) if err != nil { - return fmt.Errorf("error looking up source volume %v: %v", sourceVolume, err) + return fmt.Errorf("error looking up source volume %v; %v", sourceVolume, err) } if volume == nil { return fmt.Errorf("error looking up source volume: %v", sourceVolume) } if err = d.api.SnapshotCreateAndWait(ctx, volume.UUID, snapshotName); err != nil { - return fmt.Errorf("could not create snapshot: %v", err) + return fmt.Errorf("could not create snapshot; %v", err) } return nil } @@ -603,7 +609,7 @@ func (d OntapAPIREST) FlexgroupSnapshotCreate(ctx context.Context, snapshotName, func (d OntapAPIREST) FlexgroupSnapshotList(ctx context.Context, sourceVolume string) (Snapshots, error) { volume, err := d.FlexgroupInfo(ctx, sourceVolume) if err != nil { - return nil, fmt.Errorf("error looking up source volume: %v", err) + return nil, fmt.Errorf("error looking up source volume; %v", err) } if volume == nil { return nil, fmt.Errorf("error looking up source volume: %v", sourceVolume) @@ -611,7 +617,7 @@ func (d OntapAPIREST) FlexgroupSnapshotList(ctx context.Context, sourceVolume st snapListResponse, err := d.api.SnapshotList(ctx, volume.UUID) if err != nil { - return nil, fmt.Errorf("error enumerating snapshots: %v", err) + return nil, fmt.Errorf("error 
enumerating snapshots; %v", err) } if snapListResponse == nil { return nil, fmt.Errorf("error enumerating snapshots") @@ -641,7 +647,7 @@ func (d OntapAPIREST) FlexgroupModifyUnixPermissions( err := d.api.FlexGroupModifyUnixPermissions(ctx, volumeNameInternal, unixPermissions) if err != nil { Logc(ctx).WithField("originalName", volumeNameExternal).Errorf("Could not import volume, "+ - "modifying unix permissions failed: %v", err) + "modifying unix permissions failed; %v", err) return fmt.Errorf("volume %s modify failed: %v", volumeNameExternal, err) } @@ -769,7 +775,7 @@ func (d OntapAPIREST) GetSVMAggregateAttributes(ctx context.Context) (aggrList m func (d OntapAPIREST) ExportPolicyDestroy(ctx context.Context, policy string) error { exportPolicyDestroyResult, err := d.api.ExportPolicyDestroy(ctx, policy) if err != nil { - return fmt.Errorf("error deleting export policy: %v", err) + return fmt.Errorf("error deleting export policy; %v", err) } if exportPolicyDestroyResult == nil { return fmt.Errorf("error deleting export policy") @@ -867,7 +873,7 @@ func (d OntapAPIREST) GetSVMAggregateSpace(ctx context.Context, aggregate string func (d OntapAPIREST) VolumeDisableSnapshotDirectoryAccess(ctx context.Context, name string) error { if err := d.api.VolumeDisableSnapshotDirectoryAccess(ctx, name); err != nil { - return fmt.Errorf("error disabling snapshot directory access: %v", err) + return fmt.Errorf("error disabling snapshot directory access; %v", err) } return nil @@ -896,7 +902,7 @@ func (d OntapAPIREST) VolumeSetComment( ctx context.Context, volumeNameInternal, volumeNameExternal, comment string, ) error { if err := d.api.VolumeSetComment(ctx, volumeNameInternal, comment); err != nil { - Logc(ctx).WithField("originalName", volumeNameExternal).Errorf("Modifying comment failed: %v", err) + Logc(ctx).WithField("originalName", volumeNameExternal).Errorf("Modifying comment failed; %v", err) return fmt.Errorf("volume %s modify failed: %v", volumeNameExternal, err) } return nil @@ -948,7 +954,7 @@ func (d OntapAPIREST) VolumeModifyUnixPermissions( err := d.api.VolumeModifyUnixPermissions(ctx, volumeNameInternal, unixPermissions) if err != nil { Logc(ctx).WithField("originalName", volumeNameExternal).Errorf( - "Could not import volume, modifying unix permissions failed: %v", err) + "Could not import volume, modifying unix permissions failed; %v", err) return fmt.Errorf("volume %s modify failed: %v", volumeNameExternal, err) } @@ -1016,7 +1022,7 @@ func (d OntapAPIREST) ExportRuleCreate(ctx context.Context, policyName, desiredP ruleResponse, err = d.api.ExportRuleCreate(ctx, policyName, desiredPolicyRule, protocol, []string{"any"}, []string{"any"}, []string{"any"}) if err != nil { - err = fmt.Errorf("error creating export rule: %v", err) + err = fmt.Errorf("error creating export rule; %v", err) Logc(ctx).WithFields(LogFields{ "ExportPolicy": policyName, "ClientMatch": desiredPolicyRule, @@ -1074,7 +1080,7 @@ func (d OntapAPIREST) ExportPolicyExists(ctx context.Context, policyName string) func (d OntapAPIREST) ExportRuleList(ctx context.Context, policyName string) (map[string]int, error) { ruleListResponse, err := d.api.ExportRuleList(ctx, policyName) if err != nil { - return nil, fmt.Errorf("error listing export policy rules: %v", err) + return nil, fmt.Errorf("error listing export policy rules; %v", err) } rules := make(map[string]int) @@ -1130,7 +1136,7 @@ func (d OntapAPIREST) QtreeCount(ctx context.Context, volumeName string) (int, e func (d OntapAPIREST) QtreeListByPrefix(ctx context.Context, 
prefix, volumePrefix string) (Qtrees, error) { qtreeList, err := d.api.QtreeList(ctx, prefix, volumePrefix) if err != nil { - msg := fmt.Sprintf("Error listing qtrees. %v", err) + msg := fmt.Sprintf("Error listing qtrees; %v", err) Logc(ctx).Errorf(msg) return nil, fmt.Errorf(msg) } @@ -1286,7 +1292,7 @@ func (d OntapAPIREST) VolumeSnapshotCreate(ctx context.Context, snapshotName, so } if err = d.api.SnapshotCreateAndWait(ctx, volume.UUID, snapshotName); err != nil { - return fmt.Errorf("could not create snapshot: %v", err) + return fmt.Errorf("could not create snapshot; %v", err) } return nil } @@ -1325,7 +1331,7 @@ func (d OntapAPIREST) pollVolumeExistence(ctx context.Context, volumeName string func (d OntapAPIREST) VolumeCloneCreate(ctx context.Context, cloneName, sourceName, snapshot string, async bool) error { err := d.api.VolumeCloneCreateAsync(ctx, cloneName, sourceName, snapshot) if err != nil { - return fmt.Errorf("error creating clone: %v", err) + return fmt.Errorf("error creating clone; %v", err) } return nil @@ -1415,7 +1421,7 @@ func (d OntapAPIREST) VolumeSnapshotInfo(ctx context.Context, snapshotName, sour volume, err := d.VolumeInfo(ctx, sourceVolume) if err != nil { - return emptyResult, fmt.Errorf("error looking up source volume: %v", err) + return emptyResult, fmt.Errorf("error looking up source volume; %v", err) } if volume == nil { return emptyResult, fmt.Errorf("error looking up source volume: %v", sourceVolume) @@ -1451,7 +1457,7 @@ func (d OntapAPIREST) VolumeSnapshotInfo(ctx context.Context, snapshotName, sour func (d OntapAPIREST) VolumeSnapshotList(ctx context.Context, sourceVolume string) (Snapshots, error) { volume, err := d.VolumeInfo(ctx, sourceVolume) if err != nil { - return nil, fmt.Errorf("error looking up source volume: %v", err) + return nil, fmt.Errorf("error looking up source volume; %v", err) } if volume == nil { return nil, fmt.Errorf("error looking up source volume: %v", sourceVolume) @@ -1459,7 +1465,7 @@ func (d OntapAPIREST) VolumeSnapshotList(ctx context.Context, sourceVolume strin snapListResponse, err := d.api.SnapshotList(ctx, volume.UUID) if err != nil { - return nil, fmt.Errorf("error enumerating snapshots: %v", err) + return nil, fmt.Errorf("error enumerating snapshots; %v", err) } if snapListResponse == nil { return nil, fmt.Errorf("error enumerating snapshots") @@ -1485,7 +1491,7 @@ func (d OntapAPIREST) VolumeSnapshotList(ctx context.Context, sourceVolume strin func (d OntapAPIREST) VolumeSetQosPolicyGroupName(ctx context.Context, name string, qos QosPolicyGroup) error { if err := d.api.VolumeSetQosPolicyGroupName(ctx, name, qos); err != nil { - return fmt.Errorf("error setting quality of service policy: %v", err) + return fmt.Errorf("error setting quality of service policy; %v", err) } return nil @@ -1493,7 +1499,7 @@ func (d OntapAPIREST) VolumeSetQosPolicyGroupName(ctx context.Context, name stri func (d OntapAPIREST) VolumeCloneSplitStart(ctx context.Context, cloneName string) error { if err := d.api.VolumeCloneSplitStart(ctx, cloneName); err != nil { - return fmt.Errorf("error splitting clone: %v", err) + return fmt.Errorf("error splitting clone; %v", err) } return nil } @@ -1502,7 +1508,7 @@ func (d OntapAPIREST) SnapshotRestoreVolume( ctx context.Context, snapshotName, sourceVolume string, ) error { if err := d.api.SnapshotRestoreVolume(ctx, snapshotName, sourceVolume); err != nil { - return fmt.Errorf("error restoring snapshot: %v", err) + return fmt.Errorf("error restoring snapshot; %v", err) } return nil @@ -1510,7 +1516,7 @@ 
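Aside: the sweep through this file replaces ':' with ';' as the separator before a wrapped cause but keeps the %v verb, so the cause is flattened to text. For contrast, a tiny self-contained example of the %w alternative, which keeps the chain inspectable (illustrative only; not a change this diff makes):

package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("volume not found")

func main() {
	// %v flattens the cause into text; the chain is lost.
	flat := fmt.Errorf("error looking up source volume; %v", errNotFound)
	// %w keeps the cause retrievable with errors.Is / errors.As.
	wrapped := fmt.Errorf("error looking up source volume; %w", errNotFound)

	fmt.Println(errors.Is(flat, errNotFound))    // false
	fmt.Println(errors.Is(wrapped, errNotFound)) // true
}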
func (d OntapAPIREST) SnapshotRestoreVolume( func (d OntapAPIREST) SnapshotRestoreFlexgroup(ctx context.Context, snapshotName, sourceVolume string) error { if err := d.api.SnapshotRestoreFlexgroup(ctx, snapshotName, sourceVolume); err != nil { - return fmt.Errorf("error restoring snapshot: %v", err) + return fmt.Errorf("error restoring snapshot; %v", err) } return nil @@ -1522,7 +1528,7 @@ func (d OntapAPIREST) SnapshotDeleteByNameAndStyle( // GET the snapshot by name snapshot, err := d.api.SnapshotGetByName(ctx, sourceVolumeUUID, snapshotName) if err != nil { - return fmt.Errorf("error checking for snapshot: %v", err) + return fmt.Errorf("error checking for snapshot; %v", err) } if snapshot == nil { return fmt.Errorf("error looking up snapshot: %v", snapshotName) @@ -1535,7 +1541,7 @@ func (d OntapAPIREST) SnapshotDeleteByNameAndStyle( // DELETE the snapshot snapshotDeleteResult, err := d.api.SnapshotDelete(ctx, sourceVolumeUUID, snapshotUUID) if err != nil { - return fmt.Errorf("error while deleting snapshot: %v", err) + return fmt.Errorf("error while deleting snapshot; %v", err) } if snapshotDeleteResult == nil { return fmt.Errorf("error while deleting snapshot: %v", snapshotName) @@ -1565,7 +1571,7 @@ func (d OntapAPIREST) SnapshotDeleteByNameAndStyle( func (d OntapAPIREST) FlexgroupSnapshotDelete(ctx context.Context, snapshotName, sourceVolume string) error { volume, err := d.FlexgroupInfo(ctx, sourceVolume) if err != nil { - return fmt.Errorf("error looking up source volume: %v", err) + return fmt.Errorf("error looking up source volume; %v", err) } if volume == nil { return fmt.Errorf("error looking up source volume: %v", sourceVolume) @@ -1578,7 +1584,7 @@ func (d OntapAPIREST) FlexgroupSnapshotDelete(ctx context.Context, snapshotName, func (d OntapAPIREST) VolumeSnapshotDelete(ctx context.Context, snapshotName, sourceVolume string) error { volume, err := d.VolumeInfo(ctx, sourceVolume) if err != nil { - return fmt.Errorf("error looking up source volume: %v", err) + return fmt.Errorf("error looking up source volume; %v", err) } if volume == nil { return fmt.Errorf("error looking up source volume: %v", sourceVolume) @@ -2441,7 +2447,7 @@ func (d OntapAPIREST) IscsiInterfaceGet(ctx context.Context, svm string) ([]stri var iSCSINodeNames []string interfaceResponse, err := d.api.IscsiInterfaceGet(ctx) if err != nil { - return nil, fmt.Errorf("could not get SVM iSCSI node name: %v", err) + return nil, fmt.Errorf("could not get SVM iSCSI node name; %v", err) } if interfaceResponse == nil || interfaceResponse.Payload == nil { return nil, nil @@ -2653,7 +2659,7 @@ func (d OntapAPIREST) GetSLMDataLifs(ctx context.Context, ips, reportingNodeName netInterfaces, err := d.api.NetworkIPInterfacesList(ctx) if err != nil { - return nil, fmt.Errorf("error checking network interfaces: %v", err) + return nil, fmt.Errorf("error checking network interfaces; %v", err) } if netInterfaces == nil || netInterfaces.Payload == nil { @@ -2806,7 +2812,7 @@ func (d OntapAPIREST) NVMeSubsystemAddNamespace(ctx context.Context, subsystemUU defer Logd(ctx, d.driverName, d.api.ClientConfig().DebugTraceFlags["method"]).WithFields(fields).Trace("<<<< NVMeSubsystemAddNamespace") if err := d.api.NVMeSubsystemAddNamespace(ctx, subsystemUUID, nsUUID); err != nil { - return fmt.Errorf("error adding namespace to subsystem: %v", err) + return fmt.Errorf("error adding namespace to subsystem; %v", err) } return nil @@ -2824,7 +2830,7 @@ func (d OntapAPIREST) NVMeSubsystemRemoveNamespace(ctx context.Context, subsysUU defer 
Logd(ctx, d.driverName, d.api.ClientConfig().DebugTraceFlags["method"]).WithFields(fields).Trace("<<<< NVMeSubsystemRemoveNamespace")
 
 	if err := d.api.NVMeSubsystemRemoveNamespace(ctx, subsysUUID, nsUUID); err != nil {
-		return fmt.Errorf("error removing Namespace from subsystem map: %v", err)
+		return fmt.Errorf("error removing Namespace from subsystem map; %v", err)
 	}
 
 	return nil
@@ -2906,8 +2912,40 @@ func (d OntapAPIREST) NVMeAddHostToSubsystem(ctx context.Context, hostNQN, subsy
 	// Add new host to the subsystem
 	if err := d.api.NVMeAddHostNqnToSubsystem(ctx, hostNQN, subsysUUID); err != nil {
-		return fmt.Errorf("failed to add host nqn to subsystem, %v", err)
+		return fmt.Errorf("failed to add host nqn to subsystem; %v", err)
+	}
+	return nil
+}
+
+func (d OntapAPIREST) NVMeRemoveHostFromSubsystem(ctx context.Context, hostNQN, subsysUUID string) error {
+	fields := LogFields{
+		"Method":         "NVMeRemoveHostFromSubsystem",
+		"Type":           "OntapAPIREST",
+		"subsystem uuid": subsysUUID,
+	}
+	Logd(ctx, d.driverName, d.api.ClientConfig().DebugTraceFlags["method"]).WithFields(fields).Trace(">>>> NVMeRemoveHostFromSubsystem")
+	defer Logd(ctx, d.driverName, d.api.ClientConfig().DebugTraceFlags["method"]).WithFields(fields).Trace("<<<< NVMeRemoveHostFromSubsystem")
+
+	hosts, err := d.api.NVMeGetHostsOfSubsystem(ctx, subsysUUID)
+	if err != nil {
+		return err
+	}
+
+	hostFound := false
+	for _, host := range hosts {
+		if host != nil && *host.Nqn == hostNQN {
+			hostFound = true
+			break
+		}
+	}
+
+	if hostFound {
+		// Remove host from the subsystem
+		if err := d.api.NVMeRemoveHostFromSubsystem(ctx, hostNQN, subsysUUID); err != nil {
+			return fmt.Errorf("failed to remove host nqn from subsystem; %v", err)
+		}
 	}
+
 	return nil
 }
@@ -2923,7 +2961,7 @@ func (d OntapAPIREST) NVMeSubsystemCreate(ctx context.Context, subsystemName str
 	subsystem, err := d.api.NVMeSubsystemGetByName(ctx, subsystemName)
 	if err != nil {
-		Logc(ctx).Infof("problem getting subsystem %v", err)
+		Logc(ctx).Infof("problem getting subsystem; %v", err)
 		return nil, err
 	}
 	if subsystem == nil {
@@ -2958,7 +2996,7 @@ func (d OntapAPIREST) NVMeEnsureNamespaceMapped(ctx context.Context, subsystemUU
 	// map namespace to the subsystem
 	isNameSpaceMapped, err := d.api.NVMeIsNamespaceMapped(ctx, subsystemUUID, nsUUID)
 	if err != nil {
-		return fmt.Errorf("Unable to get namespace subsystem mapping: err:%v", err)
+		return fmt.Errorf("Unable to get namespace subsystem mapping; %v", err)
 	}
 
 	// check if it is mapped already or not. if not mapped, add it to subsystem, else treat it as success
@@ -2977,37 +3015,61 @@ func (d OntapAPIREST) NVMeEnsureNamespaceUnmapped(ctx context.Context, subsystemUU
 // a) removes the namespace from the subsystem
 // b) deletes the subsystem if no more namespaces are attached to it
 // If namespace is not mapped to subsystem, it is treated as success
-func (d OntapAPIREST) NVMeEnsureNamespaceUnmapped(ctx context.Context, subsystemUUID, namespaceUUID string) error {
+// The function also returns a bool along with the error; a true value denotes the namespace is fully
+// unmapped and the published info can be removed for the NVMe volume
+func (d OntapAPIREST) NVMeEnsureNamespaceUnmapped(ctx context.Context, hostNQN, subsystemUUID, namespaceUUID string) (bool, error) {
 	// check if the namespace is mapped to the subsystem before attempting to remove it
 	isNameSpaceMapped, err := d.api.NVMeIsNamespaceMapped(ctx, subsystemUUID, namespaceUUID)
 	if err != nil {
-		return fmt.Errorf("Error getting namespace %s from subsystem %s.
API returned error %v", namespaceUUID, subsystemUUID, err) + return false, fmt.Errorf("error getting namespace %s from subsystem %s; %v", namespaceUUID, subsystemUUID, err) } + // If namespace is not mapped, remove the published info if there is any if isNameSpaceMapped == false { Logc(ctx).Infof("Namespace %v is not mapped to subsystem %v", namespaceUUID, subsystemUUID) - return nil + return true, nil + } + + subsystemHosts, err := d.api.NVMeGetHostsOfSubsystem(ctx, subsystemUUID) + if err != nil { + return false, fmt.Errorf("error getting hosts mapped to subsystem with UUID %s; %v", subsystemUUID, err) + } + + if subsystemHosts == nil { + return false, fmt.Errorf("error getting hosts attached to subsystem %v", subsystemUUID) + } + + // In case of multiple hosts attached to a subsystem (e.g. in RWX case), do not delete the namespace, + // subsystem or the published info + if len(subsystemHosts) > 1 { + Logc(ctx).Infof("Multiple hosts are attached to this subsystem %v. Do not delete namespace or subsystem", subsystemUUID) + // Remove HostNQN from the subsystem using api call + if err := d.api.NVMeRemoveHostFromSubsystem(ctx, hostNQN, subsystemUUID); err != nil { + Logc(ctx).Errorf("Remove host from subsystem failed; %v", err) + return false, err + } + return false, nil } // Unmap the namespace from the subsystem err = d.api.NVMeSubsystemRemoveNamespace(ctx, subsystemUUID, namespaceUUID) if err != nil { - return fmt.Errorf("Error removing namespace %s from subsystem %s. API returned error %v", namespaceUUID, subsystemUUID, err) + return false, fmt.Errorf("error removing namespace %s from subsystem %s; %v", namespaceUUID, subsystemUUID, err) } // Get the number of namespaces present in the subsystem count, err := d.api.NVMeNamespaceCount(ctx, subsystemUUID) if err != nil { - return fmt.Errorf("Error getting namespace count for subsystem %s. API returned error %v", subsystemUUID, err) + return false, fmt.Errorf("error getting namespace count for subsystem %s; %v", subsystemUUID, err) } // Delete the subsystem if no. of namespaces is 0 if count == 0 { if err := d.api.NVMeSubsystemDelete(ctx, subsystemUUID); err != nil { - return fmt.Errorf("Error deleting subsystem %s. 
API returned error %v", subsystemUUID, err) + return false, fmt.Errorf("error deleting subsystem %s; %v", subsystemUUID, err) } } - return nil + return true, nil } func (d OntapAPIREST) NVMeNamespaceGetSize(ctx context.Context, namespacePath string) (int, error) { diff --git a/storage_drivers/ontap/api/abstraction_rest_test.go b/storage_drivers/ontap/api/abstraction_rest_test.go index 68a525216..d725f0318 100644 --- a/storage_drivers/ontap/api/abstraction_rest_test.go +++ b/storage_drivers/ontap/api/abstraction_rest_test.go @@ -542,6 +542,59 @@ func TestNVMeAddHostToSubsystem(t *testing.T) { assert.NoError(t, err) } +func TestNVMeRemoveHostFromSubsystem(t *testing.T) { + clientConfig := api.ClientConfig{ + DebugTraceFlags: map[string]bool{"method": true}, + } + + ctrl := gomock.NewController(t) + defer ctrl.Finish() + mock := mockapi.NewMockRestClientInterface(ctrl) + oapi, err := api.NewOntapAPIRESTFromRestClientInterface(mock) + assert.NoError(t, err) + + // case 1 : Error removing host from subsystem + hostNQN := "fakeNQN" + subsystemUUID := "fakesubsysUUID" + host1 := &models.NvmeSubsystemHost{} + + mock.EXPECT().NVMeGetHostsOfSubsystem(ctx, subsystemUUID).Return(nil, fmt.Errorf("Error while getting hosts for subsystem")) + mock.EXPECT().ClientConfig().Return(clientConfig).AnyTimes() + + err = oapi.NVMeRemoveHostFromSubsystem(ctx, hostNQN, subsystemUUID) + + assert.Error(t, err) + + // case 2 : host not found + Nqn := "wrongNQN" + host1.Nqn = &Nqn + mock.EXPECT().NVMeGetHostsOfSubsystem(ctx, subsystemUUID).Return([]*models.NvmeSubsystemHost{host1}, nil) + mock.EXPECT().ClientConfig().Return(clientConfig).AnyTimes() + + err = oapi.NVMeRemoveHostFromSubsystem(ctx, hostNQN, subsystemUUID) + + assert.NoError(t, err) + + // case 3 : host found but failed to remove it + host1.Nqn = &hostNQN + mock.EXPECT().NVMeGetHostsOfSubsystem(ctx, subsystemUUID).Return([]*models.NvmeSubsystemHost{host1}, nil) + mock.EXPECT().ClientConfig().Return(clientConfig).AnyTimes() + mock.EXPECT().NVMeRemoveHostFromSubsystem(ctx, hostNQN, subsystemUUID).Return(fmt.Errorf("Error while removing host")) + + err = oapi.NVMeRemoveHostFromSubsystem(ctx, hostNQN, subsystemUUID) + + assert.Error(t, err) + + // case 4 : Success- host found and removed it + mock.EXPECT().NVMeGetHostsOfSubsystem(ctx, subsystemUUID).Return([]*models.NvmeSubsystemHost{host1}, nil) + mock.EXPECT().ClientConfig().Return(clientConfig).AnyTimes() + mock.EXPECT().NVMeRemoveHostFromSubsystem(ctx, hostNQN, subsystemUUID).Return(nil) + + err = oapi.NVMeRemoveHostFromSubsystem(ctx, hostNQN, subsystemUUID) + + assert.NoError(t, err) +} + func TestNVMeSubsystemCreate(t *testing.T) { clientConfig := api.ClientConfig{ DebugTraceFlags: map[string]bool{"method": true}, @@ -669,62 +722,119 @@ func TestNVMeNamespaceUnmapped(t *testing.T) { subsystemUUID := "fakeSubsysUUID" nsUUID := "fakeNsUUID" + hostNQN := "fakeHostNQN" + host2NQN := "fakeHost2NQN" + host1 := &models.NvmeSubsystemHost{Nqn: &hostNQN} + host2 := &models.NvmeSubsystemHost{Nqn: &host2NQN} + var removePublishInfo bool // case 1: Error getting namespace from subsystem mock.EXPECT().ClientConfig().Return(clientConfig).AnyTimes() - mock.EXPECT().NVMeIsNamespaceMapped(ctx, subsystemUUID, nsUUID).Return(false, fmt.Errorf("Error getting namespace subsystem mapping")) + mock.EXPECT().NVMeIsNamespaceMapped(ctx, subsystemUUID, nsUUID).Return(false, fmt.Errorf("Error getting namespace subsystem mapping")).Times(1) - err = oapi.NVMeEnsureNamespaceUnmapped(ctx, subsystemUUID, nsUUID) + removePublishInfo, err 
= oapi.NVMeEnsureNamespaceUnmapped(ctx, hostNQN, subsystemUUID, nsUUID) + assert.Equal(t, false, removePublishInfo) assert.Error(t, err) // case 2: Namespace is not mapped mock.EXPECT().ClientConfig().Return(clientConfig).AnyTimes() - mock.EXPECT().NVMeIsNamespaceMapped(ctx, subsystemUUID, nsUUID).Return(false, nil) + mock.EXPECT().NVMeIsNamespaceMapped(ctx, subsystemUUID, nsUUID).Return(false, nil).Times(1) - err = oapi.NVMeEnsureNamespaceUnmapped(ctx, subsystemUUID, nsUUID) + removePublishInfo, err = oapi.NVMeEnsureNamespaceUnmapped(ctx, hostNQN, subsystemUUID, nsUUID) + assert.Equal(t, true, removePublishInfo) assert.NoError(t, err) - // case 3: Error removing namespace from subsystem + // case 3: Failed to get hosts of the subsystem mock.EXPECT().ClientConfig().Return(clientConfig).AnyTimes() - mock.EXPECT().NVMeIsNamespaceMapped(ctx, subsystemUUID, nsUUID).Return(true, nil) - mock.EXPECT().NVMeSubsystemRemoveNamespace(ctx, subsystemUUID, nsUUID).Return(fmt.Errorf("Error removing namespace from subsystem")) + mock.EXPECT().NVMeIsNamespaceMapped(ctx, subsystemUUID, nsUUID).Return(true, nil).Times(1) + mock.EXPECT().NVMeGetHostsOfSubsystem(ctx, subsystemUUID).Return(nil, fmt.Errorf("failed to get hosts")).Times(1) - err = oapi.NVMeEnsureNamespaceUnmapped(ctx, subsystemUUID, nsUUID) + removePublishInfo, err = oapi.NVMeEnsureNamespaceUnmapped(ctx, hostNQN, subsystemUUID, nsUUID) + assert.Equal(t, false, removePublishInfo) assert.Error(t, err) - // case 4: Error getting namespace count from subsystem + // case 4: hosts of the subsystem not returned mock.EXPECT().ClientConfig().Return(clientConfig).AnyTimes() - mock.EXPECT().NVMeIsNamespaceMapped(ctx, subsystemUUID, nsUUID).Return(true, nil) - mock.EXPECT().NVMeSubsystemRemoveNamespace(ctx, subsystemUUID, nsUUID).Return(nil) - mock.EXPECT().NVMeNamespaceCount(ctx, subsystemUUID).Return(int64(0), fmt.Errorf("Error getting namespace count from subsystem")) + mock.EXPECT().NVMeIsNamespaceMapped(ctx, subsystemUUID, nsUUID).Return(true, nil).Times(1) + mock.EXPECT().NVMeGetHostsOfSubsystem(ctx, subsystemUUID).Return(nil, nil).Times(1) - err = oapi.NVMeEnsureNamespaceUnmapped(ctx, subsystemUUID, nsUUID) + removePublishInfo, err = oapi.NVMeEnsureNamespaceUnmapped(ctx, hostNQN, subsystemUUID, nsUUID) + assert.Equal(t, false, removePublishInfo) assert.Error(t, err) - // case 5: Error deleting subsystem + // case 5: multiple hosts of the subsystem returned but error while removing host from subsystem mock.EXPECT().ClientConfig().Return(clientConfig).AnyTimes() - mock.EXPECT().NVMeIsNamespaceMapped(ctx, subsystemUUID, nsUUID).Return(true, nil) - mock.EXPECT().NVMeSubsystemRemoveNamespace(ctx, subsystemUUID, nsUUID).Return(nil) - mock.EXPECT().NVMeNamespaceCount(ctx, subsystemUUID).Return(int64(0), nil) - mock.EXPECT().NVMeSubsystemDelete(ctx, subsystemUUID).Return(fmt.Errorf("Error deleting subsystem")) + mock.EXPECT().NVMeIsNamespaceMapped(ctx, subsystemUUID, nsUUID).Return(true, nil).Times(1) + mock.EXPECT().NVMeGetHostsOfSubsystem(ctx, subsystemUUID).Return([]*models.NvmeSubsystemHost{host1, host2}, nil).Times(1) + mock.EXPECT().NVMeRemoveHostFromSubsystem(ctx, hostNQN, subsystemUUID).Return(fmt.Errorf("Error removing host from subsystem")).Times(1) - err = oapi.NVMeEnsureNamespaceUnmapped(ctx, subsystemUUID, nsUUID) + removePublishInfo, err = oapi.NVMeEnsureNamespaceUnmapped(ctx, hostNQN, subsystemUUID, nsUUID) + assert.Equal(t, false, removePublishInfo) assert.Error(t, err) - // case 6: Success deleting subsystem + // case 6: multiple hosts of the 
subsystem returned and the host is removed from the subsystem successfully
+	mock.EXPECT().ClientConfig().Return(clientConfig).AnyTimes()
+	mock.EXPECT().NVMeIsNamespaceMapped(ctx, subsystemUUID, nsUUID).Return(true, nil).Times(1)
+	mock.EXPECT().NVMeGetHostsOfSubsystem(ctx, subsystemUUID).Return([]*models.NvmeSubsystemHost{host1, host2}, nil).Times(1)
+	mock.EXPECT().NVMeRemoveHostFromSubsystem(ctx, hostNQN, subsystemUUID).Return(nil).Times(1)
+
+	removePublishInfo, err = oapi.NVMeEnsureNamespaceUnmapped(ctx, hostNQN, subsystemUUID, nsUUID)
+
+	assert.Equal(t, false, removePublishInfo)
+	assert.NoError(t, err)
+
+	// case 7: Error removing namespace from subsystem
+	mock.EXPECT().ClientConfig().Return(clientConfig).AnyTimes()
+	mock.EXPECT().NVMeIsNamespaceMapped(ctx, subsystemUUID, nsUUID).Return(true, nil).Times(1)
+	mock.EXPECT().NVMeGetHostsOfSubsystem(ctx, subsystemUUID).Return([]*models.NvmeSubsystemHost{host1}, nil).Times(1)
+	mock.EXPECT().NVMeSubsystemRemoveNamespace(ctx, subsystemUUID, nsUUID).Return(fmt.Errorf("Error removing namespace from subsystem")).Times(1)
+
+	removePublishInfo, err = oapi.NVMeEnsureNamespaceUnmapped(ctx, hostNQN, subsystemUUID, nsUUID)
+
+	assert.Equal(t, false, removePublishInfo)
+	assert.Error(t, err)
+
+	// case 8: Error getting namespace count from subsystem
+	mock.EXPECT().ClientConfig().Return(clientConfig).AnyTimes()
+	mock.EXPECT().NVMeIsNamespaceMapped(ctx, subsystemUUID, nsUUID).Return(true, nil).Times(1)
+	mock.EXPECT().NVMeGetHostsOfSubsystem(ctx, subsystemUUID).Return([]*models.NvmeSubsystemHost{host1}, nil).Times(1)
+	mock.EXPECT().NVMeSubsystemRemoveNamespace(ctx, subsystemUUID, nsUUID).Return(nil).Times(1)
+	mock.EXPECT().NVMeNamespaceCount(ctx, subsystemUUID).Return(int64(0), fmt.Errorf("Error getting namespace count from subsystem")).Times(1)
+
+	removePublishInfo, err = oapi.NVMeEnsureNamespaceUnmapped(ctx, hostNQN, subsystemUUID, nsUUID)
+
+	assert.Equal(t, false, removePublishInfo)
+	assert.Error(t, err)
+
+	// case 9: Error deleting subsystem
+	mock.EXPECT().ClientConfig().Return(clientConfig).AnyTimes()
+	mock.EXPECT().NVMeIsNamespaceMapped(ctx, subsystemUUID, nsUUID).Return(true, nil).Times(1)
+	mock.EXPECT().NVMeSubsystemRemoveNamespace(ctx, subsystemUUID, nsUUID).Return(nil).Times(1)
+	mock.EXPECT().NVMeNamespaceCount(ctx, subsystemUUID).Return(int64(0), nil).Times(1)
+	mock.EXPECT().NVMeSubsystemDelete(ctx, subsystemUUID).Return(fmt.Errorf("Error deleting subsystem")).Times(1)
+	mock.EXPECT().NVMeGetHostsOfSubsystem(ctx, subsystemUUID).Return([]*models.NvmeSubsystemHost{host1}, nil).Times(1)
+
+	removePublishInfo, err = oapi.NVMeEnsureNamespaceUnmapped(ctx, hostNQN, subsystemUUID, nsUUID)
+
+	assert.Equal(t, false, removePublishInfo)
+	assert.Error(t, err)
+
+	// case 10: Success deleting subsystem
+	mock.EXPECT().ClientConfig().Return(clientConfig).AnyTimes()
+	mock.EXPECT().NVMeIsNamespaceMapped(ctx, subsystemUUID, nsUUID).Return(true, nil).Times(1)
+	mock.EXPECT().NVMeSubsystemRemoveNamespace(ctx, subsystemUUID, nsUUID).Return(nil).Times(1)
+	mock.EXPECT().NVMeNamespaceCount(ctx, subsystemUUID).Return(int64(0), nil).Times(1)
+	mock.EXPECT().NVMeSubsystemDelete(ctx, subsystemUUID).Return(nil).Times(1)
+	
mock.EXPECT().NVMeGetHostsOfSubsystem(ctx, subsystemUUID).Return([]*models.NvmeSubsystemHost{host1}, nil).Times(1) - err = oapi.NVMeEnsureNamespaceUnmapped(ctx, subsystemUUID, nsUUID) + removePublishInfo, err = oapi.NVMeEnsureNamespaceUnmapped(ctx, hostNQN, subsystemUUID, nsUUID) + assert.Equal(t, true, removePublishInfo) assert.NoError(t, err) } diff --git a/storage_drivers/ontap/api/abstraction_zapi.go b/storage_drivers/ontap/api/abstraction_zapi.go index b00ac9d21..085d47ab7 100644 --- a/storage_drivers/ontap/api/abstraction_zapi.go +++ b/storage_drivers/ontap/api/abstraction_zapi.go @@ -2478,6 +2478,10 @@ func (d OntapAPIZAPI) NVMeAddHostToSubsystem(ctx context.Context, hostNQN, subsU return fmt.Errorf("ZAPI call is not supported yet") } +func (d OntapAPIZAPI) NVMeRemoveHostFromSubsystem(ctx context.Context, hostNQN, subsUUID string) error { + return fmt.Errorf("ZAPI call is not supported yet") +} + func (d OntapAPIZAPI) NVMeIsNamespaceMapped(ctx context.Context, subsysUUID, nsUUID string) (bool, error) { return false, fmt.Errorf("ZAPI call is not supported yet") } @@ -2486,8 +2490,8 @@ func (d OntapAPIZAPI) NVMeEnsureNamespaceMapped(ctx context.Context, subsystemUU return fmt.Errorf("ZAPI call is not supported yet") } -func (d OntapAPIZAPI) NVMeEnsureNamespaceUnmapped(ctx context.Context, subsystemUUID, namespaceUUID string) error { - return fmt.Errorf("ZAPI call is not supported yet") +func (d OntapAPIZAPI) NVMeEnsureNamespaceUnmapped(ctx context.Context, hostNQN, subsystemUUID, namespaceUUID string) (bool, error) { + return false, fmt.Errorf("ZAPI call is not supported yet") } func (d OntapAPIZAPI) NVMeNamespaceGetSize(ctx context.Context, subsystemName string) (int, error) { diff --git a/storage_drivers/ontap/api/ontap_rest.go b/storage_drivers/ontap/api/ontap_rest.go index be168a6b4..4da82bedd 100644 --- a/storage_drivers/ontap/api/ontap_rest.go +++ b/storage_drivers/ontap/api/ontap_rest.go @@ -248,7 +248,7 @@ func NewRestClientFromOntapConfig( apiREST, err := NewOntapAPIREST(restClient, ontapConfig.StorageDriverName) if err != nil { - return nil, fmt.Errorf("unable to get REST API client for ontap: %v", err) + return nil, fmt.Errorf("unable to get REST API client for ontap; %v", err) } return apiREST, nil @@ -1526,7 +1526,7 @@ func (c RestClient) SnapshotCreate( func (c RestClient) SnapshotCreateAndWait(ctx context.Context, volumeUUID, snapshotName string) error { snapshotCreateResult, err := c.SnapshotCreate(ctx, volumeUUID, snapshotName) if err != nil { - return fmt.Errorf("could not create snapshot: %v", err) + return fmt.Errorf("could not create snapshot; %v", err) } if snapshotCreateResult == nil { return fmt.Errorf("could not create snapshot: %v", "unexpected result") @@ -1707,7 +1707,7 @@ func (c RestClient) VolumeCloneCreate(ctx context.Context, cloneName, sourceVolu func (c RestClient) VolumeCloneCreateAsync(ctx context.Context, cloneName, sourceVolumeName, snapshot string) error { cloneCreateResult, err := c.createCloneNAS(ctx, cloneName, sourceVolumeName, snapshot) if err != nil { - return fmt.Errorf("could not create clone: %v", err) + return fmt.Errorf("could not create clone; %v", err) } if cloneCreateResult == nil { return fmt.Errorf("could not create clone: %v", "unexpected result") @@ -1794,10 +1794,18 @@ func (c RestClient) IscsiInitiatorSetDefaultAuth( if *getDefaultAuthResponse.Payload.NumRecords != 1 { return fmt.Errorf("should only be one default iscsi initiator") } + if getDefaultAuthResponse.Payload.IscsiCredentialsResponseInlineRecords[0] == nil { + 
return fmt.Errorf("could not get the default iscsi initiator") + } + if getDefaultAuthResponse.Payload.IscsiCredentialsResponseInlineRecords[0].Initiator == nil { + return fmt.Errorf("could not get the default iscsi initiator") + } params := san.NewIscsiCredentialsModifyParamsWithTimeout(c.httpClient.Timeout) params.Context = ctx params.HTTPClient = c.httpClient + params.SvmUUID = c.svmUUID + params.Initiator = *getDefaultAuthResponse.Payload.IscsiCredentialsResponseInlineRecords[0].Initiator outboundInfo := &models.IscsiCredentialsInlineChapInlineOutbound{} if outbountUserName != "" && outboundPassphrase != "" { @@ -1815,7 +1823,6 @@ func (c RestClient) IscsiInitiatorSetDefaultAuth( authInfo := &models.IscsiCredentials{ AuthenticationType: utils.Ptr(authType), Chap: chapInfo, - Initiator: getDefaultAuthResponse.Payload.IscsiCredentialsResponseInlineRecords[0].Initiator, } params.SetInfo(authInfo) @@ -1992,7 +1999,7 @@ func (c RestClient) IgroupDestroy(ctx context.Context, initiatorGroupName string lunDeleteResult, err := c.api.San.IgroupDelete(params, c.authInfo) if err != nil { - return fmt.Errorf("could not delete igroup: %v", err) + return fmt.Errorf("could not delete igroup; %v", err) } if lunDeleteResult == nil { return fmt.Errorf("could not delete igroup: %v", "unexpected result") @@ -2214,8 +2221,8 @@ func (c RestClient) LunCloneCreate( Name: utils.Ptr(sourcePath), }, }, - Name: utils.Ptr(lunPath), // example: /vol/myVolume/myLun1 - OsType: utils.Ptr(osType), + Name: utils.Ptr(lunPath), // example: /vol/myVolume/myLun1 + // OsType is not supported for POST when creating a LUN clone Space: &models.LunInlineSpace{ Size: utils.Ptr(sizeInBytes), }, @@ -2366,7 +2373,7 @@ func (c RestClient) LunDelete( lunDeleteResult, err := c.api.San.LunDelete(params, c.authInfo) if err != nil { - return fmt.Errorf("could not delete lun: %v", err) + return fmt.Errorf("could not delete lun; %v", err) } if lunDeleteResult == nil { return fmt.Errorf("could not delete lun: %v", "unexpected result") @@ -2952,7 +2959,7 @@ func (c RestClient) NetInterfaceGetDataLIFs(ctx context.Context, protocol string lifResponse, err := c.api.Networking.NetworkIPInterfacesGet(params, c.authInfo) if err != nil { - return nil, fmt.Errorf("error checking network interfaces: %v", err) + return nil, fmt.Errorf("error checking network interfaces; %v", err) } if lifResponse == nil { return nil, fmt.Errorf("unexpected error checking network interfaces") @@ -5671,7 +5678,7 @@ func (c RestClient) NVMeNamespaceSetSize(ctx context.Context, nsUUID string, new nsModify, err := c.api.NvMe.NvmeNamespaceModify(params, c.authInfo) if err != nil { - return fmt.Errorf("namespace resize failed, %v", err) + return fmt.Errorf("namespace resize failed; %v", err) } if nsModify == nil { return fmt.Errorf("namespace resize failed") @@ -5782,7 +5789,7 @@ func (c RestClient) NVMeSubsystemRemoveNamespace(ctx context.Context, subsysUUID _, err := c.api.NvMe.NvmeSubsystemMapDelete(params, c.authInfo) if err != nil { - return fmt.Errorf("error while deleting namespace from subsystem map: %v", err) + return fmt.Errorf("error while deleting namespace from subsystem map; %v", err) } return nil } @@ -5839,7 +5846,10 @@ func (c RestClient) NVMeNamespaceCount(ctx context.Context, subsysUUID string) ( } if getSubsys.IsSuccess() { - return *getSubsys.GetPayload().NumRecords, nil + payload := getSubsys.GetPayload() + if payload != nil && payload.NumRecords != nil { + return *payload.NumRecords, nil + } } return 0, fmt.Errorf("failed to get subsystem map 
collection") @@ -5961,7 +5971,7 @@ func (c RestClient) NVMeSubsystemDelete(ctx context.Context, subsysUUID string) subsysDeleted, err := c.api.NvMe.NvmeSubsystemDelete(params, c.authInfo) if err != nil { - return fmt.Errorf("issue while deleting the subsystem, %v", err) + return fmt.Errorf("issue while deleting the subsystem; %v", err) } if subsysDeleted == nil { return fmt.Errorf("issue while deleting the subsystem") @@ -5997,6 +6007,25 @@ func (c RestClient) NVMeAddHostNqnToSubsystem(ctx context.Context, hostNQN, subs return fmt.Errorf("error while adding host to subsystem %v", hostAdded.Error()) } +// NVMeRemoveHostFromSubsystem remove the NQN of the host from the subsystem +func (c RestClient) NVMeRemoveHostFromSubsystem(ctx context.Context, hostNQN, subsUUID string) error { + params := nvme.NewNvmeSubsystemHostDeleteParamsWithTimeout(c.httpClient.Timeout) + params.Context = ctx + params.HTTPClient = c.httpClient + params.SubsystemUUID = subsUUID + params.Nqn = hostNQN + + hostRemoved, err := c.api.NvMe.NvmeSubsystemHostDelete(params, c.authInfo) + if err != nil { + return fmt.Errorf("issue while removing host to subsystem; %v", err) + } + if hostRemoved.IsSuccess() { + return nil + } + + return fmt.Errorf("error while removing host from subsystem; %v", hostRemoved.Error()) +} + // NVMeGetHostsOfSubsystem retuns all the hosts connected to a subsystem func (c RestClient) NVMeGetHostsOfSubsystem(ctx context.Context, subsUUID string) ([]*models.NvmeSubsystemHost, error) { params := nvme.NewNvmeSubsystemHostCollectionGetParamsWithTimeout(c.httpClient.Timeout) diff --git a/storage_drivers/ontap/api/ontap_rest_interface.go b/storage_drivers/ontap/api/ontap_rest_interface.go index e25e6731e..65c6a0d59 100644 --- a/storage_drivers/ontap/api/ontap_rest_interface.go +++ b/storage_drivers/ontap/api/ontap_rest_interface.go @@ -356,4 +356,5 @@ type RestClientInterface interface { NVMeSubsystemAddNamespace(ctx context.Context, subsystemUUID, nsUUID string) error // NVMeSubsystemRemoveNamespace ummaps a given namespace from a Subsystem with the specified subsystem UUID. NVMeSubsystemRemoveNamespace(ctx context.Context, subsysUUID, nsUUID string) error + NVMeRemoveHostFromSubsystem(ctx context.Context, hostNQN, subsystemUUID string) error } diff --git a/storage_drivers/ontap/api/ontap_zapi.go b/storage_drivers/ontap/api/ontap_zapi.go index 459009a05..7d10ed479 100644 --- a/storage_drivers/ontap/api/ontap_zapi.go +++ b/storage_drivers/ontap/api/ontap_zapi.go @@ -618,7 +618,8 @@ func (c Client) LunGet(path string) (*azgo.LunInfoType, error) { SetSize(0). SetCreationTimestamp(0). SetOnline(false). - SetMapped(false) + SetMapped(false). + SetSerialNumber("") desiredAttributes.SetLunInfo(*lunInfo) response, err := azgo.NewLunGetIterRequest(). diff --git a/storage_drivers/ontap/ontap_common.go b/storage_drivers/ontap/ontap_common.go index 55996c1a5..a1dcc4696 100644 --- a/storage_drivers/ontap/ontap_common.go +++ b/storage_drivers/ontap/ontap_common.go @@ -1,4 +1,4 @@ -// Copyright 2022 NetApp, Inc. All Rights Reserved. +// Copyright 2023 NetApp, Inc. All Rights Reserved. package ontap @@ -562,52 +562,6 @@ func GetOntapDriverRedactList() []string { return clone[:] } -// PopulateOntapLunMapping helper function to fill in volConfig with its LUN mapping values. 
-// This function assumes that the list of data LIFs has not changed since driver initialization and volume creation -func PopulateOntapLunMapping( - ctx context.Context, clientAPI api.OntapAPI, ips []string, volConfig *storage.VolumeConfig, lunID int, - lunPath, igroupName string, -) error { - var targetIQN string - targetIQN, err := clientAPI.IscsiNodeGetNameRequest(ctx) - if err != nil { - return fmt.Errorf("problem retrieving iSCSI services: %v", err) - } - - lunResponse, err := clientAPI.LunGetByName(ctx, lunPath) - if err != nil || lunResponse == nil { - return fmt.Errorf("problem retrieving LUN info: %v", err) - } - serial := lunResponse.SerialNumber - - filteredIPs, err := getISCSIDataLIFsForReportingNodes(ctx, clientAPI, ips, lunPath, igroupName, - volConfig.ImportNotManaged) - if err != nil { - return err - } - - if len(filteredIPs) == 0 { - Logc(ctx).Warn("Unable to find reporting ONTAP nodes for discovered dataLIFs.") - filteredIPs = ips - } - - volConfig.AccessInfo.IscsiTargetPortal = filteredIPs[0] - volConfig.AccessInfo.IscsiPortals = filteredIPs[1:] - volConfig.AccessInfo.IscsiTargetIQN = targetIQN - volConfig.AccessInfo.IscsiLunNumber = int32(lunID) - volConfig.AccessInfo.IscsiIgroup = igroupName - volConfig.AccessInfo.IscsiLunSerial = serial - Logc(ctx).WithFields(LogFields{ - "volume": volConfig.Name, - "volume_internal": volConfig.InternalName, - "targetIQN": volConfig.AccessInfo.IscsiTargetIQN, - "lunNumber": volConfig.AccessInfo.IscsiLunNumber, - "igroup": volConfig.AccessInfo.IscsiIgroup, - }).Debug("Mapped ONTAP LUN.") - - return nil -} - // getNodeSpecificIgroupName generates a distinct igroup name for node name. // Igroup names may collide if node names are over 59 characters. func getNodeSpecificIgroupName(nodeName, tridentUUID string) string { @@ -681,6 +635,17 @@ func PublishLUN( fstype = lunFSType } + // Get LUN Serial Number + lunResponse, err := clientAPI.LunGetByName(ctx, lunPath) + if err != nil || lunResponse == nil { + return fmt.Errorf("problem retrieving LUN info: %v", err) + } + serial := lunResponse.SerialNumber + + if serial == "" { + return fmt.Errorf("LUN '%v' serial number not found", lunPath) + } + if config.DriverContext == tridentconfig.ContextCSI { // Get the info about the targeted node var targetNode *utils.Node @@ -729,6 +694,7 @@ func PublishLUN( // Add fields needed by Attach publishInfo.IscsiLunNumber = int32(lunID) + publishInfo.IscsiLunSerial = serial publishInfo.IscsiTargetPortal = filteredIPs[0] publishInfo.IscsiPortals = filteredIPs[1:] publishInfo.IscsiTargetIQN = iSCSINodeName @@ -2726,18 +2692,10 @@ func cloneFlexvol( return err } - // NVMe clone is not ready by the time we return from VolumeCloneCreate. 
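Aside: with PopulateOntapLunMapping removed, PublishLUN resolves the LUN serial itself and fails fast when ONTAP returns none, presumably so the attach path can later verify the device it discovers against publishInfo.IscsiLunSerial. A minimal sketch of that guard shape with an injected getter; the lun type and lookupSerial helper are illustrative, not the driver's real signatures:

package main

import "fmt"

// lun stands in for the api.Lun attributes PublishLUN needs.
type lun struct{ SerialNumber string }

func lookupSerial(get func(path string) (*lun, error), lunPath string) (string, error) {
	l, err := get(lunPath)
	if err != nil || l == nil {
		return "", fmt.Errorf("problem retrieving LUN info: %v", err)
	}
	if l.SerialNumber == "" {
		// Refuse to publish a LUN whose identity cannot be verified later.
		return "", fmt.Errorf("LUN '%v' serial number not found", lunPath)
	}
	return l.SerialNumber, nil
}

func main() {
	ok := func(string) (*lun, error) { return &lun{SerialNumber: "testSerialNumber"}, nil }
	fmt.Println(lookupSerial(ok, "/vol/v1/lun0")) // testSerialNumber <nil>

	noSerial := func(string) (*lun, error) { return &lun{}, nil }
	_, err := lookupSerial(noSerial, "/vol/v1/lun0")
	fmt.Println(err) // LUN '/vol/v1/lun0' serial number not found
}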
-	// This check here makes sure that we don't fail the clone operation during the time clone is not ready
-	// Currently this change is done only for NVMe volumes but it should work with other volumes too if needed
-	if config.SANType == sa.NVMe {
-
-		desiredNVMeVolStates := []string{"online"}
-		abortNVMeVolStates := []string{"error"}
-		volState, err := client.VolumeWaitForStates(ctx, name, desiredNVMeVolStates, abortNVMeVolStates,
-			maxFlexvolCloneWait)
-		if err != nil {
-			return fmt.Errorf("unable to create flexClone for NVMe volume %v, volState:%v", name, volState)
-		}
+	desiredStates, abortStates := []string{"online"}, []string{"error"}
+	volState, err := client.VolumeWaitForStates(ctx, name, desiredStates, abortStates, maxFlexvolCloneWait)
+	if err != nil {
+		return fmt.Errorf("unable to create flexClone for volume %v, volState:%v", name, volState)
 	}
 
 	if err = client.VolumeSetComment(ctx, name, name, labels); err != nil {
@@ -2952,23 +2910,49 @@ func GetEncryptionValue(encryption string) (*bool, string, error) {
 	return nil, "", nil
 }
 
-// ConstructOntapNASSMBVolumePath returns windows compatible volume path for Ontap NAS.
+// ConstructOntapNASVolumeAccessPath returns the volume access path for ONTAP NAS.
 // The function accepts parameters in the following way:
 // 1.smbShare : This takes the value given in the backend config, without a path prefix.
 // 2.volumeName : This takes the value of the volume's internal name; it is always prefixed with a unix-styled path separator.
-// Example, ConstructOntapNASSMBVolumePath(ctx, "test_share", "/vol")
-func ConstructOntapNASSMBVolumePath(ctx context.Context, smbShare, volumeName string) string {
-	Logc(ctx).Debug(">>>> smb.ConstructOntapNASSMBVolumePath")
-	defer Logc(ctx).Debug("<<<< smb.ConstructOntapNASSMBVolumePath")
+// 3.volConfig : This takes the value of the volume configuration.
+// 4.protocol : This takes the value of the NAS protocol (NFS/SMB).
+// Example, ConstructOntapNASVolumeAccessPath(ctx, "test_share", "/vol", volConfig, "nfs")
+func ConstructOntapNASVolumeAccessPath(
+	ctx context.Context, smbShare, volumeName string,
+	volConfig *storage.VolumeConfig, protocol string,
+) string {
+	Logc(ctx).Debug(">>>> smb.ConstructOntapNASVolumeAccessPath")
+	defer Logc(ctx).Debug("<<<< smb.ConstructOntapNASVolumeAccessPath")
 
 	var completeVolumePath string
-	if smbShare != "" {
-		completeVolumePath = utils.WindowsPathSeparator + smbShare + volumeName
-	} else {
-		// If the user does not specify an SMB Share, Trident creates it with the same name as the flexvol volume name.
-		completeVolumePath = volumeName
-	}
+	var smbSharePath string
+	switch protocol {
+	case sa.NFS:
+		if volConfig.ReadOnlyClone {
+			return fmt.Sprintf("/%s/%s/%s", volConfig.CloneSourceVolumeInternal, ".snapshot",
+				volConfig.CloneSourceSnapshot)
+		} else if volumeName != utils.UnixPathSeparator+volConfig.InternalName && strings.HasPrefix(volumeName,
+			utils.UnixPathSeparator) {
+			// For a managed import, return the original junction path
+			return volumeName
+		}
+		return fmt.Sprintf("/%s", volConfig.InternalName)
+	case sa.SMB:
+		if smbShare != "" {
+			smbSharePath = fmt.Sprintf("\\%s", smbShare)
+		} else {
+			// Set the share path as empty; the volume name contains the path prefix.
+			smbSharePath = ""
+		}
+		if volConfig.ReadOnlyClone {
+			completeVolumePath = fmt.Sprintf("%s\\%s\\%s\\%s", smbSharePath, volConfig.CloneSourceVolumeInternal,
+				"~snapshot", volConfig.CloneSourceSnapshot)
+		} else {
+			// If the user does not specify an SMB Share, Trident creates it with the same name as the flexvol volume name.
+ completeVolumePath = smbSharePath + volumeName + } + } // Replace unix styled path separator, if exists return strings.Replace(completeVolumePath, utils.UnixPathSeparator, utils.WindowsPathSeparator, -1) } @@ -3001,7 +2985,8 @@ func ConstructOntapNASFlexGroupSMBVolumePath(ctx context.Context, smbShare, volu // 3.volConfig : This takes the value of volume configuration. // 4. protocol: This takes the value of the protocol for which the path needs to be created. // Example, ConstructOntapNASQTreeVolumePath(ctx, test.smbShare, "flex-vol", volConfig, sa.SMB) -func ConstructOntapNASQTreeVolumePath(ctx context.Context, smbShare, flexvol string, +func ConstructOntapNASQTreeVolumePath( + ctx context.Context, smbShare, flexvol string, volConfig *storage.VolumeConfig, protocol string, ) (completeVolumePath string) { Logc(ctx).Debug(">>>> smb.ConstructOntapNASQTreeVolumePath") diff --git a/storage_drivers/ontap/ontap_common_test.go b/storage_drivers/ontap/ontap_common_test.go index ba1850b9f..a514258d7 100644 --- a/storage_drivers/ontap/ontap_common_test.go +++ b/storage_drivers/ontap/ontap_common_test.go @@ -1,4 +1,4 @@ -// Copyright 2022 NetApp, Inc. All Rights Reserved. +// Copyright 2023 NetApp, Inc. All Rights Reserved. package ontap @@ -2256,21 +2256,57 @@ func TestRestGetSLMLifs(t *testing.T) { assert.ElementsMatch(t, result, []string{"1.1.1.1", "2.2.2.2", "3.3.3.3", "4.4.4.4"}) } -func TestConstructOntapNASSMBVolumePath(t *testing.T) { +func TestConstructOntapNASVolumeAccessPath(t *testing.T) { ctx := context.Background() + volConfig := &storage.VolumeConfig{ + InternalName: "vol", + } + tests := []struct { smbShare string + volName string + protocol string expectedPath string }{ - {"test_share", "\\test_sharevol"}, - {"", "vol"}, + {"test_share", "/vol", "smb", "\\test_share\\vol"}, + {"", "/vol", "smb", "\\vol"}, + {"", "/vol", "nfs", "/vol"}, + {"", "/vol1", "nfs", "/vol1"}, } for _, test := range tests { t.Run(test.smbShare, func(t *testing.T) { - result := ConstructOntapNASSMBVolumePath(ctx, test.smbShare, "vol") - assert.Equal(t, test.expectedPath, result, "unable to construct Ontap-NAS-QTree SMB volume path") + result := ConstructOntapNASVolumeAccessPath(ctx, test.smbShare, test.volName, volConfig, test.protocol) + assert.Equal(t, test.expectedPath, result, "unable to construct Ontap-NAS volume access path") + }) + } +} + +func TestConstructOntapNASVolumeAccessPath_ROClone(t *testing.T) { + ctx := context.Background() + + volConfig := &storage.VolumeConfig{ + InternalName: "vol", + ReadOnlyClone: true, + CloneSourceVolumeInternal: "sourceVol", + CloneSourceSnapshot: "snapshot-abcd-1234-wxyz", + } + + tests := []struct { + smbShare string + protocol string + expectedPath string + }{ + {"test_share", "smb", "\\test_share\\sourceVol\\~snapshot\\snapshot-abcd-1234-wxyz"}, + {"", "smb", "\\sourceVol\\~snapshot\\snapshot-abcd-1234-wxyz"}, + {"", "nfs", "/sourceVol/.snapshot/snapshot-abcd-1234-wxyz"}, + } + + for _, test := range tests { + t.Run(test.smbShare, func(t *testing.T) { + result := ConstructOntapNASVolumeAccessPath(ctx, test.smbShare, "/vol", volConfig, test.protocol) + assert.Equal(t, test.expectedPath, result, "unable to construct Ontap-NAS volume access path") }) } } @@ -2997,84 +3033,6 @@ func TestGetDesiredExportPolicyRules(t *testing.T) { assert.NoError(t, err, "Found error when expected none") } -func TestPopulateOntapLunMapping(t *testing.T) { - ctx := context.Background() - mockCtrl := gomock.NewController(t) - mockAPI := mockapi.NewMockOntapAPI(mockCtrl) - inputIPs := 
[]string{ - "1.1.1.1", "2.2.2.2", "3.3.3.3", - } - - volConfig := &storage.VolumeConfig{ - Name: "testVol", - InternalName: "testInternalVol", - ImportNotManaged: true, - } - - lunID := 5555 - - lunPath := "fakeLunPath" - - igroupName := "testIgroupName" - - dummyLun := &api.Lun{ - Comment: "dummyLun", - SerialNumber: "testSerialNumber", - } - reportingNodes := []string{"Node1"} - - error := fmt.Errorf("Error returned") - - // Test1: Positive flow - mockAPI.EXPECT().IscsiNodeGetNameRequest(ctx).Return("testIQN", nil) - mockAPI.EXPECT().LunGetByName(ctx, lunPath).Return(dummyLun, nil) - mockAPI.EXPECT().LunMapGetReportingNodes(ctx, igroupName, lunPath).Return(reportingNodes, nil) - mockAPI.EXPECT().GetSLMDataLifs(ctx, inputIPs, reportingNodes).Return([]string{"1.1.1.1"}, nil) - - err := PopulateOntapLunMapping(ctx, mockAPI, inputIPs, volConfig, lunID, lunPath, igroupName) - - assert.NoError(t, err) - assert.Equal(t, "1.1.1.1", volConfig.AccessInfo.IscsiTargetPortal) - assert.Equal(t, "testIQN", volConfig.AccessInfo.IscsiTargetIQN) - assert.Equal(t, int32(5555), volConfig.AccessInfo.IscsiLunNumber) - assert.Equal(t, "testIgroupName", volConfig.AccessInfo.IscsiIgroup) - assert.Equal(t, "testSerialNumber", volConfig.AccessInfo.IscsiLunSerial) - - // Test2: Error flow: IscsiNodeGetNameRequest returns error - mockAPI.EXPECT().IscsiNodeGetNameRequest(ctx).Return("testIQN", error) - - err = PopulateOntapLunMapping(ctx, mockAPI, inputIPs, volConfig, lunID, lunPath, igroupName) - - assert.Error(t, err) - - // Test3: Error flow: LunGetByName returns error - mockAPI.EXPECT().IscsiNodeGetNameRequest(ctx).Return("testIQN", nil) - mockAPI.EXPECT().LunGetByName(ctx, lunPath).Return(dummyLun, error) - - err = PopulateOntapLunMapping(ctx, mockAPI, inputIPs, volConfig, lunID, lunPath, igroupName) - - assert.Error(t, err) - - // Test4: Error flow: LunMapGetReportingNodes returns error - mockAPI.EXPECT().IscsiNodeGetNameRequest(ctx).Return("testIQN", nil) - mockAPI.EXPECT().LunGetByName(ctx, lunPath).Return(dummyLun, nil) - mockAPI.EXPECT().LunMapGetReportingNodes(ctx, igroupName, lunPath).Return(reportingNodes, error) - - err = PopulateOntapLunMapping(ctx, mockAPI, inputIPs, volConfig, lunID, lunPath, igroupName) - - assert.Error(t, err) - - // Test5: Positive flow: Unable to find reporting ONTAP nodes - mockAPI.EXPECT().IscsiNodeGetNameRequest(ctx).Return("testIQN", nil) - mockAPI.EXPECT().LunGetByName(ctx, lunPath).Return(dummyLun, nil) - mockAPI.EXPECT().LunMapGetReportingNodes(ctx, igroupName, lunPath).Return(reportingNodes, nil) - mockAPI.EXPECT().GetSLMDataLifs(ctx, inputIPs, reportingNodes).Return([]string{}, nil) - - err = PopulateOntapLunMapping(ctx, mockAPI, inputIPs, volConfig, lunID, lunPath, igroupName) - - assert.NoError(t, err) -} - func TestReconcileNASNodeAccess(t *testing.T) { ctx := context.Background() mockCtrl := gomock.NewController(t) @@ -3909,6 +3867,16 @@ func TestPublishLun(t *testing.T) { } nodeList := []*utils.Node{&node} + dummyLun := &api.Lun{ + Comment: "dummyLun", + SerialNumber: "testSerialNumber", + } + + dummyLunNoSerial := &api.Lun{ + Comment: "dummyLun", + SerialNumber: "", + } + commonConfig := &drivers.CommonStorageDriverConfig{ DebugTraceFlags: map[string]bool{"method": true}, DriverContext: "csi", @@ -3929,6 +3897,7 @@ func TestPublishLun(t *testing.T) { } // Test1 - Positive flow mockAPI.EXPECT().LunGetFSType(ctx, lunPath).Return("fstype", nil) + mockAPI.EXPECT().LunGetByName(ctx, lunPath).Return(dummyLun, nil) mockAPI.EXPECT().EnsureIgroupAdded(ctx, igroupName, 
publishInfo.HostIQN[0]) mockAPI.EXPECT().EnsureLunMapped(ctx, igroupName, lunPath).Return(1111, nil) mockAPI.EXPECT().LunMapGetReportingNodes(ctx, igroupName, lunPath).Return([]string{"Node1"}, nil) @@ -3951,6 +3920,7 @@ func TestPublishLun(t *testing.T) { mockAPI = mockapi.NewMockOntapAPI(mockCtrl) publishInfo.HostIQN = []string{"host_iqn"} mockAPI.EXPECT().LunGetFSType(ctx, lunPath).Return("", fmt.Errorf("LunGetFSType returned error")) + mockAPI.EXPECT().LunGetByName(ctx, lunPath).Return(dummyLun, nil) mockAPI.EXPECT().EnsureIgroupAdded(ctx, igroupName, publishInfo.HostIQN[0]) mockAPI.EXPECT().EnsureLunMapped(ctx, igroupName, lunPath).Return(1111, nil) mockAPI.EXPECT().LunMapGetReportingNodes(ctx, igroupName, lunPath).Return([]string{"Node1"}, nil) @@ -3965,6 +3935,7 @@ func TestPublishLun(t *testing.T) { publishInfo.HostIQN = []string{"host_iqn"} publishInfo.HostName = "fakeHostName" mockAPI.EXPECT().LunGetFSType(ctx, lunPath).Return("", fmt.Errorf("LunGetFSType returned error")) + mockAPI.EXPECT().LunGetByName(ctx, lunPath).Return(dummyLun, nil) err = PublishLUN(ctx, mockAPI, config, ips, publishInfo, lunPath, igroupName, iSCSINodeName) @@ -3979,6 +3950,7 @@ func TestPublishLun(t *testing.T) { HostIQN: []string{"host_iqn"}, } mockAPI.EXPECT().LunGetFSType(ctx, lunPath).Return("fstype", nil) + mockAPI.EXPECT().LunGetByName(ctx, lunPath).Return(dummyLun, nil) mockAPI.EXPECT().EnsureIgroupAdded(ctx, igroupName, gomock.Any()).Return(fmt.Errorf("EnsureIgroupAdded returned error")) @@ -3988,6 +3960,7 @@ func TestPublishLun(t *testing.T) { // Test 6 - EnsureLunMapped returns error mockAPI.EXPECT().LunGetFSType(ctx, lunPath).Return("fstype", nil) + mockAPI.EXPECT().LunGetByName(ctx, lunPath).Return(dummyLun, nil) mockAPI.EXPECT().EnsureLunMapped(ctx, igroupName, lunPath).Return(1111, fmt.Errorf("EnsureLunMapped returned error")) mockAPI.EXPECT().EnsureIgroupAdded(ctx, igroupName, gomock.Any()).Return(nil) @@ -3995,6 +3968,22 @@ func TestPublishLun(t *testing.T) { err = PublishLUN(ctx, mockAPI, config, ips, publishInfo, lunPath, igroupName, iSCSINodeName) assert.Error(t, err) + + // Test 7 - LunGetByName returns error + mockAPI.EXPECT().LunGetFSType(ctx, lunPath).Return("fstype", nil) + mockAPI.EXPECT().LunGetByName(ctx, lunPath).Return(dummyLun, fmt.Errorf("LunGetByName returned error")) + + err = PublishLUN(ctx, mockAPI, config, ips, publishInfo, lunPath, igroupName, iSCSINodeName) + + assert.Error(t, err) + + // Test 8 - LunGetByName returns nil but Serial Number is empty + mockAPI.EXPECT().LunGetFSType(ctx, lunPath).Return("fstype", nil) + mockAPI.EXPECT().LunGetByName(ctx, lunPath).Return(dummyLunNoSerial, nil) + + err = PublishLUN(ctx, mockAPI, config, ips, publishInfo, lunPath, igroupName, iSCSINodeName) + + assert.Error(t, err) } func TestValidateSANDriver(t *testing.T) { diff --git a/storage_drivers/ontap/ontap_nas.go b/storage_drivers/ontap/ontap_nas.go index ad0901761..d63bc2e7c 100644 --- a/storage_drivers/ontap/ontap_nas.go +++ b/storage_drivers/ontap/ontap_nas.go @@ -123,6 +123,13 @@ func (d *NASStorageDriver) Initialize( return fmt.Errorf("error validating %s driver: %v", d.Name(), err) } + // Identify non-overlapping storage backend pools on the driver backend. 
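// (EncodeStorageBackendPools itself is not part of this diff; a reasonable mental
// model is that it serializes each pool struct into a stable string, for example
// via json.Marshal, so two backends can later be checked for overlap by comparing
// the resulting string sets. Roughly, and only as a sketch:
//     b, _ := json.Marshal(drivers.OntapStorageBackendPool{SvmUUID: u, Aggregate: a})
//     encoded = append(encoded, string(b))
// The exact encoding is an implementation detail of the drivers package.)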
+ pools, err := drivers.EncodeStorageBackendPools(ctx, commonConfig, d.getStorageBackendPools(ctx)) + if err != nil { + return fmt.Errorf("failed to encode storage backend pools: %v", err) + } + d.Config.BackendPools = pools + // Set up the autosupport heartbeat d.telemetry = NewOntapTelemetry(ctx, d) d.telemetry.Telemetry = tridentconfig.OrchestratorTelemetry @@ -455,6 +462,14 @@ func (d *NASStorageDriver) CreateClone( storagePoolSplitOnCloneVal = storagePool.InternalAttributes()[SplitOnClone] } + if cloneVolConfig.ReadOnlyClone { + if !flexvol.SnapshotDir { + return fmt.Errorf("snapshot directory access is set to %t and readOnly clone is set to %t ", + flexvol.SnapshotDir, cloneVolConfig.ReadOnlyClone) + } + return nil + } + // If storagePoolSplitOnCloneVal is still unknown, set it to backend's default value if storagePoolSplitOnCloneVal == "" { storagePoolSplitOnCloneVal = d.GetConfig().SplitOnClone @@ -927,6 +942,28 @@ func (d *NASStorageDriver) GetStorageBackendPhysicalPoolNames(context.Context) [ return getStorageBackendPhysicalPoolNamesCommon(d.physicalPools) } +// getStorageBackendPools determines any non-overlapping, discrete storage pools present on a driver's storage backend. +func (d *NASStorageDriver) getStorageBackendPools(ctx context.Context) []drivers.OntapStorageBackendPool { + fields := LogFields{"Method": "getStorageBackendPools", "Type": "NASStorageDriver"} + Logc(ctx).WithFields(fields).Debug(">>>> getStorageBackendPools") + defer Logc(ctx).WithFields(fields).Debug("<<<< getStorageBackendPools") + + // For this driver, a discrete storage pool is composed of the following: + // 1. SVM UUID + // 2. Aggregate (physical pool) + svmUUID := d.GetAPI().GetSVMUUID() + backendPools := make([]drivers.OntapStorageBackendPool, 0) + for _, pool := range d.physicalPools { + backendPool := drivers.OntapStorageBackendPool{ + SvmUUID: svmUUID, + Aggregate: pool.Name(), + } + backendPools = append(backendPools, backendPool) + } + + return backendPools +} + func (d *NASStorageDriver) getStoragePoolAttributes(ctx context.Context) map[string]sa.Offer { client := d.GetAPI() mirroring, _ := client.IsSVMDRCapable(ctx) @@ -956,6 +993,8 @@ func (d *NASStorageDriver) CreatePrepare(ctx context.Context, volConfig *storage func (d *NASStorageDriver) CreateFollowup(ctx context.Context, volConfig *storage.VolumeConfig) error { var accessPath string + var flexvol *api.Volume + var err error fields := LogFields{ "Method": "CreateFollowup", @@ -976,9 +1015,17 @@ func (d *NASStorageDriver) CreateFollowup(ctx context.Context, volConfig *storag } // Set correct junction path - flexvol, err := d.API.VolumeInfo(ctx, volConfig.InternalName) - if err != nil { - return err + // If it's a RO clone, get source volume + if volConfig.ReadOnlyClone { + flexvol, err = d.API.VolumeInfo(ctx, volConfig.CloneSourceVolumeInternal) + if err != nil { + return err + } + } else { + flexvol, err = d.API.VolumeInfo(ctx, volConfig.InternalName) + if err != nil { + return err + } } if flexvol.JunctionPath == "" { @@ -988,13 +1035,14 @@ func (d *NASStorageDriver) CreateFollowup(ctx context.Context, volConfig *storag // 2. During Create/CreateClone there is a failure and mount is not performed. 
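// (For a read-only clone, the flexvol looked up above is the clone's source
// volume; no FlexVol of its own ever exists for the RO clone, since CreateClone
// returned early after verifying SnapshotDir. ConstructOntapNASVolumeAccessPath,
// called in both branches below, resolves such a clone's path into the source
// volume's .snapshot directory for NFS, or ~snapshot for SMB.)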
if d.Config.NASType == sa.SMB { - volConfig.AccessInfo.SMBPath = ConstructOntapNASSMBVolumePath(ctx, d.Config.SMBShare, - volConfig.InternalName) + volConfig.AccessInfo.SMBPath = ConstructOntapNASVolumeAccessPath(ctx, d.Config.SMBShare, + volConfig.InternalName, volConfig, sa.SMB) // Overwriting mount path, mounting at root instead of admin share volConfig.AccessInfo.SMBPath = "/" + volConfig.InternalName accessPath = volConfig.AccessInfo.SMBPath } else { - volConfig.AccessInfo.NfsPath = "/" + volConfig.InternalName + volConfig.AccessInfo.NfsPath = ConstructOntapNASVolumeAccessPath(ctx, d.Config.SMBShare, + volConfig.InternalName, volConfig, sa.NFS) accessPath = volConfig.AccessInfo.NfsPath } @@ -1014,10 +1062,11 @@ func (d *NASStorageDriver) CreateFollowup(ctx context.Context, volConfig *storag } } else { if d.Config.NASType == sa.SMB { - volConfig.AccessInfo.SMBPath = ConstructOntapNASSMBVolumePath(ctx, d.Config.SMBShare, - flexvol.JunctionPath) + volConfig.AccessInfo.SMBPath = ConstructOntapNASVolumeAccessPath(ctx, d.Config.SMBShare, + flexvol.JunctionPath, volConfig, sa.SMB) } else { - volConfig.AccessInfo.NfsPath = flexvol.JunctionPath + volConfig.AccessInfo.NfsPath = ConstructOntapNASVolumeAccessPath(ctx, d.Config.SMBShare, + flexvol.JunctionPath, volConfig, sa.NFS) } } return nil @@ -1189,7 +1238,7 @@ func (d *NASStorageDriver) ReconcileNodeAccess( // in physical pools list. func (d *NASStorageDriver) GetBackendState(ctx context.Context) (string, *roaring.Bitmap) { Logc(ctx).Debug(">>>> GetBackendState") - defer Logc(ctx).Debugf("<<<< GetBackendState") + defer Logc(ctx).Debug("<<<< GetBackendState") return getSVMState(ctx, d.API, "nfs", d.GetStorageBackendPhysicalPoolNames(ctx)) } @@ -1246,7 +1295,8 @@ func (d *NASStorageDriver) EstablishMirror( replicationSchedule = "" } - return establishMirror(ctx, localInternalVolumeName, remoteVolumeHandle, replicationPolicy, replicationSchedule, d.API) + return establishMirror(ctx, localInternalVolumeName, remoteVolumeHandle, replicationPolicy, replicationSchedule, + d.API) } // ReestablishMirror will attempt to resync a mirror relationship, if and only if the relationship existed previously @@ -1285,7 +1335,8 @@ func (d *NASStorageDriver) ReestablishMirror( replicationSchedule = "" } - return reestablishMirror(ctx, localInternalVolumeName, remoteVolumeHandle, replicationPolicy, replicationSchedule, d.API) + return reestablishMirror(ctx, localInternalVolumeName, remoteVolumeHandle, replicationPolicy, replicationSchedule, + d.API) } // PromoteMirror will break the mirror relationship and make the destination volume RW, @@ -1293,8 +1344,8 @@ func (d *NASStorageDriver) ReestablishMirror( func (d *NASStorageDriver) PromoteMirror( ctx context.Context, localInternalVolumeName, remoteVolumeHandle, snapshotName string, ) (bool, error) { - return promoteMirror(ctx, localInternalVolumeName, remoteVolumeHandle, snapshotName, d.GetConfig().ReplicationPolicy, - d.API) + return promoteMirror(ctx, localInternalVolumeName, remoteVolumeHandle, snapshotName, + d.GetConfig().ReplicationPolicy, d.API) } // GetMirrorStatus returns the current state of a mirror relationship @@ -1310,7 +1361,9 @@ func (d *NASStorageDriver) ReleaseMirror(ctx context.Context, localInternalVolum } // GetReplicationDetails returns the replication policy and schedule of a mirror relationship -func (d *NASStorageDriver) GetReplicationDetails(ctx context.Context, localInternalVolumeName, remoteVolumeHandle string) (string, string, string, error) { +func (d *NASStorageDriver) 
GetReplicationDetails( + ctx context.Context, localInternalVolumeName, remoteVolumeHandle string, +) (string, string, string, error) { return getReplicationDetails(ctx, localInternalVolumeName, remoteVolumeHandle, d.API) } diff --git a/storage_drivers/ontap/ontap_nas_flexgroup.go b/storage_drivers/ontap/ontap_nas_flexgroup.go index b6fd5876d..81831a212 100644 --- a/storage_drivers/ontap/ontap_nas_flexgroup.go +++ b/storage_drivers/ontap/ontap_nas_flexgroup.go @@ -107,6 +107,13 @@ func (d *NASFlexGroupStorageDriver) Initialize( return fmt.Errorf("error validating %s driver: %v", d.Name(), err) } + // Identify non-overlapping storage backend pools on the driver backend. + pools, err := drivers.EncodeStorageBackendPools(ctx, commonConfig, d.getStorageBackendPools(ctx)) + if err != nil { + return fmt.Errorf("failed to encode storage backend pools: %v", err) + } + d.Config.BackendPools = pools + // Set up the autosupport heartbeat d.telemetry = NewOntapTelemetry(ctx, d) d.telemetry.Telemetry = tridentconfig.OrchestratorTelemetry @@ -1342,6 +1349,21 @@ func (d *NASFlexGroupStorageDriver) GetStorageBackendPhysicalPoolNames(context.C return physicalPoolNames } +// getStorageBackendPools determines any non-overlapping, discrete storage pools present on a driver's storage backend. +func (d *NASFlexGroupStorageDriver) getStorageBackendPools( + ctx context.Context, +) []drivers.OntapFlexGroupStorageBackendPool { + fields := LogFields{"Method": "getStorageBackendPools", "Type": "NASFlexGroupStorageDriver"} + Logc(ctx).WithFields(fields).Debug(">>>> getStorageBackendPools") + defer Logc(ctx).WithFields(fields).Debug("<<<< getStorageBackendPools") + + // For this driver, a discrete storage pool is composed of the following: + // 1. SVM UUID + // FlexGroup volumes span all or a subset of aggregates assigned to the SVM; + // As such, backend comparisons can rely on the SVM UUID alone. + return []drivers.OntapFlexGroupStorageBackendPool{{SvmUUID: d.GetAPI().GetSVMUUID()}} +} + func (d *NASFlexGroupStorageDriver) vserverAggregates(ctx context.Context, svmName string) ([]string, error) { var err error // Get the aggregates assigned to the SVM. There must be at least one! @@ -1626,7 +1648,7 @@ func (d *NASFlexGroupStorageDriver) ReconcileNodeAccess( // in physical pools list.
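// (The GetBackendState wrappers are identical across drivers except for the
// protocol string handed to getSVMState: "nfs" for the NAS drivers, "iscsi" for
// the SAN drivers, and sa.NVMeTransport for the NVMe driver later in this diff.)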
func (d *NASFlexGroupStorageDriver) GetBackendState(ctx context.Context) (string, *roaring.Bitmap) { Logc(ctx).Debug(">>>> GetBackendState") - defer Logc(ctx).Debugf("<<<< GetBackendState") + defer Logc(ctx).Debug("<<<< GetBackendState") return getSVMState(ctx, d.API, "nfs", d.GetStorageBackendPhysicalPoolNames(ctx)) } diff --git a/storage_drivers/ontap/ontap_nas_flexgroup_test.go b/storage_drivers/ontap/ontap_nas_flexgroup_test.go index 3f28b2222..d179a711e 100644 --- a/storage_drivers/ontap/ontap_nas_flexgroup_test.go +++ b/storage_drivers/ontap/ontap_nas_flexgroup_test.go @@ -310,6 +310,7 @@ func TestOntapNasFlexgroupStorageDriverInitialize(t *testing.T) { mockAPI.EXPECT().NetInterfaceGetDataLIFs(ctx, "nfs").Return([]string{"dataLIF"}, nil) mockAPI.EXPECT().EmsAutosupportLog(ctx, "ontap-nas-flexgroup", "1", false, "heartbeat", hostname, string(message), 1, "trident", 5).AnyTimes() + mockAPI.EXPECT().GetSVMUUID().Return("SVM1-uuid") result := driver.Initialize(ctx, "CSI", configJSON, commonConfig, secrets, BackendUUID) @@ -365,6 +366,7 @@ func TestOntapNasFlexgroupStorageDriverInitialize_StoragePool(t *testing.T) { mockAPI.EXPECT().NetInterfaceGetDataLIFs(ctx, "nfs").AnyTimes().Return([]string{"dataLIF"}, nil) mockAPI.EXPECT().EmsAutosupportLog(ctx, "ontap-nas-flexgroup", "1", false, "heartbeat", hostname, string(message), 1, "trident", 5).AnyTimes() + mockAPI.EXPECT().GetSVMUUID().Return("SVM1-uuid").AnyTimes() if test.name == "flexgroupAggrListFailed" { configJSON, _ = getOntapStorageDriverConfigJson("true", "volume", "none", "", @@ -2717,6 +2719,19 @@ func TestOntapNasFlexgroupStorageDriverGetStorageBackendPhysicalPoolNames(t *tes assert.Equal(t, "pool1", poolNames[0], "Pool names are not equal") } +func TestOntapNasFlexgroupStorageDriverGetStorageBackendPools(t *testing.T) { + mockAPI, driver := newMockOntapNASFlexgroupDriver(t) + svmUUID := "SVM1-uuid" + pool := storage.NewStoragePool(nil, "pool1") + driver.physicalPool = pool + mockAPI.EXPECT().GetSVMUUID().Return(svmUUID) + + pools := driver.getStorageBackendPools(ctx) + backendPool := pools[0] + assert.NotEmpty(t, pools) + assert.Equal(t, svmUUID, backendPool.SvmUUID) +} + func TestOntapNasFlexgroupStorageDriverGetInternalVolumeName(t *testing.T) { _, driver := newMockOntapNASFlexgroupDriver(t) driver.Config.StoragePrefix = utils.Ptr("storagePrefix_") diff --git a/storage_drivers/ontap/ontap_nas_qtree.go b/storage_drivers/ontap/ontap_nas_qtree.go index 611ab82ef..8918db7a3 100644 --- a/storage_drivers/ontap/ontap_nas_qtree.go +++ b/storage_drivers/ontap/ontap_nas_qtree.go @@ -179,6 +179,13 @@ func (d *NASQtreeStorageDriver) Initialize( return fmt.Errorf("error validating %s driver: %v", d.Name(), err) } + // Identify non-overlapping storage backend pools on the driver backend. 
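// (For this economy driver, pool identity gains a third component, the FlexVol
// name prefix; see getStorageBackendPools below. Two economy backends on the same
// SVM and aggregate but with different prefixes therefore encode to distinct
// pools. Illustrative comparison, with field names as in this diff:
//     a := drivers.OntapEconomyStorageBackendPool{SvmUUID: u, Aggregate: "aggr1", FlexVolPrefix: "trident_qtree_pool_p1_"}
//     b := drivers.OntapEconomyStorageBackendPool{SvmUUID: u, Aggregate: "aggr1", FlexVolPrefix: "trident_qtree_pool_p2_"}
//     fmt.Println(a == b) // false: distinct pools despite the shared SVM and aggregate
// )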
+ pools, err := drivers.EncodeStorageBackendPools(ctx, commonConfig, d.getStorageBackendPools(ctx)) + if err != nil { + return fmt.Errorf("failed to encode storage backend pools: %v", err) + } + d.Config.BackendPools = pools + // Ensure all quotas are in force after a driver restart d.queueAllFlexvolsForQuotaResize(ctx) @@ -475,7 +482,6 @@ func (d *NASQtreeStorageDriver) CreateClone( // If RO clone is requested, validate the snapshot directory access and return if cloneVolConfig.ReadOnlyClone { - _, flexvol, _, err := d.ParseQtreeInternalID(sourceVolConfig.InternalID) if err != nil { return errors.WrapWithNotFoundError(err, "error while getting flexvol") @@ -895,6 +901,10 @@ func (d *NASQtreeStorageDriver) CreateSnapshot( Logd(ctx, d.Name(), d.Config.DebugTraceFlags["method"]).WithFields(fields).Trace(">>>> CreateSnapshot") defer Logd(ctx, d.Name(), d.Config.DebugTraceFlags["method"]).WithFields(fields).Trace("<<<< CreateSnapshot") + if tridentconfig.DisableExtraFeatures { + return nil, errors.UnsupportedError(fmt.Sprintf("snapshots are not supported by backend type %s", d.Name())) + } + if volConfig.ReadOnlyClone { // This is a read-only volume and hence do not create snapshot of it return nil, fmt.Errorf("snapshot is not supported for a read-only volume") @@ -1560,6 +1570,31 @@ func (d *NASQtreeStorageDriver) GetStorageBackendPhysicalPoolNames(context.Conte return getStorageBackendPhysicalPoolNamesCommon(d.physicalPools) } +// getStorageBackendPools determines any non-overlapping, discrete storage pools present on a driver's storage backend. +func (d *NASQtreeStorageDriver) getStorageBackendPools(ctx context.Context) []drivers.OntapEconomyStorageBackendPool { + fields := LogFields{"Method": "getStorageBackendPools", "Type": "NASQtreeStorageDriver"} + Logc(ctx).WithFields(fields).Debug(">>>> getStorageBackendPools") + defer Logc(ctx).WithFields(fields).Debug("<<<< getStorageBackendPools") + + // For this driver, a discrete storage pool is composed of the following: + // 1. SVM UUID + // 2. Aggregate (physical pool) + // 3. FlexVol Name Prefix + svmUUID := d.GetAPI().GetSVMUUID() + flexVolPrefix := d.FlexvolNamePrefix() + backendPools := make([]drivers.OntapEconomyStorageBackendPool, 0) + for _, pool := range d.physicalPools { + backendPool := drivers.OntapEconomyStorageBackendPool{ + SvmUUID: svmUUID, + Aggregate: pool.Name(), + FlexVolPrefix: flexVolPrefix, + } + backendPools = append(backendPools, backendPool) + } + + return backendPools +} + func (d *NASQtreeStorageDriver) getStoragePoolAttributes() map[string]sa.Offer { return map[string]sa.Offer{ sa.BackendType: sa.NewStringOffer(d.Name()), @@ -2113,7 +2148,7 @@ func (d *NASQtreeStorageDriver) ReconcileNodeAccess( // in physical pools list. 
func (d *NASQtreeStorageDriver) GetBackendState(ctx context.Context) (string, *roaring.Bitmap) { Logc(ctx).Debug(">>>> GetBackendState") - defer Logc(ctx).Debugf("<<<< GetBackendState") + defer Logc(ctx).Debug("<<<< GetBackendState") return getSVMState(ctx, d.API, "nfs", d.GetStorageBackendPhysicalPoolNames(ctx)) } diff --git a/storage_drivers/ontap/ontap_nas_qtree_test.go b/storage_drivers/ontap/ontap_nas_qtree_test.go index 530347ab0..0116afc65 100644 --- a/storage_drivers/ontap/ontap_nas_qtree_test.go +++ b/storage_drivers/ontap/ontap_nas_qtree_test.go @@ -3140,6 +3140,35 @@ func TestGetStorageBackendSpecs_Success(t *testing.T) { assert.NoError(t, result, "Expected no error, got error") } +func TestOntapNasQtreeStorageDriverGetStorageBackendPools(t *testing.T) { + mockAPI, driver := newMockOntapNasQtreeDriver(t) + svmUUID := "SVM1-uuid" + flexVolPrefix := fmt.Sprintf("trident_qtree_pool_%s_", *driver.Config.StoragePrefix) + driver.flexvolNamePrefix = flexVolPrefix + driver.physicalPools = map[string]storage.Pool{ + "pool1": storage.NewStoragePool(nil, "pool1"), + "pool2": storage.NewStoragePool(nil, "pool2"), + } + mockAPI.EXPECT().GetSVMUUID().Return(svmUUID) + + pools := driver.getStorageBackendPools(ctx) + + assert.NotEmpty(t, pools) + assert.Equal(t, len(driver.physicalPools), len(pools)) + + pool := pools[0] + assert.NotNil(t, driver.physicalPools[pool.Aggregate]) + assert.Equal(t, driver.physicalPools[pool.Aggregate].Name(), pool.Aggregate) + assert.Equal(t, svmUUID, pool.SvmUUID) + assert.Equal(t, flexVolPrefix, pool.FlexVolPrefix) + + pool = pools[1] + assert.NotNil(t, driver.physicalPools[pool.Aggregate]) + assert.Equal(t, driver.physicalPools[pool.Aggregate].Name(), pool.Aggregate) + assert.Equal(t, svmUUID, pool.SvmUUID) + assert.Equal(t, flexVolPrefix, pool.FlexVolPrefix) +} + func TestNASQtreeStorageDriver_getQuotaDiskLimitSize_1Gi(t *testing.T) { mockCtrl := gomock.NewController(t) @@ -3732,7 +3761,30 @@ func TestCanSnapshot_InvalidSnapshotDir(t *testing.T) { assert.NotNil(t, result, "result is nil") } +func TestCreateSnapshot_Disabled(t *testing.T) { + _, driver := newMockOntapNasQtreeDriver(t) + volConfig := &storage.VolumeConfig{ + Size: "1g", + Encryption: "false", + FileSystem: "nfs", + InternalName: flexvol, + InternalID: volInternalID, + } + + snapConfig := &storage.SnapshotConfig{ + InternalName: "snap1", + VolumeInternalName: "vol1", + } + + _, err := driver.CreateSnapshot(ctx, snapConfig, volConfig) + + assert.Error(t, err, "no error occurred") +} + func TestCreateSnapshot_Success(t *testing.T) { + defer func() { tridentconfig.DisableExtraFeatures = true }() + tridentconfig.DisableExtraFeatures = false + mockAPI, driver := newMockOntapNasQtreeDriver(t) volConfig := &storage.VolumeConfig{ Size: "1g", @@ -3765,6 +3817,9 @@ func TestCreateSnapshot_Success(t *testing.T) { } func TestCreateSnapshot_FailureErrorCheckingVolume(t *testing.T) { + defer func() { tridentconfig.DisableExtraFeatures = true }() + tridentconfig.DisableExtraFeatures = false + mockAPI, driver := newMockOntapNasQtreeDriver(t) volConfig := &storage.VolumeConfig{ Size: "1g", @@ -3789,6 +3844,9 @@ func TestCreateSnapshot_FailureErrorCheckingVolume(t *testing.T) { } func TestCreateSnapshot_FailureNoVolumeExists(t *testing.T) { + defer func() { tridentconfig.DisableExtraFeatures = true }() + tridentconfig.DisableExtraFeatures = false + mockAPI, driver := newMockOntapNasQtreeDriver(t) volConfig := &storage.VolumeConfig{ Size: "1g", @@ -3813,6 +3871,9 @@ func TestCreateSnapshot_FailureNoVolumeExists(t 
*testing.T) { } func TestCreateSnapshot_FailureSnapshotCreateFailed(t *testing.T) { + defer func() { tridentconfig.DisableExtraFeatures = true }() + tridentconfig.DisableExtraFeatures = false + mockAPI, driver := newMockOntapNasQtreeDriver(t) volConfig := &storage.VolumeConfig{ Size: "1g", @@ -3838,6 +3899,9 @@ func TestCreateSnapshot_FailureSnapshotCreateFailed(t *testing.T) { } func TestCreateSnapshot_FailureSnapshotInfoFailed(t *testing.T) { + defer func() { tridentconfig.DisableExtraFeatures = true }() + tridentconfig.DisableExtraFeatures = false + mockAPI, driver := newMockOntapNasQtreeDriver(t) volConfig := &storage.VolumeConfig{ Size: "1g", @@ -3870,6 +3934,9 @@ func TestCreateSnapshot_FailureSnapshotInfoFailed(t *testing.T) { } func TestCreateSnapshot_FailureNoSnapshots(t *testing.T) { + defer func() { tridentconfig.DisableExtraFeatures = true }() + tridentconfig.DisableExtraFeatures = false + mockAPI, driver := newMockOntapNasQtreeDriver(t) volConfig := &storage.VolumeConfig{ Size: "1g", @@ -3898,6 +3965,9 @@ func TestCreateSnapshot_FailureNoSnapshots(t *testing.T) { } func TestCreateSnapshot_FailureWrongVolumeID(t *testing.T) { + defer func() { tridentconfig.DisableExtraFeatures = true }() + tridentconfig.DisableExtraFeatures = false + mockAPI, driver := newMockOntapNasQtreeDriver(t) volConfig := &storage.VolumeConfig{ Size: "1g", @@ -3921,6 +3991,9 @@ func TestCreateSnapshot_FailureWrongVolumeID(t *testing.T) { } func TestGetSnapshot_Success(t *testing.T) { + defer func() { tridentconfig.DisableExtraFeatures = true }() + tridentconfig.DisableExtraFeatures = false + mockAPI, driver := newMockOntapNasQtreeDriver(t) volConfig := &storage.VolumeConfig{ Size: "1g", @@ -3951,6 +4024,9 @@ func TestGetSnapshot_Success(t *testing.T) { } func TestGetSnapshot_FailureNoSnapshotReturned(t *testing.T) { + defer func() { tridentconfig.DisableExtraFeatures = true }() + tridentconfig.DisableExtraFeatures = false + mockAPI, driver := newMockOntapNasQtreeDriver(t) volConfig := &storage.VolumeConfig{ Size: "1g", @@ -3967,7 +4043,8 @@ func TestGetSnapshot_FailureNoSnapshotReturned(t *testing.T) { mockAPI.EXPECT().SVMName().AnyTimes().Return("SVM1") mockAPI.EXPECT().VolumeSnapshotInfo(ctx, snapConfig.InternalName, flexvol).Return( api.Snapshot{}, - errors.NotFoundError(fmt.Sprintf("snapshot %v not found for volume %v", snapConfig.InternalName, snapConfig.VolumeInternalName))) + errors.NotFoundError(fmt.Sprintf("snapshot %v not found for volume %v", snapConfig.InternalName, + snapConfig.VolumeInternalName))) snap, err := driver.GetSnapshot(ctx, snapConfig, volConfig) @@ -3976,6 +4053,9 @@ func TestGetSnapshot_FailureNoSnapshotReturned(t *testing.T) { } func TestGetSnapshot_FailureErrorFetchingSnapshots(t *testing.T) { + defer func() { tridentconfig.DisableExtraFeatures = true }() + tridentconfig.DisableExtraFeatures = false + mockAPI, driver := newMockOntapNasQtreeDriver(t) volConfig := &storage.VolumeConfig{ Size: "1g", @@ -4001,6 +4081,9 @@ func TestGetSnapshot_FailureErrorFetchingSnapshots(t *testing.T) { } func TestGetSnapshot_FailureWrongVolumeID(t *testing.T) { + defer func() { tridentconfig.DisableExtraFeatures = true }() + tridentconfig.DisableExtraFeatures = false + mockAPI, driver := newMockOntapNasQtreeDriver(t) volConfig := &storage.VolumeConfig{ Size: "1g", @@ -4023,6 +4106,9 @@ func TestGetSnapshot_FailureWrongVolumeID(t *testing.T) { } func TestGetSnapshots_Success(t *testing.T) { + defer func() { tridentconfig.DisableExtraFeatures = true }() + tridentconfig.DisableExtraFeatures = false 
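// (Each snapshot test in this file now saves and restores
// tridentconfig.DisableExtraFeatures by hand; a t.Cleanup-based helper would
// centralize the pattern. Hypothetical, not part of this diff:
//     func enableExtraFeatures(t *testing.T) {
//         t.Helper()
//         prev := tridentconfig.DisableExtraFeatures
//         tridentconfig.DisableExtraFeatures = false
//         t.Cleanup(func() { tridentconfig.DisableExtraFeatures = prev })
//     }
// )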
+ mockAPI, driver := newMockOntapNasQtreeDriver(t) volConfig := &storage.VolumeConfig{ Size: "1g", @@ -4048,6 +4134,9 @@ func TestGetSnapshots_Success(t *testing.T) { } func TestGetSnapshots_SuccessDockerContext(t *testing.T) { + defer func() { tridentconfig.DisableExtraFeatures = true }() + tridentconfig.DisableExtraFeatures = false + mockAPI, driver := newMockOntapNasQtreeDriver(t) volConfig := &storage.VolumeConfig{ Size: "1g", @@ -4073,6 +4162,9 @@ func TestGetSnapshots_SuccessDockerContext(t *testing.T) { } func TestGetSnapshots_FailureWrongVolumeID(t *testing.T) { + defer func() { tridentconfig.DisableExtraFeatures = true }() + tridentconfig.DisableExtraFeatures = false + mockAPI, driver := newMockOntapNasQtreeDriver(t) volConfig := &storage.VolumeConfig{ Size: "1g", @@ -4091,6 +4183,9 @@ func TestGetSnapshots_FailureWrongVolumeID(t *testing.T) { } func TestGetSnapshots_FailureSnapshotListErr(t *testing.T) { + defer func() { tridentconfig.DisableExtraFeatures = true }() + tridentconfig.DisableExtraFeatures = false + mockAPI, driver := newMockOntapNasQtreeDriver(t) volConfig := &storage.VolumeConfig{ Size: "1g", @@ -4116,6 +4211,9 @@ func TestGetSnapshots_FailureSnapshotListErr(t *testing.T) { } func TestDeleteSnapshot_Success(t *testing.T) { + defer func() { tridentconfig.DisableExtraFeatures = true }() + tridentconfig.DisableExtraFeatures = false + mockAPI, driver := newMockOntapNasQtreeDriver(t) volConfig := &storage.VolumeConfig{ @@ -4139,6 +4237,9 @@ func TestDeleteSnapshot_Success(t *testing.T) { } func TestDeleteSnapshot_FailureSnapshotBusy(t *testing.T) { + defer func() { tridentconfig.DisableExtraFeatures = true }() + tridentconfig.DisableExtraFeatures = false + mockAPI, driver := newMockOntapNasQtreeDriver(t) childVols := make([]string, 0) childVols = append(childVols, flexvol) @@ -4167,6 +4268,9 @@ func TestDeleteSnapshot_FailureSnapshotBusy(t *testing.T) { } func TestDeleteSnapshot_FailureWrongVolumeID(t *testing.T) { + defer func() { tridentconfig.DisableExtraFeatures = true }() + tridentconfig.DisableExtraFeatures = false + _, driver := newMockOntapNasQtreeDriver(t) childVols := make([]string, 0) childVols = append(childVols, flexvol) diff --git a/storage_drivers/ontap/ontap_nas_test.go b/storage_drivers/ontap/ontap_nas_test.go index 03fec577f..c5daba610 100644 --- a/storage_drivers/ontap/ontap_nas_test.go +++ b/storage_drivers/ontap/ontap_nas_test.go @@ -9,6 +9,7 @@ import ( "os" "reflect" "testing" + "time" "github.com/RoaringBitmap/roaring" "github.com/golang/mock/gomock" @@ -392,6 +393,7 @@ func TestOntapNasStorageDriverInitialize(t *testing.T) { mockAPI.EXPECT().NetInterfaceGetDataLIFs(ctx, "nfs").Return([]string{"dataLIF"}, nil) mockAPI.EXPECT().EmsAutosupportLog(ctx, "ontap-nas", "1", false, "heartbeat", hostname, string(message), 1, "trident", 5).AnyTimes() + mockAPI.EXPECT().GetSVMUUID().Return("SVM1-uuid") result := driver.Initialize(ctx, "CSI", configJSON, commonConfig, secrets, BackendUUID) @@ -673,8 +675,10 @@ func TestOntapNasStorageDriverVolumeClone(t *testing.T) { mockAPI.EXPECT().VolumeExists(ctx, "").Return(false, nil) mockAPI.EXPECT().VolumeCloneCreate(ctx, volConfig.InternalName, volConfig.CloneSourceVolumeInternal, volConfig.CloneSourceSnapshotInternal, false).Return(nil) - mockAPI.EXPECT().VolumeSetComment(ctx, volConfig.InternalName, volConfig.InternalName, - "flexvol").Return(nil) + mockAPI.EXPECT().VolumeWaitForStates(ctx, volConfig.InternalName, gomock.Any(), gomock.Any(), + maxFlexvolCloneWait).Return("online", nil) + 
mockAPI.EXPECT().VolumeSetComment(ctx, volConfig.InternalName, volConfig.InternalName, "flexvol"). + Return(nil) mockAPI.EXPECT().VolumeMount(ctx, volConfig.InternalName, "/"+volConfig.InternalName).Return(nil) if test.NasType == sa.SMB { @@ -690,6 +694,74 @@ func TestOntapNasStorageDriverVolumeClone(t *testing.T) { } } +func TestOntapNasStorageDriverVolumeClone_ROClone(t *testing.T) { + mockAPI, driver := newMockOntapNASDriver(t) + + pool1 := storage.NewStoragePool(nil, "pool1") + pool1.SetInternalAttributes(map[string]string{ + "tieringPolicy": "none", + }) + driver.physicalPools = map[string]storage.Pool{"pool1": pool1} + driver.Config.SplitOnClone = "false" + + volConfig := &storage.VolumeConfig{ + Size: "1g", + Encryption: "false", + FileSystem: "nfs", + CloneSourceSnapshotInternal: "flexvol", + ReadOnlyClone: true, + } + + flexVol := api.Volume{ + Name: "flexvol", + Comment: "flexvol", + SnapshotDir: true, + } + + mockAPI.EXPECT().SVMName().AnyTimes().Return("SVM1") + mockAPI.EXPECT().VolumeInfo(ctx, volConfig.CloneSourceVolumeInternal).Return(&flexVol, nil) + + result := driver.CreateClone(ctx, nil, volConfig, pool1) + fmt.Println(result) + + assert.NoError(t, result, "received error") +} + +func TestOntapNasStorageDriverVolumeClone_ROClone_Failure(t *testing.T) { + mockAPI, driver := newMockOntapNASDriver(t) + + pool1 := storage.NewStoragePool(nil, "pool1") + pool1.SetInternalAttributes(map[string]string{ + "tieringPolicy": "none", + }) + driver.physicalPools = map[string]storage.Pool{"pool1": pool1} + driver.Config.SplitOnClone = "false" + + volConfig := &storage.VolumeConfig{ + Size: "1g", + Encryption: "false", + FileSystem: "nfs", + CloneSourceSnapshotInternal: "flexvol", + ReadOnlyClone: true, + } + + // Set snapshot directory visibility to false + flexVol := api.Volume{ + Name: "flexvol", + Comment: "flexvol", + SnapshotDir: false, + } + + mockAPI.EXPECT().SVMName().AnyTimes().Return("SVM1") + + // Creating a readonly clone only results in the driver looking up volume information and no other calls to ONTAP. 
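// (The two RO-clone tests differ only in the source volume's SnapshotDir flag.
// The driver-side gate they exercise is the check added to CreateClone in
// ontap_nas.go earlier in this diff:
//     if !flexvol.SnapshotDir { return fmt.Errorf(...) }
// With SnapshotDir true, CreateClone succeeds without creating anything on ONTAP;
// with it false, it must fail, because the clone would be served out of a snapshot
// directory that clients cannot reach.)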
+ mockAPI.EXPECT().VolumeInfo(ctx, volConfig.CloneSourceVolumeInternal).Return(&flexVol, nil) + + result := driver.CreateClone(ctx, nil, volConfig, pool1) + + assert.Error(t, result, "expected error") +} + func TestOntapNasStorageDriverVolumeClone_StoragePoolUnset(t *testing.T) { mockAPI, driver := newMockOntapNASDriver(t) volConfig := &storage.VolumeConfig{ @@ -876,6 +948,8 @@ func TestOntapNasStorageDriverVolumeClone_SMBShareCreateFail(t *testing.T) { mockAPI.EXPECT().VolumeExists(ctx, "").Return(false, nil) mockAPI.EXPECT().VolumeCloneCreate(ctx, volConfig.InternalName, volConfig.CloneSourceVolumeInternal, volConfig.CloneSourceSnapshotInternal, false).Return(nil) + mockAPI.EXPECT().VolumeWaitForStates(ctx, volConfig.InternalName, gomock.Any(), gomock.Any(), + maxFlexvolCloneWait).Return("online", nil) mockAPI.EXPECT().VolumeSetComment(ctx, volConfig.InternalName, volConfig.InternalName, "flexvol").Return(nil) mockAPI.EXPECT().VolumeMount(ctx, volConfig.InternalName, "/"+volConfig.InternalName).Return(nil) mockAPI.EXPECT().SMBShareExists(ctx, volConfig.InternalName).Return(false, nil) @@ -988,6 +1062,30 @@ func TestOntapNasStorageDriverVolumeDestroy_SnapmirrorDeleteFail(t *testing.T) { assert.Error(t, result) } +func TestOntapNasStorageDriverVolumeDestroy_SnapmirrorReleaseFail(t *testing.T) { + mockAPI, driver := newMockOntapNASDriver(t) + svmName := "SVM1" + volName := "testVol" + volNameInternal := volName + "Internal" + volConfig := &storage.VolumeConfig{ + Size: "1g", + Name: volName, + InternalName: volNameInternal, + Encryption: "false", + FileSystem: "xfs", + } + + mockAPI.EXPECT().SVMName().AnyTimes().Return(svmName) + mockAPI.EXPECT().VolumeExists(ctx, volNameInternal).Return(true, nil) + mockAPI.EXPECT().SnapmirrorDeleteViaDestination(ctx, volNameInternal, svmName).Return(nil) + mockAPI.EXPECT().SnapmirrorRelease(ctx, volNameInternal, + svmName).Return(fmt.Errorf("error releasing snapmirror")) + + result := driver.Destroy(ctx, volConfig) + + assert.Error(t, result) +} + func TestOntapNasStorageDriverVolumeDestroy_Fail(t *testing.T) { mockAPI, driver := newMockOntapNASDriver(t) svmName := "SVM1" @@ -1438,6 +1536,31 @@ func TestOntapNasStorageDriverGetStorageBackendPhysicalPoolNames(t *testing.T) { assert.Equal(t, "pool1", poolNames[0], "Pool names are not equal") } +func TestOntapNasStorageDriverGetStorageBackendPools(t *testing.T) { + mockAPI, driver := newMockOntapNASDriver(t) + svmUUID := "SVM1-uuid" + driver.physicalPools = map[string]storage.Pool{ + "pool1": storage.NewStoragePool(nil, "pool1"), + "pool2": storage.NewStoragePool(nil, "pool2"), + } + mockAPI.EXPECT().GetSVMUUID().Return(svmUUID) + + pools := driver.getStorageBackendPools(ctx) + + assert.NotEmpty(t, pools) + assert.Equal(t, len(driver.physicalPools), len(pools)) + + pool := pools[0] + assert.NotNil(t, driver.physicalPools[pool.Aggregate]) + assert.Equal(t, driver.physicalPools[pool.Aggregate].Name(), pool.Aggregate) + assert.Equal(t, svmUUID, pools[0].SvmUUID) + + pool = pools[1] + assert.NotNil(t, driver.physicalPools[pool.Aggregate]) + assert.Equal(t, driver.physicalPools[pool.Aggregate].Name(), pool.Aggregate) + assert.Equal(t, svmUUID, pools[1].SvmUUID) +} + func TestOntapNasStorageDriverGetInternalVolumeName(t *testing.T) { _, driver := newMockOntapNASDriver(t) driver.Config.StoragePrefix = utils.Ptr("storagePrefix_") @@ -1632,6 +1755,51 @@ func TestOntapNasStorageDriverCreateFollowup_WithJunctionPath_NASType_None(t *te assert.NoError(t, result) } +func
TestOntapNasStorageDriverCreateFollowup_WithJunctionPath_ROClone_Success(t *testing.T) { + mockAPI, driver := newMockOntapNASDriver(t) + volConfig := &storage.VolumeConfig{ + Size: "1g", + Encryption: "false", + FileSystem: "nfs", + InternalName: "vol1", + ReadOnlyClone: true, + CloneSourceVolumeInternal: "flexvol", + } + + flexVol := api.Volume{ + Name: "flexvol", + Comment: "flexvol", + JunctionPath: "/vol1", + AccessType: "rw", + } + + mockAPI.EXPECT().SVMName().AnyTimes().Return("SVM1") + mockAPI.EXPECT().VolumeInfo(ctx, "flexvol").Return(&flexVol, nil) + + result := driver.CreateFollowup(ctx, volConfig) + + assert.NoError(t, result, "error occurred") +} + +func TestOntapNasStorageDriverCreateFollowup_WithJunctionPath_ROClone_Failure(t *testing.T) { + mockAPI, driver := newMockOntapNASDriver(t) + volConfig := &storage.VolumeConfig{ + Size: "1g", + Encryption: "false", + FileSystem: "nfs", + InternalName: "vol1", + ReadOnlyClone: true, + CloneSourceVolumeInternal: "flexvol", + } + + mockAPI.EXPECT().SVMName().AnyTimes().Return("SVM1") + mockAPI.EXPECT().VolumeInfo(ctx, "flexvol").Return(nil, api.ApiError("api error")) + + result := driver.CreateFollowup(ctx, volConfig) + + assert.Error(t, result, "expected error") +} + func TestOntapNasStorageDriverCreateFollowup_WithJunctionPath_NASType_SMB(t *testing.T) { mockAPI, driver := newMockOntapNASDriver(t) driver.Config.NASType = "smb" @@ -3198,3 +3366,47 @@ func TestOntapNasStorageDriverBackendName(t *testing.T) { assert.Equal(t, result, "myBackend") } + +func TestOntapNasStorageDriverUpdateMirror(t *testing.T) { + mockAPI, driver := newMockOntapNASDriver(t) + + mockAPI.EXPECT().SnapmirrorUpdate(ctx, "testVol", "testSnap") + + err := driver.UpdateMirror(ctx, "testVol", "testSnap") + assert.Error(t, err, "expected error") +} + +func TestOntapNasStorageDriverCheckMirrorTransferState(t *testing.T) { + mockAPI, driver := newMockOntapNASDriver(t) + + snapmirror := &api.Snapmirror{ + State: "snapmirrored", + RelationshipStatus: "idle", + } + + mockAPI.EXPECT().SVMName().AnyTimes().Return("fakesvm1") + mockAPI.EXPECT().SnapmirrorGet(ctx, "fakevolume1", "fakesvm1", "", "").Return(snapmirror, nil) + + result, err := driver.CheckMirrorTransferState(ctx, "fakevolume1") + + assert.Nil(t, result, "expected nil") + assert.Error(t, err, "expected error") +} + +func TestOntapStorageDriverGetMirrorTransferTime(t *testing.T) { + mockAPI, driver := newMockOntapNASDriver(t) + + timeNow := time.Now() + snapmirror := &api.Snapmirror{ + State: "snapmirrored", + RelationshipStatus: "idle", + EndTransferTime: &timeNow, + } + + mockAPI.EXPECT().SVMName().AnyTimes().Return("fakesvm1") + mockAPI.EXPECT().SnapmirrorGet(ctx, "fakevolume1", "fakesvm1", "", "").Return(snapmirror, nil) + + result, err := driver.GetMirrorTransferTime(ctx, "fakevolume1") + assert.NotNil(t, result, "received nil") + assert.NoError(t, err, "received error") +} diff --git a/storage_drivers/ontap/ontap_san.go b/storage_drivers/ontap/ontap_san.go index f94c68a18..c2ce64c89 100644 --- a/storage_drivers/ontap/ontap_san.go +++ b/storage_drivers/ontap/ontap_san.go @@ -116,10 +116,9 @@ func (d *SANStorageDriver) Initialize( } err = InitializeSANDriver(ctx, driverContext, d.API, &d.Config, d.validate, backendUUID) - - // clean up igroup for failed driver if err != nil { if d.Config.DriverContext == tridentconfig.ContextCSI { + // Clean up igroup for failed driver. 
err := d.API.IgroupDestroy(ctx, d.Config.IgroupName) if err != nil { Logc(ctx).WithError(err).WithField("igroup", d.Config.IgroupName).Warn("Error deleting igroup.") @@ -128,6 +127,13 @@ func (d *SANStorageDriver) Initialize( return fmt.Errorf("error initializing %s driver: %v", d.Name(), err) } + // Identify non-overlapping storage backend pools on the driver backend. + pools, err := drivers.EncodeStorageBackendPools(ctx, commonConfig, d.getStorageBackendPools(ctx)) + if err != nil { + return fmt.Errorf("failed to encode storage backend pools: %v", err) + } + d.Config.BackendPools = pools + // Set up the autosupport heartbeat d.telemetry = NewOntapTelemetry(ctx, d) d.telemetry.Telemetry = tridentconfig.OrchestratorTelemetry @@ -713,8 +719,18 @@ func (d *SANStorageDriver) Destroy(ctx context.Context, volConfig *storage.Volum return fmt.Errorf("error reading LUN maps for volume %s: %v", name, err) } if lunID >= 0 { + publishInfo := utils.VolumePublishInfo{ + DevicePath: "", + VolumeAccessInfo: utils.VolumeAccessInfo{ + IscsiAccessInfo: utils.IscsiAccessInfo{ + IscsiTargetIQN: iSCSINodeName, + IscsiLunNumber: int32(lunID), + }, + }, + } + // Inform the host about the device removal - if _, err := utils.PrepareDeviceForRemoval(ctx, lunID, iSCSINodeName, true, false); err != nil { + if _, err := utils.PrepareDeviceForRemoval(ctx, &publishInfo, nil, true, false); err != nil { Logc(ctx).Error(err) } } @@ -958,6 +974,28 @@ func (d *SANStorageDriver) GetStorageBackendPhysicalPoolNames(context.Context) [ return getStorageBackendPhysicalPoolNamesCommon(d.physicalPools) } +// getStorageBackendPools determines any non-overlapping, discrete storage pools present on a driver's storage backend. +func (d *SANStorageDriver) getStorageBackendPools(ctx context.Context) []drivers.OntapStorageBackendPool { + fields := LogFields{"Method": "getStorageBackendPools", "Type": "SANStorageDriver"} + Logc(ctx).WithFields(fields).Debug(">>>> getStorageBackendPools") + defer Logc(ctx).WithFields(fields).Debug("<<<< getStorageBackendPools") + + // For this driver, a discrete storage pool is composed of the following: + // 1. SVM UUID + // 2. Aggregate (physical pool) + svmUUID := d.GetAPI().GetSVMUUID() + backendPools := make([]drivers.OntapStorageBackendPool, 0) + for _, pool := range d.physicalPools { + backendPool := drivers.OntapStorageBackendPool{ + SvmUUID: svmUUID, + Aggregate: pool.Name(), + } + backendPools = append(backendPools, backendPool) + } + + return backendPools +} + func (d *SANStorageDriver) getStoragePoolAttributes(ctx context.Context) map[string]sa.Offer { client := d.GetAPI() mirroring, _ := client.IsSVMDRCapable(ctx) @@ -1285,7 +1323,7 @@ func (d *SANStorageDriver) ReconcileNodeAccess(ctx context.Context, nodes []*uti // in physical pools list. 
func (d *SANStorageDriver) GetBackendState(ctx context.Context) (string, *roaring.Bitmap) { Logc(ctx).Debug(">>>> GetBackendState") - defer Logc(ctx).Debugf("<<<< GetBackendState") + defer Logc(ctx).Debug("<<<< GetBackendState") return getSVMState(ctx, d.API, "iscsi", d.GetStorageBackendPhysicalPoolNames(ctx)) } diff --git a/storage_drivers/ontap/ontap_san_economy.go b/storage_drivers/ontap/ontap_san_economy.go index ef1550062..56f38c024 100644 --- a/storage_drivers/ontap/ontap_san_economy.go +++ b/storage_drivers/ontap/ontap_san_economy.go @@ -330,10 +330,9 @@ func (d *SANEconomyStorageDriver) Initialize( } err = InitializeSANDriver(ctx, driverContext, d.API, &d.Config, d.validate, backendUUID) - - // clean up igroup for failed driver if err != nil { if d.Config.DriverContext == tridentconfig.ContextCSI { + // Clean up igroup for failed driver. err := d.API.IgroupDestroy(ctx, d.Config.IgroupName) if err != nil { Logc(ctx).WithError(err).WithField("igroup", d.Config.IgroupName).Warn("Error deleting igroup.") @@ -342,6 +341,13 @@ func (d *SANEconomyStorageDriver) Initialize( return fmt.Errorf("error initializing %s driver: %v", d.Name(), err) } + // Identify non-overlapping storage backend pools on the driver backend. + pools, err := drivers.EncodeStorageBackendPools(ctx, commonConfig, d.getStorageBackendPools(ctx)) + if err != nil { + return fmt.Errorf("failed to encode storage backend pools: %v", err) + } + d.Config.BackendPools = pools + // Set up the autosupport heartbeat d.telemetry = NewOntapTelemetry(ctx, d) d.telemetry.Telemetry = tridentconfig.OrchestratorTelemetry @@ -878,8 +884,18 @@ func (d *SANEconomyStorageDriver) Destroy(ctx context.Context, volConfig *storag return fmt.Errorf("error reading LUN maps for volume %s: %v", name, err) } if lunID >= 0 { + publishInfo := utils.VolumePublishInfo{ + DevicePath: "", + VolumeAccessInfo: utils.VolumeAccessInfo{ + IscsiAccessInfo: utils.IscsiAccessInfo{ + IscsiTargetIQN: iSCSINodeName, + IscsiLunNumber: int32(lunID), + }, + }, + } + // Inform the host about the device removal - if _, err := utils.PrepareDeviceForRemoval(ctx, lunID, iSCSINodeName, true, false); err != nil { + if _, err := utils.PrepareDeviceForRemoval(ctx, &publishInfo, nil, true, false); err != nil { Logc(ctx).Error(err) } } @@ -1713,6 +1729,31 @@ func (d *SANEconomyStorageDriver) GetStorageBackendPhysicalPoolNames(context.Con return getStorageBackendPhysicalPoolNamesCommon(d.physicalPools) } +// getStorageBackendPools determines any non-overlapping, discrete storage pools present on a driver's storage backend. +func (d *SANEconomyStorageDriver) getStorageBackendPools(ctx context.Context) []drivers.OntapEconomyStorageBackendPool { + fields := LogFields{"Method": "getStorageBackendPools", "Type": "SANEconomyStorageDriver"} + Logc(ctx).WithFields(fields).Debug(">>>> getStorageBackendPools") + defer Logc(ctx).WithFields(fields).Debug("<<<< getStorageBackendPools") + + // For this driver, a discrete storage pool is composed of the following: + // 1. SVM UUID + // 2. Aggregate (physical pool) + // 3. 
FlexVol Name Prefix + svmUUID := d.GetAPI().GetSVMUUID() + flexVolPrefix := d.FlexvolNamePrefix() + backendPools := make([]drivers.OntapEconomyStorageBackendPool, 0) + for _, pool := range d.physicalPools { + backendPool := drivers.OntapEconomyStorageBackendPool{ + SvmUUID: svmUUID, + Aggregate: pool.Name(), + FlexVolPrefix: flexVolPrefix, + } + backendPools = append(backendPools, backendPool) + } + + return backendPools +} + func (d *SANEconomyStorageDriver) getStoragePoolAttributes() map[string]sa.Offer { return map[string]sa.Offer{ sa.BackendType: sa.NewStringOffer(d.Name()), @@ -2154,7 +2195,7 @@ func (d *SANEconomyStorageDriver) ReconcileNodeAccess( // in physical pools list. func (d *SANEconomyStorageDriver) GetBackendState(ctx context.Context) (string, *roaring.Bitmap) { Logc(ctx).Debug(">>>> GetBackendState") - defer Logc(ctx).Debugf("<<<< GetBackendState") + defer Logc(ctx).Debug("<<<< GetBackendState") return getSVMState(ctx, d.API, "iscsi", d.GetStorageBackendPhysicalPoolNames(ctx)) } diff --git a/storage_drivers/ontap/ontap_san_economy_test.go b/storage_drivers/ontap/ontap_san_economy_test.go index 31d591378..ac3fa3960 100644 --- a/storage_drivers/ontap/ontap_san_economy_test.go +++ b/storage_drivers/ontap/ontap_san_economy_test.go @@ -1,4 +1,4 @@ -// Copyright 2022 NetApp, Inc. All Rights Reserved. +// Copyright 2023 NetApp, Inc. All Rights Reserved. package ontap @@ -1591,11 +1591,17 @@ func TestOntapSanEconomyVolumePublish(t *testing.T) { Unmanaged: false, } + dummyLun := &api.Lun{ + Comment: "dummyLun", + SerialNumber: "testSerialNumber", + } + mockAPI.EXPECT().LunList(ctx, gomock.Any()).Times(1).Return(api.Luns{api.Lun{Size: "1g", Name: "lunName", VolumeName: "volumeName"}}, nil) mockAPI.EXPECT().IscsiNodeGetNameRequest(ctx).Times(1).Return("node1", nil) mockAPI.EXPECT().IscsiInterfaceGet(ctx, gomock.Any()).Return([]string{"iscsi_if"}, nil).Times(1) mockAPI.EXPECT().LunGetFSType(ctx, "/vol/volumeName/storagePrefix_lunName") + mockAPI.EXPECT().LunGetByName(ctx, "/vol/volumeName/storagePrefix_lunName").Return(dummyLun, nil) mockAPI.EXPECT().EnsureIgroupAdded(ctx, gomock.Any(), gomock.Any()).Times(1) mockAPI.EXPECT().EnsureLunMapped(ctx, gomock.Any(), gomock.Any()).Times(1).Return(1, nil) mockAPI.EXPECT().LunMapGetReportingNodes(ctx, gomock.Any(), gomock.Any()).Times(1).Return([]string{"node1"}, nil) @@ -1623,11 +1629,17 @@ func TestOntapSanEconomyVolumePublishSLMError(t *testing.T) { Unmanaged: false, } + dummyLun := &api.Lun{ + Comment: "dummyLun", + SerialNumber: "testSerialNumber", + } + mockAPI.EXPECT().LunList(ctx, gomock.Any()).Times(1).Return(api.Luns{api.Lun{Size: "1g", Name: "lunName", VolumeName: "volumeName"}}, nil) mockAPI.EXPECT().IscsiNodeGetNameRequest(ctx).Times(1).Return("node1", nil) mockAPI.EXPECT().IscsiInterfaceGet(ctx, gomock.Any()).Return([]string{"iscsi_if"}, nil).Times(1) mockAPI.EXPECT().LunGetFSType(ctx, "/vol/volumeName/storagePrefix_lunName") + mockAPI.EXPECT().LunGetByName(ctx, "/vol/volumeName/storagePrefix_lunName").Return(dummyLun, nil) mockAPI.EXPECT().EnsureIgroupAdded(ctx, gomock.Any(), gomock.Any()).Times(1) mockAPI.EXPECT().EnsureLunMapped(ctx, gomock.Any(), gomock.Any()).Times(1).Return(1, nil) mockAPI.EXPECT().LunMapGetReportingNodes(ctx, gomock.Any(), gomock.Any()).Times(1).Return([]string{"node1"}, nil) @@ -3024,6 +3036,35 @@ func TestOntapSanEconomyGetStorageBackendPhysicalPoolNames(t *testing.T) { assert.Equal(t, "pool1", poolNames[0], "Pool names are not equal") } +func TestOntapSanEconomyGetStorageBackendPools(t *testing.T) { + 
mockAPI, driver := newMockOntapSanEcoDriver(t) + svmUUID := "SVM1-uuid" + flexVolPrefix := fmt.Sprintf("trident_lun_pool_%s_", *driver.Config.StoragePrefix) + driver.flexvolNamePrefix = flexVolPrefix + driver.physicalPools = map[string]storage.Pool{ + "pool1": storage.NewStoragePool(nil, "pool1"), + "pool2": storage.NewStoragePool(nil, "pool2"), + } + mockAPI.EXPECT().GetSVMUUID().Return(svmUUID) + + pools := driver.getStorageBackendPools(ctx) + + assert.NotEmpty(t, pools) + assert.Equal(t, len(driver.physicalPools), len(pools)) + + pool := pools[0] + assert.NotNil(t, driver.physicalPools[pool.Aggregate]) + assert.Equal(t, driver.physicalPools[pool.Aggregate].Name(), pool.Aggregate) + assert.Equal(t, svmUUID, pool.SvmUUID) + assert.Equal(t, flexVolPrefix, pool.FlexVolPrefix) + + pool = pools[1] + assert.NotNil(t, driver.physicalPools[pool.Aggregate]) + assert.Equal(t, driver.physicalPools[pool.Aggregate].Name(), pool.Aggregate) + assert.Equal(t, svmUUID, pool.SvmUUID) + assert.Equal(t, flexVolPrefix, pool.FlexVolPrefix) +} + func TestOntapSanEconomyGetInternalVolumeName(t *testing.T) { _, d := newMockOntapSanEcoDriver(t) d.Config.StoragePrefix = utils.Ptr("storagePrefix_") @@ -3771,6 +3812,7 @@ func TestOntapSanEconomyInitialize(t *testing.T) { mockAPI.EXPECT().IscsiInitiatorGetDefaultAuth(ctx).Return(authResponse, nil) mockAPI.EXPECT().EmsAutosupportLog(ctx, "ontap-san-economy", "1", false, "heartbeat", hostname, string(message), 1, "trident", 5).AnyTimes() + mockAPI.EXPECT().GetSVMUUID().Return("SVM1-uuid") result := d.Initialize(ctx, "csi", commonConfigJSON, commonConfig, secrets, BackendUUID) @@ -3901,6 +3943,7 @@ func TestOntapSanEconomyInitialize_NumOfLUNs(t *testing.T) { "trident", 5).AnyTimes() if !test.expectError { mockAPI.EXPECT().IscsiInitiatorGetDefaultAuth(ctx).Return(authResponse, nil) + mockAPI.EXPECT().GetSVMUUID().Return("SVM1-uuid").AnyTimes() } result := d.Initialize(ctx, "csi", commonConfigJSON, commonConfig, secrets, BackendUUID) diff --git a/storage_drivers/ontap/ontap_san_nvme.go b/storage_drivers/ontap/ontap_san_nvme.go index 841926e2a..f8cbcc8dd 100644 --- a/storage_drivers/ontap/ontap_san_nvme.go +++ b/storage_drivers/ontap/ontap_san_nvme.go @@ -24,7 +24,7 @@ import ( "github.com/netapp/trident/utils/errors" ) -// RegExp to match the namespace path either empty string or +// NVMeNamespaceRegExp is a RegExp to match the namespace path: either an empty string or // a string of the form /vol/<flexvol>/<namespace> var NVMeNamespaceRegExp = regexp.MustCompile(`[^(\/vol\/.+\/.+)?$]`) @@ -109,7 +109,6 @@ func (d *NVMeStorageDriver) Initialize( if err != nil { return fmt.Errorf("error initializing %s driver: %v", d.Name(), err) } - d.Config = *config // Unit tests mock the API layer, so we only use the real API interface if it doesn't already exist. if d.API == nil { return fmt.Errorf("error initializing %s driver: %v", d.Name(), err) } } + // OntapStorageDriverConfig gets updated with the SVM name in InitializeOntapDriver() if the SVM name is not provided + // in the backend config JSON. Therefore, this is the proper place to assign it to d.Config.
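// (Concretely: InitializeOntapDriver can discover and fill in the SVM name on the
// config, so the copy below has to run after that call; the assignment removed in
// the previous hunk ran before discovery and could capture a config without it.)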
+ d.Config = *config // Check NVMe feature support if !d.API.SupportsFeature(ctx, api.NVMeProtocol) { return fmt.Errorf("error initializing %s driver: ontap doesn't support NVMe", d.Name()) } - transport := "tcp" - if d.ips, err = d.API.NetInterfaceGetDataLIFs(ctx, fmt.Sprintf("%s_%s", sa.NVMe, transport)); err != nil { + if d.ips, err = d.API.NetInterfaceGetDataLIFs(ctx, sa.NVMeTransport); err != nil { return err } if len(d.ips) == 0 { - return fmt.Errorf("no data LIFs with TCP protocol found on SVM %s", d.API.SVMName()) + return fmt.Errorf("no NVMe data LIFs found on SVM %s", d.API.SVMName()) } else { Logc(ctx).WithField("dataLIFs", d.ips).Debug("Found LIFs.") } @@ -144,6 +145,13 @@ func (d *NVMeStorageDriver) Initialize( return fmt.Errorf("error validating %s driver: %v", d.Name(), err) } + // Identify non-overlapping storage backend pools on the driver backend. + pools, err := drivers.EncodeStorageBackendPools(ctx, commonConfig, d.getStorageBackendPools(ctx)) + if err != nil { + return fmt.Errorf("failed to encode storage backend pools: %v", err) + } + d.Config.BackendPools = pools + // Set up the autosupport heartbeat d.telemetry = NewOntapTelemetry(ctx, d) d.telemetry.Telemetry = tridentconfig.OrchestratorTelemetry @@ -798,6 +806,7 @@ func (d *NVMeStorageDriver) Unpublish( "name": name, "NVMeNamespaceUUID": volConfig.AccessInfo.NVMeNamespaceUUID, "NVMeSubsystemUUID": volConfig.AccessInfo.NVMeSubsystemUUID, + "hostNQN": publishInfo.HostNQN, } Logd(ctx, d.Name(), d.Config.DebugTraceFlags["method"]).WithFields(fields).Trace(">>>> Unpublish") defer Logd(ctx, d.Name(), d.Config.DebugTraceFlags["method"]).WithFields(fields).Trace("<<<< Unpublish") @@ -805,7 +814,13 @@ func (d *NVMeStorageDriver) Unpublish( subsystemUUID := volConfig.AccessInfo.NVMeSubsystemUUID namespaceUUID := volConfig.AccessInfo.NVMeNamespaceUUID - return d.API.NVMeEnsureNamespaceUnmapped(ctx, subsystemUUID, namespaceUUID) + removePublishInfo, err := d.API.NVMeEnsureNamespaceUnmapped(ctx, publishInfo.HostNQN, subsystemUUID, namespaceUUID) + if removePublishInfo { + volConfig.AccessInfo.NVMeTargetIPs = []string{} + volConfig.AccessInfo.NVMeSubsystemNQN = "" + volConfig.AccessInfo.NVMeSubsystemUUID = "" + } + return err } // CanSnapshot determines whether a snapshot as specified in the provided snapshot config may be taken. @@ -934,6 +949,28 @@ func (d *NVMeStorageDriver) GetStorageBackendPhysicalPoolNames(context.Context) return getStorageBackendPhysicalPoolNamesCommon(d.physicalPools) } +// getStorageBackendPools determines any non-overlapping, discrete storage pools present on a driver's storage backend. +func (d *NVMeStorageDriver) getStorageBackendPools(ctx context.Context) []drivers.OntapStorageBackendPool { + fields := LogFields{"Method": "getStorageBackendPools", "Type": "NVMeStorageDriver"} + Logc(ctx).WithFields(fields).Debug(">>>> getStorageBackendPools") + defer Logc(ctx).WithFields(fields).Debug("<<<< getStorageBackendPools") + + // For this driver, a discrete storage pool is composed of the following: + // 1. SVM UUID + // 2. Aggregate (physical pool) + svmUUID := d.GetAPI().GetSVMUUID() + backendPools := make([]drivers.OntapStorageBackendPool, 0) + for _, pool := range d.physicalPools { + backendPool := drivers.OntapStorageBackendPool{ + SvmUUID: svmUUID, + Aggregate: pool.Name(), + } + backendPools = append(backendPools, backendPool) + } + + return backendPools +} + // getStoragePoolAttributes returns the map for storage pool attributes. 
func (d *NVMeStorageDriver) getStoragePoolAttributes(ctx context.Context) map[string]sa.Offer { client := d.GetAPI() @@ -1259,6 +1296,15 @@ func (d *NVMeStorageDriver) ReconcileNodeAccess(_ context.Context, _ []*utils.No return nil } +// GetBackendState returns the reason if SVM is offline, and a flag to indicate if there is change +// in physical pools list. +func (d *NVMeStorageDriver) GetBackendState(ctx context.Context) (string, *roaring.Bitmap) { + Logc(ctx).Debug(">>>> GetBackendState") + defer Logc(ctx).Debug("<<<< GetBackendState") + + return getSVMState(ctx, d.API, sa.NVMeTransport, d.GetStorageBackendPhysicalPoolNames(ctx)) +} + // String makes NVMeStorageDriver satisfy the Stringer interface. func (d *NVMeStorageDriver) String() string { return utils.ToStringRedacted(&d, GetOntapDriverRedactList(), d.GetExternalConfig(context.Background())) diff --git a/storage_drivers/ontap/ontap_san_nvme_test.go b/storage_drivers/ontap/ontap_san_nvme_test.go index 848a841f7..7e382ef0e 100644 --- a/storage_drivers/ontap/ontap_san_nvme_test.go +++ b/storage_drivers/ontap/ontap_san_nvme_test.go @@ -20,10 +20,7 @@ import ( "github.com/netapp/trident/utils/errors" ) -var ( - mockIPs = []string{"0.0.0.0", "1.1.1.1"} - transport = "tcp" -) +var mockIPs = []string{"0.0.0.0", "1.1.1.1"} func newNVMeDriver(apiOverride api.OntapAPI) *NVMeStorageDriver { sPrefix := "test_" @@ -121,7 +118,7 @@ func TestNVMeInitialize_GetDataLifError(t *testing.T) { } configJSON := `{"SANType": "nvme"}` mAPI.EXPECT().SupportsFeature(ctx, gomock.Any()).Return(true) - mAPI.EXPECT().NetInterfaceGetDataLIFs(ctx, fmt.Sprintf("%s_%s", sa.NVMe, transport)).Return(nil, fmt.Errorf("error getting dataLifs")) + mAPI.EXPECT().NetInterfaceGetDataLIFs(ctx, sa.NVMeTransport).Return(nil, fmt.Errorf("error getting dataLifs")) err := d.Initialize(ctx, tridentconfig.ContextCSI, configJSON, commonConfig, nil, BackendUUID) @@ -136,12 +133,12 @@ func TestNVMeInitialize_NoDataLifs(t *testing.T) { } configJSON := `{"SANType": "nvme"}` mAPI.EXPECT().SupportsFeature(ctx, gomock.Any()).Return(true) - mAPI.EXPECT().NetInterfaceGetDataLIFs(ctx, fmt.Sprintf("%s_%s", sa.NVMe, transport)).Return([]string{}, nil) + mAPI.EXPECT().NetInterfaceGetDataLIFs(ctx, sa.NVMeTransport).Return([]string{}, nil) mAPI.EXPECT().SVMName().Return("svm") err := d.Initialize(ctx, tridentconfig.ContextCSI, configJSON, commonConfig, nil, BackendUUID) - assert.ErrorContains(t, err, "no data LIFs with TCP protocol found on SVM") + assert.ErrorContains(t, err, "no NVMe data LIFs found on SVM svm") } func TestNVMeInitialize_GetAggrNamesError(t *testing.T) { @@ -152,7 +149,7 @@ func TestNVMeInitialize_GetAggrNamesError(t *testing.T) { } configJSON := `{"SANType": "nvme"}` mAPI.EXPECT().SupportsFeature(ctx, gomock.Any()).Return(true) - mAPI.EXPECT().NetInterfaceGetDataLIFs(ctx, fmt.Sprintf("%s_%s", sa.NVMe, transport)).Return(mockIPs, nil) + mAPI.EXPECT().NetInterfaceGetDataLIFs(ctx, sa.NVMeTransport).Return(mockIPs, nil) mAPI.EXPECT().IsSVMDRCapable(ctx).Return(true, nil) mAPI.EXPECT().GetSVMAggregateNames(ctx).Return(nil, fmt.Errorf("failed to get aggrs")) @@ -171,7 +168,7 @@ func TestNVMeInitialize_ValidateStoragePrefixError(t *testing.T) { } configJSON := `{"SANType": "nvme"}` mAPI.EXPECT().SupportsFeature(ctx, gomock.Any()).Return(true) - mAPI.EXPECT().NetInterfaceGetDataLIFs(ctx, fmt.Sprintf("%s_%s", sa.NVMe, transport)).Return(mockIPs, nil) + mAPI.EXPECT().NetInterfaceGetDataLIFs(ctx, sa.NVMeTransport).Return(mockIPs, nil) mAPI.EXPECT().IsSVMDRCapable(ctx).Return(true, nil) 
mAPI.EXPECT().GetSVMAggregateNames(ctx).Return([]string{"data"}, nil) mAPI.EXPECT().GetSVMAggregateAttributes(ctx).Return(nil, nil) @@ -190,13 +187,14 @@ func TestNVMeInitialize_Success(t *testing.T) { } configJSON := `{"SANType": "nvme"}` mAPI.EXPECT().SupportsFeature(ctx, gomock.Any()).Return(true) - mAPI.EXPECT().NetInterfaceGetDataLIFs(ctx, fmt.Sprintf("%s_%s", sa.NVMe, transport)).Return(mockIPs, nil) + mAPI.EXPECT().NetInterfaceGetDataLIFs(ctx, sa.NVMeTransport).Return(mockIPs, nil) mAPI.EXPECT().IsSVMDRCapable(ctx).Return(true, nil) mAPI.EXPECT().GetSVMAggregateNames(ctx).Return([]string{"data"}, nil) mAPI.EXPECT().GetSVMAggregateAttributes(ctx).Return(nil, nil) mAPI.EXPECT().SVMName().Return("svm") mAPI.EXPECT().EmsAutosupportLog(ctx, gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() + mAPI.EXPECT().GetSVMUUID().Return("svm-uuid") err := d.Initialize(ctx, tridentconfig.ContextCSI, configJSON, commonConfig, nil, BackendUUID) @@ -248,6 +246,31 @@ func TestNVMeGetStorageBackendPhysicalPoolNames(t *testing.T) { assert.Equal(t, d.GetStorageBackendPhysicalPoolNames(ctx), []string{"pool1"}, "Physical pools are different.") } +func TestNVMeGetStorageBackendPools(t *testing.T) { + driver, mockAPI := newNVMeDriverAndMockApi(t) + svmUUID := "SVM1-uuid" + driver.physicalPools = map[string]storage.Pool{ + "pool1": storage.NewStoragePool(nil, "pool1"), + "pool2": storage.NewStoragePool(nil, "pool2"), + } + mockAPI.EXPECT().GetSVMUUID().Return(svmUUID) + + pools := driver.getStorageBackendPools(ctx) + + assert.NotEmpty(t, pools) + assert.Equal(t, len(driver.physicalPools), len(pools)) + + pool := pools[0] + assert.NotNil(t, driver.physicalPools[pool.Aggregate]) + assert.Equal(t, driver.physicalPools[pool.Aggregate].Name(), pool.Aggregate) + assert.Equal(t, svmUUID, pool.SvmUUID) + + pool = pools[1] + assert.NotNil(t, driver.physicalPools[pool.Aggregate]) + assert.Equal(t, driver.physicalPools[pool.Aggregate].Name(), pool.Aggregate) + assert.Equal(t, svmUUID, pool.SvmUUID) +} + func TestNVMeGetVolumeOpts(t *testing.T) { d := newNVMeDriver(nil) volConfig := storage.VolumeConfig{} @@ -988,7 +1011,7 @@ func TestUnpublish(t *testing.T) { // case 1: NVMeEnsureNamespaceUnmapped returned error volConfig.AccessInfo.NVMeNamespaceUUID = "fakeUUID" tridentconfig.CurrentDriverContext = tridentconfig.ContextCSI - mock.EXPECT().NVMeEnsureNamespaceUnmapped(ctx, gomock.Any(), gomock.Any()).Return(fmt.Errorf("NVMeEnsureNamespaceUnmapped returned error")) + mock.EXPECT().NVMeEnsureNamespaceUnmapped(ctx, gomock.Any(), gomock.Any(), gomock.Any()).Return(false, fmt.Errorf("NVMeEnsureNamespaceUnmapped returned error")) err := d.Unpublish(ctx, volConfig, publishInfo) @@ -998,7 +1021,7 @@ func TestUnpublish(t *testing.T) { volConfig.AccessInfo.PublishEnforcement = true volConfig.AccessInfo.NVMeNamespaceUUID = "fakeUUID" tridentconfig.CurrentDriverContext = tridentconfig.ContextCSI - mock.EXPECT().NVMeEnsureNamespaceUnmapped(ctx, gomock.Any(), gomock.Any()).Return(nil) + mock.EXPECT().NVMeEnsureNamespaceUnmapped(ctx, gomock.Any(), gomock.Any(), gomock.Any()).Return(true, nil) err = d.Unpublish(ctx, volConfig, publishInfo) @@ -1407,3 +1430,14 @@ func TestCreateNamespacePath(t *testing.T) { assert.Equal(t, nsNameExpected, nsNameGot) } + +func TestGetBackendState(t *testing.T) { + d, mAPI := newNVMeDriverAndMockApi(t) + + mAPI.EXPECT().GetSVMState(ctx).Return("", fmt.Errorf("returning test error")) + + reason, changeMap := 
d.GetBackendState(ctx) + + assert.Equal(t, reason, StateReasonSVMUnreachable, "should be 'SVM is not reachable'") + assert.NotNil(t, changeMap, "should not be nil") +} diff --git a/storage_drivers/ontap/ontap_san_test.go b/storage_drivers/ontap/ontap_san_test.go index 8203b111d..884a93b3b 100644 --- a/storage_drivers/ontap/ontap_san_test.go +++ b/storage_drivers/ontap/ontap_san_test.go @@ -1,4 +1,4 @@ -// Copyright 2022 NetApp, Inc. All Rights Reserved. +// Copyright 2023 NetApp, Inc. All Rights Reserved. package ontap @@ -590,10 +590,16 @@ func TestOntapSanVolumePublishManaged(t *testing.T) { Unmanaged: false, } + dummyLun := &api.Lun{ + Comment: "dummyLun", + SerialNumber: "testSerialNumber", + } + mockAPI.EXPECT().VolumeInfo(ctx, gomock.Any()).Times(1).Return(&api.Volume{AccessType: VolTypeRW}, nil) mockAPI.EXPECT().IscsiNodeGetNameRequest(ctx).Times(1).Return("node1", nil) mockAPI.EXPECT().IscsiInterfaceGet(ctx, gomock.Any()).Return([]string{"iscsi_if"}, nil).Times(1) mockAPI.EXPECT().LunGetFSType(ctx, "/vol/lunName/lun0") + mockAPI.EXPECT().LunGetByName(ctx, "/vol/lunName/lun0").Return(dummyLun, nil) mockAPI.EXPECT().EnsureIgroupAdded(ctx, gomock.Any(), gomock.Any()).Times(1) mockAPI.EXPECT().EnsureLunMapped(ctx, gomock.Any(), gomock.Any()).Times(1).Return(1, nil) mockAPI.EXPECT().LunMapGetReportingNodes(ctx, gomock.Any(), gomock.Any()).Times(1).Return([]string{"node1"}, nil) @@ -626,10 +632,16 @@ func TestOntapSanVolumePublishUnmanaged(t *testing.T) { Unmanaged: true, } + dummyLun := &api.Lun{ + Comment: "dummyLun", + SerialNumber: "testSerialNumber", + } + mockAPI.EXPECT().VolumeInfo(ctx, gomock.Any()).Times(1).Return(&api.Volume{AccessType: VolTypeRW}, nil) mockAPI.EXPECT().IscsiNodeGetNameRequest(ctx).Times(1).Return("node1", nil) mockAPI.EXPECT().IscsiInterfaceGet(ctx, gomock.Any()).Return([]string{"iscsi_if"}, nil).Times(1) mockAPI.EXPECT().LunGetFSType(ctx, "/vol/lunName/lun0") + mockAPI.EXPECT().LunGetByName(ctx, "/vol/lunName/lun0").Return(dummyLun, nil) mockAPI.EXPECT().EnsureIgroupAdded(ctx, gomock.Any(), gomock.Any()).Times(1) mockAPI.EXPECT().EnsureLunMapped(ctx, gomock.Any(), gomock.Any()).Times(1).Return(1, nil) mockAPI.EXPECT().LunMapGetReportingNodes(ctx, gomock.Any(), gomock.Any()).Times(1).Return([]string{"node1"}, nil) @@ -662,10 +674,16 @@ func TestOntapSanVolumePublishSLMError(t *testing.T) { Unmanaged: false, } + dummyLun := &api.Lun{ + Comment: "dummyLun", + SerialNumber: "testSerialNumber", + } + mockAPI.EXPECT().VolumeInfo(ctx, gomock.Any()).Times(1).Return(&api.Volume{AccessType: VolTypeRW}, nil) mockAPI.EXPECT().IscsiNodeGetNameRequest(ctx).Times(1).Return("node1", nil) mockAPI.EXPECT().IscsiInterfaceGet(ctx, gomock.Any()).Return([]string{"iscsi_if"}, nil).Times(1) mockAPI.EXPECT().LunGetFSType(ctx, "/vol/lunName/lun0") + mockAPI.EXPECT().LunGetByName(ctx, "/vol/lunName/lun0").Return(dummyLun, nil) mockAPI.EXPECT().EnsureIgroupAdded(ctx, gomock.Any(), gomock.Any()).Times(1) mockAPI.EXPECT().EnsureLunMapped(ctx, gomock.Any(), gomock.Any()).Times(1).Return(1, nil) mockAPI.EXPECT().LunMapGetReportingNodes(ctx, gomock.Any(), gomock.Any()).Times(1).Return([]string{"node1"}, nil) @@ -1165,7 +1183,8 @@ func TestOntapSanVolumeClone(t *testing.T) { mockAPI.EXPECT().VolumeSnapshotCreate(ctx, gomock.Any(), gomock.Any()).Return(nil) mockAPI.EXPECT().VolumeCloneCreate(ctx, gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) - mockAPI.EXPECT().VolumeWaitForStates(ctx, volConfig.InternalName, []string{"online"}, []string{"error"}, 
maxFlexvolCloneWait).AnyTimes().Return("online", nil) + mockAPI.EXPECT().VolumeWaitForStates(ctx, volConfig.InternalName, []string{"online"}, []string{"error"}, + maxFlexvolCloneWait).AnyTimes().Return("online", nil) mockAPI.EXPECT().VolumeSetComment(ctx, gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) err := driver.CreateClone(ctx, volConfig, volConfig, pool1) @@ -1262,7 +1281,8 @@ func TestOntapSanVolumeClone_ValidationTest(t *testing.T) { mockAPI.EXPECT().VolumeSnapshotCreate(ctx, gomock.Any(), gomock.Any()).Return(nil) mockAPI.EXPECT().VolumeCloneCreate(ctx, gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) - mockAPI.EXPECT().VolumeWaitForStates(ctx, volConfig.InternalName, []string{"online"}, []string{"error"}, maxFlexvolCloneWait).AnyTimes().Return("online", nil) + mockAPI.EXPECT().VolumeWaitForStates(ctx, volConfig.InternalName, []string{"online"}, []string{"error"}, + maxFlexvolCloneWait).AnyTimes().Return("online", nil) mockAPI.EXPECT().VolumeSetComment(ctx, gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) mockAPI.EXPECT().LunSetQosPolicyGroup(ctx, gomock.Any(), gomock.Any()).Return(nil) }, @@ -1882,7 +1902,8 @@ func TestOntapSanVolumeSnapshot(t *testing.T) { ).MaxTimes(1) mockAPI.EXPECT().LunSize(ctx, gomock.Any()).Return(1073741824, nil) - mockAPI.EXPECT().VolumeSnapshotCreate(ctx, snapshotConfig.InternalName, snapshotConfig.VolumeInternalName).Return(nil) + mockAPI.EXPECT().VolumeSnapshotCreate(ctx, snapshotConfig.InternalName, + snapshotConfig.VolumeInternalName).Return(nil) mockAPI.EXPECT().VolumeSnapshotInfo(ctx, snapshotConfig.InternalName, snapshotConfig.VolumeInternalName).Return( api.Snapshot{ @@ -1922,14 +1943,16 @@ func TestOntapSanVolumeSnapshot_SnapshotNotFound(t *testing.T) { ).MaxTimes(1) mockAPI.EXPECT().LunSize(ctx, gomock.Any()).Return(1073741824, nil) - mockAPI.EXPECT().VolumeSnapshotCreate(ctx, snapshotConfig.InternalName, snapshotConfig.VolumeInternalName).Return(nil) + mockAPI.EXPECT().VolumeSnapshotCreate(ctx, snapshotConfig.InternalName, + snapshotConfig.VolumeInternalName).Return(nil) mockAPI.EXPECT().VolumeSnapshotInfo(ctx, snapshotConfig.InternalName, snapshotConfig.VolumeInternalName).Return( api.Snapshot{ CreateTime: "", Name: snapshotConfig.InternalName, }, - errors.NotFoundError("snapshot %v not found for volume %v", snapshotConfig.InternalName, snapshotConfig.VolumeInternalName)) + errors.NotFoundError("snapshot %v not found for volume %v", snapshotConfig.InternalName, + snapshotConfig.VolumeInternalName)) _, err := driver.CreateSnapshot(ctx, snapshotConfig, volConfig) @@ -2088,6 +2111,31 @@ func TestOntapSanVolumeGetStorageBackendSpecs(t *testing.T) { assert.NoError(t, err, "Failed to get the storage backend specification") } +func TestOntapSanStorageDriverGetStorageBackendPools(t *testing.T) { + mockAPI, driver := newMockOntapSANDriver(t) + svmUUID := "SVM1-uuid" + driver.physicalPools = map[string]storage.Pool{ + "pool1": storage.NewStoragePool(nil, "pool1"), + "pool2": storage.NewStoragePool(nil, "pool2"), + } + mockAPI.EXPECT().GetSVMUUID().Return(svmUUID) + + pools := driver.getStorageBackendPools(ctx) + + assert.NotEmpty(t, pools) + assert.Equal(t, len(driver.physicalPools), len(pools)) + + pool := pools[0] + assert.NotNil(t, driver.physicalPools[pool.Aggregate]) + assert.Equal(t, driver.physicalPools[pool.Aggregate].Name(), pool.Aggregate) + assert.Equal(t, svmUUID, pool.SvmUUID) + + pool = pools[1] + assert.NotNil(t, driver.physicalPools[pool.Aggregate]) + assert.Equal(t, 
driver.physicalPools[pool.Aggregate].Name(), pool.Aggregate) + assert.Equal(t, svmUUID, pool.SvmUUID) +} + func TestOntapSanVolumeGetInternalVolumeName(t *testing.T) { ctx := context.Background() @@ -2640,6 +2688,11 @@ func TestOntapSanVolumePublishisFlexvolRW(t *testing.T) { Unmanaged: false, } + dummyLun := &api.Lun{ + Comment: "dummyLun", + SerialNumber: "testSerialNumber", + } + tests := []struct { name string mocks func(mockAPI *mockapi.MockOntapAPI) @@ -2692,6 +2745,7 @@ func TestOntapSanVolumePublishisFlexvolRW(t *testing.T) { mockAPI.EXPECT().IscsiNodeGetNameRequest(ctx).Times(1).Return("node1", nil) mockAPI.EXPECT().IscsiInterfaceGet(ctx, gomock.Any()).Return([]string{"iscsi_if"}, nil).Times(1) mockAPI.EXPECT().LunGetFSType(ctx, "/vol/lunName/lun0") + mockAPI.EXPECT().LunGetByName(ctx, "/vol/lunName/lun0").Return(dummyLun, nil) err := driver.Publish(ctx, volConfig, publishInfo) assert.Errorf(t, err, "no reporting nodes found") @@ -2838,6 +2892,7 @@ func TestOntapSanStorageDriverInitialize(t *testing.T) { mockAPI.EXPECT().NetInterfaceGetDataLIFs(ctx, "iscsi").Return([]string{"1.1.1.1"}, nil) mockAPI.EXPECT().EmsAutosupportLog(ctx, "ontap-san", "1", false, "heartbeat", hostname, string(message), 1, "trident", 5).AnyTimes() + mockAPI.EXPECT().GetSVMUUID().Return("SVM1-uuid") result := driver.Initialize(ctx, "CSI", configJSON, commonConfig, secrets, BackendUUID) @@ -3124,7 +3179,8 @@ func TestOntapSANStorageDriverEstablishMirror_Failure(t *testing.T) { { name: "ReplicationPolicyValidation_Fail", mocks: func(mockAPI *mockapi.MockOntapAPI) { - mockAPI.EXPECT().SnapmirrorPolicyGet(ctx, gomock.Any()).Times(2).Return(nil, fmt.Errorf("snap mirror fail")) + mockAPI.EXPECT().SnapmirrorPolicyGet(ctx, gomock.Any()).Times(2).Return(nil, + fmt.Errorf("snap mirror fail")) mockAPI.EXPECT().VolumeInfo(ctx, gomock.Any()).Return(&volume, nil) mockAPI.EXPECT().SnapmirrorGet(ctx, gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return( &api.Snapmirror{State: api.SnapmirrorStateSynchronizing}, nil) diff --git a/storage_drivers/solidfire/solidfire_san.go b/storage_drivers/solidfire/solidfire_san.go index 466849f7d..f2bf91a9e 100644 --- a/storage_drivers/solidfire/solidfire_san.go +++ b/storage_drivers/solidfire/solidfire_san.go @@ -1134,9 +1134,18 @@ func (d *SANStorageDriver) Destroy(ctx context.Context, volConfig *storage.Volum } if d.Config.DriverContext == tridentconfig.ContextDocker { + publishInfo := utils.VolumePublishInfo{ + DevicePath: "", + VolumeAccessInfo: utils.VolumeAccessInfo{ + IscsiAccessInfo: utils.IscsiAccessInfo{ + IscsiTargetIQN: v.Iqn, + IscsiLunNumber: 0, + }, + }, + } // Inform the host about the device removal - if _, err = utils.PrepareDeviceForRemoval(ctx, 0, v.Iqn, true, false); err != nil { + if _, err = utils.PrepareDeviceForRemoval(ctx, &publishInfo, nil, true, false); err != nil { Logc(ctx).Warningf("Unable to prepare device for removal, attempting to detach anyway: %v", err) } @@ -1180,7 +1189,7 @@ func (d *SANStorageDriver) Publish( // Get the fstype attrs, _ := v.Attributes.(map[string]interface{}) fstype := drivers.DefaultFileSystemType - if str, ok := attrs["fstype"].(string); ok { + if str, ok := attrs["fstype"].(string); ok && str != "" { fstype = str } diff --git a/storage_drivers/types.go b/storage_drivers/types.go index f4b04f488..e6e9caf70 100644 --- a/storage_drivers/types.go +++ b/storage_drivers/types.go @@ -72,6 +72,7 @@ type CommonStorageDriverConfig struct { StoragePrefixRaw json.RawMessage `json:"storagePrefix,string"` StoragePrefix *string 
`json:"-"` SerialNumbers []string `json:"serialNumbers,omitEmpty"` + BackendPools []string `json:"backendPools,omitEmpty"` DriverContext trident.DriverContext `json:"-"` LimitVolumeSize string `json:"limitVolumeSize"` Credentials map[string]string `json:"credentials"` @@ -148,6 +149,32 @@ type OntapStorageDriverPool struct { OntapStorageDriverConfigDefaults `json:"defaults"` } +// StorageBackendPool is a type constraint that enables drivers to generically report non-overlapping storage pools +// within a backend. +type StorageBackendPool interface { + OntapFlexGroupStorageBackendPool | OntapStorageBackendPool | OntapEconomyStorageBackendPool +} + +// OntapFlexGroupStorageBackendPool is a non-overlapping section of an ONTAP flexgroup backend that may be used for +// provisioning storage. +type OntapFlexGroupStorageBackendPool struct { + SvmUUID string `json:"svmUUID"` +} + +// OntapStorageBackendPool is a non-overlapping section of an ONTAP backend that may be used for provisioning storage. +type OntapStorageBackendPool struct { + SvmUUID string `json:"svmUUID"` + Aggregate string `json:"aggregate"` +} + +// OntapEconomyStorageBackendPool is a non-overlapping section of an ONTAP economy backend that may be used for +// provisioning storage. +type OntapEconomyStorageBackendPool struct { + SvmUUID string `json:"svmUUID"` + Aggregate string `json:"aggregate"` + FlexVolPrefix string `json:"flexVolPrefix"` +} + type OntapStorageDriverConfigDefaults struct { SpaceAllocation string `json:"spaceAllocation"` SpaceReserve string `json:"spaceReserve"` diff --git a/utils/devices.go b/utils/devices.go index 06263a747..542ff86bf 100644 --- a/utils/devices.go +++ b/utils/devices.go @@ -4,6 +4,7 @@ package utils import ( "bytes" + "encoding/hex" "fmt" "os" "path/filepath" @@ -19,7 +20,10 @@ import ( "github.com/netapp/trident/utils/errors" ) -const luksDevicePrefix = "luks-" +const ( + luksDevicePrefix = "luks-" + devPrefix = "/dev/" +) // waitForDevice accepts a device name and checks if it is present func waitForDevice(ctx context.Context, device string) error { @@ -42,7 +46,7 @@ func flushDevice(ctx context.Context, deviceInfo *ScsiDeviceInfo, force bool) er defer Logc(ctx).Debug("<<<< devices.flushDevice") for _, device := range deviceInfo.Devices { - err := flushOneDevice(ctx, "/dev/"+device) + err := flushOneDevice(ctx, devPrefix+device) if err != nil && !force { // Return error only if this is a standalone device, i.e. no multipath device is present for this device. 
// If a multipath device exists, then it should be flushed before flushing the device, @@ -153,7 +157,7 @@ func ISCSIRescanDevices(ctx context.Context, targetIQN string, lunID int32, minS allLargeEnough := true for _, diskDevice := range deviceInfo.Devices { - size, err := getISCSIDiskSize(ctx, "/dev/"+diskDevice) + size, err := getISCSIDiskSize(ctx, devPrefix+diskDevice) if err != nil { return err } @@ -173,7 +177,7 @@ func ISCSIRescanDevices(ctx context.Context, targetIQN string, lunID int32, minS if !allLargeEnough { time.Sleep(time.Second) for _, diskDevice := range deviceInfo.Devices { - size, err := getISCSIDiskSize(ctx, "/dev/"+diskDevice) + size, err := getISCSIDiskSize(ctx, devPrefix+diskDevice) if err != nil { return err } @@ -186,7 +190,7 @@ func ISCSIRescanDevices(ctx context.Context, targetIQN string, lunID int32, minS if deviceInfo.MultipathDevice != "" { multipathDevice := deviceInfo.MultipathDevice - size, err := getISCSIDiskSize(ctx, "/dev/"+multipathDevice) + size, err := getISCSIDiskSize(ctx, devPrefix+multipathDevice) if err != nil { return err } @@ -199,7 +203,7 @@ func ISCSIRescanDevices(ctx context.Context, targetIQN string, lunID int32, minS return err } time.Sleep(time.Second) - size, err = getISCSIDiskSize(ctx, "/dev/"+multipathDevice) + size, err = getISCSIDiskSize(ctx, devPrefix+multipathDevice) if err != nil { return err } @@ -224,7 +228,7 @@ func reloadMultipathDevice(ctx context.Context, multipathDevice string) error { return fmt.Errorf("cannot reload an empty multipathDevice") } - _, err := command.ExecuteWithTimeout(ctx, "multipath", 30*time.Second, true, "-r", "/dev/"+multipathDevice) + _, err := command.ExecuteWithTimeout(ctx, "multipath", 10*time.Second, true, "-r", devPrefix+multipathDevice) if err != nil { Logc(ctx).WithFields(LogFields{ "device": multipathDevice, @@ -328,7 +332,7 @@ func listAllISCSIDevices(ctx context.Context) { dmLog := make([]string, 0) sdLog := make([]string, 0) sysLog := make([]string, 0) - entries, _ := os.ReadDir("/dev/") + entries, _ := os.ReadDir(devPrefix) for _, entry := range entries { if strings.HasPrefix(entry.Name(), "dm-") { dmLog = append(dmLog, entry.Name()) @@ -457,7 +461,7 @@ func multipathFlushDevice(ctx context.Context, deviceInfo *ScsiDeviceInfo) error return nil } - devicePath := "/dev/" + deviceInfo.MultipathDevice + devicePath := devPrefix + deviceInfo.MultipathDevice deviceErr := canFlushMultipathDevice(ctx, devicePath) if deviceErr != nil { @@ -507,7 +511,7 @@ func GetMountedISCSIDevices(ctx context.Context) ([]*ScsiDeviceInfo, error) { mountedDevices := make([]string, 0) for _, procMount := range procSelfMountinfo { - hasDevMountSourcePrefix := strings.HasPrefix(procMount.MountSource, "/dev/") + hasDevMountSourcePrefix := strings.HasPrefix(procMount.MountSource, devPrefix) hasPvcMountPoint := strings.Contains(procMount.MountPoint, "/pvc-") if !hasPvcMountPoint { @@ -522,7 +526,7 @@ func GetMountedISCSIDevices(ctx context.Context) ([]*ScsiDeviceInfo, error) { Logc(ctx).Error(err) continue } - mountedDevice = strings.TrimPrefix(device, "/dev/") + mountedDevice = strings.TrimPrefix(device, devPrefix) } else { mountedDevice = strings.TrimPrefix(procMount.Root, "/") } @@ -862,10 +866,215 @@ func findDevicesForMultipathDevice(ctx context.Context, device string) []string return devices } -// PrepareDeviceForRemoval informs Linux that a device will be removed. 
-func PrepareDeviceForRemoval(ctx context.Context, lunID int, iSCSINodeName string, ignoreErrors, force bool) (string, error) {
+// compareWithPublishedDevicePath verifies that the published path matches the discovered device path
+func compareWithPublishedDevicePath(ctx context.Context, publishInfo *VolumePublishInfo,
+	deviceInfo *ScsiDeviceInfo,
+) (bool, error) {
+	isProbablyGhostDevice := false
+	discoverMpath := strings.TrimPrefix(deviceInfo.MultipathDevice, devPrefix)
+	publishedMpath := strings.TrimPrefix(publishInfo.DevicePath, devPrefix)
+
+	if discoverMpath != publishedMpath {
+		// If this is the case, a wrong multipath device has been identified.
+		// Reset the multipath device and disks.
+		Logc(ctx).WithFields(LogFields{
+			"lun":                       publishInfo.IscsiLunNumber,
+			"discoveredMultipathDevice": discoverMpath,
+			"publishedMultipathDevice":  publishedMpath,
+		}).Debug("Discovered multipath device may not be correct.")
+
+		deviceInfo.MultipathDevice = strings.TrimPrefix(publishedMpath, devPrefix)
+		deviceInfo.Devices = []string{}
+
+		// Get the disks behind the multipath device and, at the same time, identify whether it is a ghost device.
+		devices, err := IscsiUtils.GetMultipathDeviceDisks(ctx, deviceInfo.MultipathDevice)
+		if err != nil {
+			return false, fmt.Errorf("failed to verify multipath disks for '%v'; %v",
+				deviceInfo.MultipathDevice, err)
+		}
+
+		isProbablyGhostDevice = devices == nil || len(devices) == 0
+		if isProbablyGhostDevice {
+			Logc(ctx).WithFields(LogFields{
+				"lun":             publishInfo.IscsiLunNumber,
+				"multipathDevice": deviceInfo.MultipathDevice,
+			}).Debug("Multipath device may be a ghost device.")
+		} else {
+			deviceInfo.Devices = devices
+		}
+
+		Logc(ctx).WithFields(LogFields{
+			"lun":             publishInfo.IscsiLunNumber,
+			"multipathDevice": deviceInfo.MultipathDevice,
+			"devices":         deviceInfo.Devices,
+		}).Debug("Updated multipath device and devices.")
+	} else {
+		Logc(ctx).WithFields(LogFields{
+			"lun":                       publishInfo.IscsiLunNumber,
+			"publishedMultipathDevice":  publishedMpath,
+			"discoveredMultipathDevice": discoverMpath,
+			"devices":                   deviceInfo.Devices,
+		}).Debug("Discovered multipath device is valid.")
+	}
+
+	return isProbablyGhostDevice, nil
+}
+
+// compareWithPublishedSerialNumber verifies that the published LUN serial number matches the discovered devices
+func compareWithPublishedSerialNumber(ctx context.Context, publishInfo *VolumePublishInfo,
+	deviceInfo *ScsiDeviceInfo,
+) (bool, error) {
+	isProbablyGhostDevice := false
+	lunSerialCheckPassed := false
+
+	for _, path := range deviceInfo.DevicePaths {
+		serial, err := getLunSerial(ctx, path)
+		if err != nil {
+			// LUN either isn't scanned yet, or this kernel
+			// doesn't support VPD page 80 in sysfs. Assume
+			// correctness and move on
+			Logc(ctx).WithError(err).WithFields(LogFields{
+				"lun":  publishInfo.IscsiLunNumber,
+				"path": path,
+			}).Error("LUN serial check skipped.")
+			continue
+		}
+
+		lunSerialCheckPassed = serial == publishInfo.IscsiLunSerial
+		if !lunSerialCheckPassed {
+			Logc(ctx).WithFields(LogFields{
+				"lun":  publishInfo.IscsiLunNumber,
+				"path": path,
+			}).Error("LUN serial check failed.")
+			break
+		}
+	}
+
+	// If the serial check did not pass, the multipath device that was found is the wrong one
+	if !lunSerialCheckPassed {
+
+		// Find the device based on the serial number and, at the same time, identify whether it is a ghost device.
+		// The multipath device UUID contains the LUN serial in hex format
+		lunSerialHex := hex.EncodeToString([]byte(publishInfo.IscsiLunSerial))
+		multipathDevice, err := IscsiUtils.GetMultipathDeviceBySerial(ctx, lunSerialHex)
+		if err != nil {
+			return false, fmt.Errorf("failed to verify multipath device for serial '%v'; %v",
+				publishInfo.IscsiLunSerial, err)
+		}
+
+		deviceInfo.MultipathDevice = strings.TrimPrefix(multipathDevice, devPrefix)
+
+		// Get the disks behind the multipath device and, at the same time, identify whether it is a ghost device.
+		devices, err := IscsiUtils.GetMultipathDeviceDisks(ctx, multipathDevice)
+		if err != nil {
+			return false, fmt.Errorf("failed to verify multipath disks for '%v', "+
+				"serial '%v'; %v", multipathDevice, publishInfo.IscsiLunSerial, err)
+		}
+
+		isProbablyGhostDevice = devices == nil || len(devices) == 0
+		if isProbablyGhostDevice {
+			Logc(ctx).WithFields(LogFields{
+				"lun":             publishInfo.IscsiLunNumber,
+				"multipathDevice": multipathDevice,
+			}).Debug("Multipath device may be a ghost device.")
+		} else {
+			deviceInfo.Devices = devices
+		}
+	}
+
+	Logc(ctx).WithFields(LogFields{
+		"lun":             publishInfo.IscsiLunNumber,
+		"multipathDevice": deviceInfo.MultipathDevice,
+		"devices":         deviceInfo.Devices,
+	}).Debug("Discovered multipath device and devices have valid serial number.")
+
+	return isProbablyGhostDevice, nil
+}
+
+// compareWithAllPublishInfos compares all publications (allPublishInfos) for
+// LUN number uniqueness; if more than one publication exists with the same LUN number,
+// it indicates a larger problem that the user needs to fix manually
+func compareWithAllPublishInfos(ctx context.Context, publishInfo *VolumePublishInfo,
+	allPublishInfos []VolumePublishInfo, deviceInfo *ScsiDeviceInfo,
+) error {
+	// During unstaging, at least one publish info should exist; otherwise
+	// there is some issue on the node.
+	if len(allPublishInfos) < 1 {
+		Logc(ctx).WithFields(LogFields{
+			"lun": publishInfo.IscsiLunNumber,
+		}).Debug("Missing all the publish infos; re-requesting.")
+
+		return errors.ISCSISameLunNumberError(fmt.Sprintf(
+			"failed to verify multipath device '%v' with LUN number '%v' due to missing publish infos",
+			deviceInfo.MultipathDevice, publishInfo.IscsiLunNumber))
+	}
+
+	// Identify whether multiple publishInfos for a given targetIQN have the same LUN number
+	var count int
+	for _, info := range allPublishInfos {
+		if publishInfo.IscsiLunNumber == info.IscsiLunNumber && publishInfo.IscsiTargetIQN == info.IscsiTargetIQN {
+			count++
+		}
+	}
+
+	if count > 1 {
+		listAllISCSIDevices(ctx)
+
+		Logc(ctx).WithFields(LogFields{
+			"lun":   publishInfo.IscsiLunNumber,
+			"count": count,
+		}).Error("Found multiple publish infos with same LUN ID.")
+
+		return fmt.Errorf("found multiple publish infos with same LUN ID '%d'; the user needs to correct the publish"+
+			" information by including the missing 'devicePath' based on `multipath -ll` output",
+			publishInfo.IscsiLunNumber)
+	}
+
+	Logc(ctx).WithFields(LogFields{
+		"lun":   publishInfo.IscsiLunNumber,
+		"count": count,
+	}).Debug("Found publish info with the same LUN ID.")
+
+	return nil
+}
+
+// verifyMultipathDevice verifies that the device being removed is correct based on the published device path,
+// the device serial number (if present), or by comparing all publications (allPublishInfos) for
+// LUN number uniqueness.
+func verifyMultipathDevice(ctx context.Context, publishInfo *VolumePublishInfo, allPublishInfos []VolumePublishInfo,
+	deviceInfo *ScsiDeviceInfo,
+) (bool, error) {
+	// Ensure a correct multipath device is being discovered.
+	// The following steps can be performed:
+	// 1. If the DM device is known, compare it with deviceInfo.MultipathDevice.
+	//    If they do not match, check whether the DM device is a ghost device by checking /sys/block.../slaves
+	//    and remove it.
+	// 2. Else, if the LUN serial number is available, compare it with the serial number of deviceInfo.Devices.
+	//    If they do not match, find a DM device with the matching serial number; if it is a ghost device
+	//    (checked via /sys/block.../uuid), remove it.
+	// 3. Else, check all tracking infos to ensure that no more than one tracking file has the same LUN number.
+	//    If multiple are found, then it requires user intervention.
+
+	if publishInfo.DevicePath != "" {
+		return compareWithPublishedDevicePath(ctx, publishInfo, deviceInfo)
+	} else if publishInfo.IscsiLunSerial != "" {
+		return compareWithPublishedSerialNumber(ctx, publishInfo, deviceInfo)
+	}
+
+	return false, compareWithAllPublishInfos(ctx, publishInfo, allPublishInfos, deviceInfo)
+}
+
+// PrepareDeviceForRemoval informs Linux that a device will be removed; it also verifies that
+// the device being removed is correct based on the published device path,
+// the device serial number (if present), or by comparing all publications (allPublishInfos) for
+// LUN number uniqueness.
+func PrepareDeviceForRemoval(ctx context.Context, publishInfo *VolumePublishInfo, allPublishInfos []VolumePublishInfo, ignoreErrors,
+	force bool,
+) (string, error) {
 	GenerateRequestContextForLayer(ctx, LogLayerUtils)
 
+	lunID := int(publishInfo.IscsiLunNumber)
+	iSCSINodeName := publishInfo.IscsiTargetIQN
+
 	fields := LogFields{
 		"lunID":         lunID,
 		"iSCSINodeName": iSCSINodeName,
@@ -893,9 +1102,16 @@ func PrepareDeviceForRemoval(ctx context.Context, lunID int, iSCSINodeName strin
 		return multipathDevice, nil
 	}
 
+	if publishInfo.IscsiTargetPortal != "" /* CSI Case */ {
+		_, err = verifyMultipathDevice(ctx, publishInfo, allPublishInfos, deviceInfo)
+		if err != nil {
+			return multipathDevice, err
+		}
+	}
+
 	performDeferredDeviceRemoval, err = removeSCSIDevice(ctx, deviceInfo, ignoreErrors, force)
 	if performDeferredDeviceRemoval && deviceInfo.MultipathDevice != "" {
-		multipathDevice = "/dev/" + deviceInfo.MultipathDevice
+		multipathDevice = devPrefix + deviceInfo.MultipathDevice
 	}
 
 	return multipathDevice, err
@@ -1013,6 +1229,7 @@ type ScsiDeviceInfo struct {
 	Target          string
 	LUN             string
 	Devices         []string
+	DevicePaths     []string
 	MultipathDevice string
 	Filesystem      string
 	IQN             string
@@ -1065,9 +1282,9 @@ func getDeviceInfoForLUN(
 
 	var devicePath string
 	if multipathDevice != "" {
-		devicePath = "/dev/" + multipathDevice
+		devicePath = devPrefix + multipathDevice
 	} else {
-		devicePath = "/dev/" + devices[0]
+		devicePath = devPrefix + devices[0]
 	}
 
 	fsType := ""
@@ -1084,7 +1301,7 @@ func getDeviceInfoForLUN(
 	}
 
 	Logc(ctx).WithFields(LogFields{
-		"LUN":             strconv.Itoa(lunID),
+		"lun":             strconv.Itoa(lunID),
 		"multipathDevice": multipathDevice,
 		"fsType":          fsType,
 		"deviceNames":     devices,
@@ -1095,6 +1312,7 @@ func getDeviceInfoForLUN(
 		LUN:             strconv.Itoa(lunID),
 		MultipathDevice: multipathDevice,
 		Devices:         devices,
+		DevicePaths:     paths,
 		Filesystem:      fsType,
 		IQN:             iSCSINodeName,
 		HostSessionMap:  hostSessionMap,
@@ -1121,7 +1339,7 @@ func getDeviceInfoForMountPath(ctx context.Context, mountpath string) (*ScsiDevi
 		return nil, err
 	}
 
-	device = strings.TrimPrefix(device, "/dev/")
+	device = strings.TrimPrefix(device, devPrefix)
 
 	var deviceInfo *ScsiDeviceInfo
 
diff --git a/utils/errors/errors.go b/utils/errors/errors.go
index 3afad7ef7..8edeb4ecf 100644
--- a/utils/errors/errors.go
+++ b/utils/errors/errors.go
@@
-593,6 +593,28 @@ func IsISCSIDeviceFlushError(err error) bool { return ok } +// /////////////////////////////////////////////////////////////////////////// +// iSCSISameLunNumberError +// /////////////////////////////////////////////////////////////////////////// + +type iSCSISameLunNumberError struct { + message string +} + +func (e *iSCSISameLunNumberError) Error() string { return e.message } + +func ISCSISameLunNumberError(message string) error { + return &iSCSISameLunNumberError{message} +} + +func IsISCSISameLunNumberError(err error) bool { + if err == nil { + return false + } + _, ok := err.(*iSCSISameLunNumberError) + return ok +} + // /////////////////////////////////////////////////////////////////////////// // tooManyRequestsError (HTTP 429) // /////////////////////////////////////////////////////////////////////////// diff --git a/utils/iscsi.go b/utils/iscsi.go index f53437ec4..4d6638ba2 100644 --- a/utils/iscsi.go +++ b/utils/iscsi.go @@ -5,6 +5,7 @@ package utils import ( "context" "encoding/binary" + "encoding/hex" "fmt" "os" "os/exec" @@ -58,17 +59,19 @@ var ( // AttachISCSIVolumeRetry attaches a volume with retry by invoking AttachISCSIVolume with backoff. func AttachISCSIVolumeRetry( ctx context.Context, name, mountpoint string, publishInfo *VolumePublishInfo, secrets map[string]string, timeout time.Duration, -) error { +) (int64, error) { Logc(ctx).Debug(">>>> iscsi.AttachISCSIVolumeRetry") defer Logc(ctx).Debug("<<<< iscsi.AttachISCSIVolumeRetry") var err error + var mpathSize int64 if err = ISCSIPreChecks(ctx); err != nil { - return err + return mpathSize, err } checkAttachISCSIVolume := func() error { - return AttachISCSIVolume(ctx, name, mountpoint, publishInfo, secrets) + mpathSize, err = AttachISCSIVolume(ctx, name, mountpoint, publishInfo, secrets) + return err } attachNotify := func(err error, duration time.Duration) { @@ -85,18 +88,21 @@ func AttachISCSIVolumeRetry( attachBackoff.MaxElapsedTime = timeout err = backoff.RetryNotify(checkAttachISCSIVolume, attachBackoff, attachNotify) - return err + return mpathSize, err } // AttachISCSIVolume attaches the volume to the local host. This method must be able to accomplish its task using only the data passed in. // It may be assumed that this method always runs on the host to which the volume will be attached. If the mountpoint // parameter is specified, the volume will be mounted. The device path is set on the in-out publishInfo parameter // so that it may be mounted later instead. 
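+// A hypothetical caller (illustrative only, not part of this change) would consume the new
+// (int64, error) return values like so:
+//
+//	mpathSize, err := AttachISCSIVolume(ctx, name, mountpoint, publishInfo, secrets)
+//	if err == nil && mpathSize > 0 {
+//		// The multipath device reported a stale (ghost) size; resize it after staging.
+//	}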
-func AttachISCSIVolume(ctx context.Context, name, mountpoint string, publishInfo *VolumePublishInfo, secrets map[string]string) error {
+func AttachISCSIVolume(ctx context.Context, name, mountpoint string, publishInfo *VolumePublishInfo,
+	secrets map[string]string,
+) (int64, error) {
 	Logc(ctx).Debug(">>>> iscsi.AttachISCSIVolume")
 	defer Logc(ctx).Debug("<<<< iscsi.AttachISCSIVolume")
 
 	var err error
+	var mpathSize int64
 	lunID := int(publishInfo.IscsiLunNumber)
 
 	var portals []string
@@ -125,38 +131,38 @@ func AttachISCSIVolume(ctx context.Context, name, mountpoint string, publishInfo
 	}).Debug("Attaching iSCSI volume.")
 
 	if err = ISCSIPreChecks(ctx); err != nil {
-		return err
+		return mpathSize, err
 	}
 
 	// Ensure we are logged into correct portals
 	pendingPortalsToLogin, loggedIn, err := portalsToLogin(ctx, publishInfo.IscsiTargetIQN, portals)
 	if err != nil {
-		return err
+		return mpathSize, err
 	}
 
 	newLogin, err := EnsureISCSISessions(ctx, publishInfo, pendingPortalsToLogin)
 	if !loggedIn && !newLogin {
-		return err
+		return mpathSize, err
 	}
 
 	// First attempt to fix invalid serials by rescanning them
 	err = handleInvalidSerials(ctx, lunID, publishInfo.IscsiTargetIQN, publishInfo.IscsiLunSerial, rescanOneLun)
 	if err != nil {
-		return err
+		return mpathSize, err
 	}
 
 	// Then attempt to fix invalid serials by purging them (to be scanned
 	// again later)
 	err = handleInvalidSerials(ctx, lunID, publishInfo.IscsiTargetIQN, publishInfo.IscsiLunSerial, purgeOneLun)
 	if err != nil {
-		return err
+		return mpathSize, err
 	}
 
 	// Scan the target and wait for the device(s) to appear
 	err = waitForDeviceScan(ctx, lunID, publishInfo.IscsiTargetIQN)
 	if err != nil {
 		Logc(ctx).Errorf("Could not find iSCSI device: %+v", err)
-		return err
+		return mpathSize, err
 	}
 
 	// At this point if the serials are still invalid, give up so the
@@ -168,21 +174,21 @@ func AttachISCSIVolume(ctx context.Context, name, mountpoint string, publishInfo
 	}
 	err = handleInvalidSerials(ctx, lunID, publishInfo.IscsiTargetIQN, publishInfo.IscsiLunSerial, failHandler)
 	if err != nil {
-		return err
+		return mpathSize, err
 	}
 
 	// Wait for multipath device i.e. /dev/dm-* for the given LUN
 	err = waitForMultipathDeviceForLUN(ctx, lunID, publishInfo.IscsiTargetIQN)
 	if err != nil {
-		return err
+		return mpathSize, err
 	}
 
 	// Lookup all the SCSI device information
 	deviceInfo, err := getDeviceInfoForLUN(ctx, lunID, publishInfo.IscsiTargetIQN, false, false)
 	if err != nil {
-		return fmt.Errorf("error getting iSCSI device information: %v", err)
+		return mpathSize, fmt.Errorf("error getting iSCSI device information: %v", err)
 	} else if deviceInfo == nil {
-		return fmt.Errorf("could not get iSCSI device information for LUN %d", lunID)
+		return mpathSize, fmt.Errorf("could not get iSCSI device information for LUN %d", lunID)
 	}
 
 	Logc(ctx).WithFields(LogFields{
@@ -192,24 +198,73 @@ func AttachISCSIVolume(ctx context.Context, name, mountpoint string, publishInfo
 		"iqn":     deviceInfo.IQN,
 	}).Debug("Found device.")
 
-	// Make sure we use the proper device (multipath if in use)
+	// Make sure we use the proper device
 	deviceToUse := deviceInfo.Devices[0]
 	if deviceInfo.MultipathDevice != "" {
 		deviceToUse = deviceInfo.MultipathDevice
+
+		// To avoid a LUN ID conflict with a ghost device, the checks below
+		// are necessary:
+		// Conflict 1: Due to race conditions, it is possible that a ghost
+		//             DM device is discovered instead of the actual
+		//             DM device.
+		// Conflict 2: Some OSes, like RHEL, display the ghost device size
+		//             instead of the actual LUN size.
+		//
+		// The checks below ensure that the correct device with the correct
+		// size is discovered.
+
+		// If the LUN serial number exists, compare it with the DM
+		// device's UUID in sysfs
+		if err = verifyMultipathDeviceSerial(ctx, deviceToUse, publishInfo.IscsiLunSerial); err != nil {
+			return mpathSize, err
+		}
+
+		// Once the multipath device has been found, compare its size with
+		// the size of one of the devices; if it differs, mark it for
+		// resize after staging.
+		correctMpathSize, mpathSizeCorrect, err := verifyMultipathDeviceSize(ctx, deviceToUse, deviceInfo.Devices[0])
+		if err != nil {
+			Logc(ctx).WithFields(LogFields{
+				"scsiLun":         deviceInfo.LUN,
+				"multipathDevice": deviceInfo.MultipathDevice,
+				"device":          deviceInfo.Devices[0],
+				"iqn":             deviceInfo.IQN,
+				"err":             err,
+			}).Error("Failed to verify multipath device size.")
+
+			return mpathSize, fmt.Errorf("failed to verify multipath device %s size", deviceInfo.MultipathDevice)
+		}
+
+		if !mpathSizeCorrect {
+			mpathSize = correctMpathSize
+
+			Logc(ctx).WithFields(LogFields{
+				"scsiLun":         deviceInfo.LUN,
+				"multipathDevice": deviceInfo.MultipathDevice,
+				"device":          deviceInfo.Devices[0],
+				"iqn":             deviceInfo.IQN,
+				"mpathSize":       mpathSize,
+			}).Error("Multipath device size does not match device size.")
+		}
+	} else {
+		return mpathSize, fmt.Errorf("could not find multipath device for LUN %d", lunID)
 	}
+
 	if deviceToUse == "" {
-		return fmt.Errorf("could not determine device to use for %v", name)
+		return mpathSize, fmt.Errorf("could not determine device to use for %v", name)
 	}
 	devicePath := "/dev/" + deviceToUse
 	if err := waitForDevice(ctx, devicePath); err != nil {
-		return fmt.Errorf("could not find device %v; %s", devicePath, err)
+		return mpathSize, fmt.Errorf("could not find device %v; %s", devicePath, err)
 	}
 
 	var isLUKSDevice, luksFormatted bool
 	if publishInfo.LUKSEncryption != "" {
 		isLUKSDevice, err = strconv.ParseBool(publishInfo.LUKSEncryption)
 		if err != nil {
-			return fmt.Errorf("could not parse LUKSEncryption into a bool, got %v", publishInfo.LUKSEncryption)
+			return mpathSize, fmt.Errorf("could not parse LUKSEncryption into a bool, got %v",
+				publishInfo.LUKSEncryption)
 		}
 	}
 
@@ -217,7 +272,7 @@ func AttachISCSIVolume(ctx context.Context, name, mountpoint string, publishInfo
 		luksDevice, _ := NewLUKSDevice(devicePath, name)
 		luksFormatted, err = EnsureLUKSDeviceMappedOnHost(ctx, luksDevice, name, secrets)
 		if err != nil {
-			return err
+			return mpathSize, err
 		}
 		devicePath = luksDevice.MappedDevicePath()
 	}
 
@@ -226,36 +281,36 @@ func AttachISCSIVolume(ctx context.Context, name, mountpoint string, publishInfo
 	publishInfo.DevicePath = devicePath
 
 	if publishInfo.FilesystemType == fsRaw {
-		return nil
+		return mpathSize, nil
 	}
 
 	existingFstype, err := getDeviceFSType(ctx, devicePath)
 	if err != nil {
-		return err
+		return mpathSize, err
 	}
 	if existingFstype == "" {
 		if !isLUKSDevice {
 			if unformatted, err := isDeviceUnformatted(ctx, devicePath); err != nil {
 				Logc(ctx).WithField("device", devicePath).Errorf("Unable to identify if the device is unformatted; err: %v", err)
-				return err
+				return mpathSize, err
 			} else if !unformatted {
 				Logc(ctx).WithField("device", devicePath).Errorf("Device is not unformatted; err: %v", err)
-				return fmt.Errorf("device %v is not unformatted", devicePath)
+				return mpathSize, fmt.Errorf("device %v is not unformatted", devicePath)
 			}
 		} else {
 			// We can safely assume if we just luksFormatted the device, we can also add a filesystem without dataloss
 			if !luksFormatted {
 				Logc(ctx).WithField("device",
					devicePath).Errorf("Unable to identify if the LUKS device is empty; err: %v", err)
-				return err
+				return mpathSize, err
 			}
 		}
 
 		Logc(ctx).WithFields(LogFields{"volume": name, "fstype": publishInfo.FilesystemType}).Debug("Formatting LUN.")
 		err := formatVolume(ctx, devicePath, publishInfo.FilesystemType)
 		if err != nil {
-			return fmt.Errorf("error formatting LUN %s, device %s: %v", name, deviceToUse, err)
+			return mpathSize, fmt.Errorf("error formatting LUN %s, device %s: %v", name, deviceToUse, err)
 		}
 	} else if existingFstype != unknownFstype && existingFstype != publishInfo.FilesystemType {
 		Logc(ctx).WithFields(LogFields{
@@ -263,7 +318,7 @@ func AttachISCSIVolume(ctx context.Context, name, mountpoint string, publishInfo
 			"existingFstype":  existingFstype,
 			"requestedFstype": publishInfo.FilesystemType,
 		}).Error("LUN already formatted with a different file system type.")
-		return fmt.Errorf("LUN %s, device %s already formatted with other filesystem: %s",
+		return mpathSize, fmt.Errorf("LUN %s, device %s already formatted with other filesystem: %s",
 			name, deviceToUse, existingFstype)
 	} else {
 		Logc(ctx).WithFields(LogFields{
@@ -278,7 +333,7 @@ func AttachISCSIVolume(ctx context.Context, name, mountpoint string, publishInfo
 	// even if they are completely and automatically fixed, so we don't return any error here.
 	mounted, err := IsMounted(ctx, devicePath, "", "")
 	if err != nil {
-		return err
+		return mpathSize, err
 	}
 	if !mounted {
 		_ = repairVolume(ctx, devicePath, publishInfo.FilesystemType)
@@ -287,12 +342,12 @@ func AttachISCSIVolume(ctx context.Context, name, mountpoint string, publishInfo
 	// Optionally mount the device
 	if mountpoint != "" {
 		if err := MountDevice(ctx, devicePath, mountpoint, publishInfo.MountOptions, false); err != nil {
-			return fmt.Errorf("error mounting LUN %v, device %v, mountpoint %v; %s",
+			return mpathSize, fmt.Errorf("error mounting LUN %v, device %v, mountpoint %v; %s",
 				name, deviceToUse, mountpoint, err)
 		}
 	}
 
-	return nil
+	return mpathSize, nil
 }
 
 // GetInitiatorIqns returns parsed contents of /etc/iscsi/initiatorname.iscsi
@@ -376,6 +431,89 @@ func (h *IscsiReconcileHelper) GetDevicesForLUN(paths []string) ([]string, error
 	return devices, nil
 }
 
+// GetMultipathDeviceUUID finds the /sys/block/dmX/dm/uuid UUID that contains the DM device serial in hex format.
+func (h *IscsiReconcileHelper) GetMultipathDeviceUUID(multipathDevicePath string) (string, error) {
+	multipathDevice := strings.TrimPrefix(multipathDevicePath, "/dev/")
+
+	deviceUUIDPath := chrootPathPrefix + fmt.Sprintf("/sys/block/%s/dm/uuid", multipathDevice)
+
+	exists, err := PathExists(deviceUUIDPath)
+	if !exists || err != nil {
+		return "", errors.NotFoundError("multipath device '%s' UUID not found", multipathDevice)
+	}
+
+	UUID, err := os.ReadFile(deviceUUIDPath)
+	if err != nil {
+		return "", err
+	}
+
+	return string(UUID), nil
+}
+
+// GetMultipathDeviceDisks finds the /sys/block/dmX/slaves/sdX disks.
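+// For example (illustrative): for a dm-0 device backed by two iSCSI paths, this would return
+// ["sda", "sdb"] from /sys/block/dm-0/slaves/; an empty result suggests a ghost device with no
+// backing disks.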
+func (h *IscsiReconcileHelper) GetMultipathDeviceDisks(ctx context.Context, multipathDevicePath string) ([]string,
+	error,
+) {
+	devices := make([]string, 0)
+	multipathDevice := strings.TrimPrefix(multipathDevicePath, "/dev/")
+
+	diskPath := chrootPathPrefix + fmt.Sprintf("/sys/block/%s/slaves/", multipathDevice)
+	diskDirs, err := os.ReadDir(diskPath)
+	if err != nil {
+		Logc(ctx).WithError(err).Errorf("Could not read %s", diskPath)
+		return nil, fmt.Errorf("failed to identify multipath device disks; unable to read '%s'", diskPath)
+	}
+
+	for _, diskDir := range diskDirs {
+		contentName := diskDir.Name()
+		if !strings.HasPrefix(contentName, "sd") {
+			continue
+		}
+
+		devices = append(devices, contentName)
+	}
+
+	return devices, nil
+}
+
+// GetMultipathDeviceBySerial finds the DM device whose UUID (/sys/block/dmX/dm/uuid) contains the serial in hex format.
+func (h *IscsiReconcileHelper) GetMultipathDeviceBySerial(ctx context.Context, hexSerial string) (string, error) {
+	sysPath := chrootPathPrefix + "/sys/block/"
+
+	blockDirs, err := os.ReadDir(sysPath)
+	if err != nil {
+		Logc(ctx).WithError(err).Errorf("Could not read %s", sysPath)
+		return "", fmt.Errorf("failed to find multipath device by serial; unable to read '%s'", sysPath)
+	}
+
+	for _, blockDir := range blockDirs {
+		dmDeviceName := blockDir.Name()
+		if !strings.HasPrefix(dmDeviceName, "dm-") {
+			continue
+		}
+
+		uuid, err := h.GetMultipathDeviceUUID(dmDeviceName)
+		if err != nil {
+			Logc(ctx).WithFields(LogFields{
+				"UUID":            hexSerial,
+				"multipathDevice": dmDeviceName,
+				"err":             err,
+			}).Error("Failed to get UUID of multipath device.")
+			continue
+		}
+
+		if strings.Contains(uuid, hexSerial) {
+			Logc(ctx).WithFields(LogFields{
+				"UUID":            hexSerial,
+				"multipathDevice": dmDeviceName,
+			}).Debug("Found multipath device by UUID.")
+			return dmDeviceName, nil
+		}
+	}
+
+	return "", errors.NotFoundError("no multipath device found")
+}
+
 // waitForDeviceScan scans all paths to a specific LUN and waits until all
 // SCSI disk-by-path devices for that LUN are present on the host.
 func waitForDeviceScan(ctx context.Context, lunID int, iSCSINodeName string) error {
@@ -935,6 +1073,88 @@ func handleInvalidSerials(
 	return nil
 }
 
+// verifyMultipathDeviceSerial compares the serial number of the DM device with the serial
+// of the LUN to ensure the correct DM device has been discovered
+func verifyMultipathDeviceSerial(
+	ctx context.Context, multipathDevice, lunSerial string,
+) error {
+	if lunSerial == "" {
+		// Empty string means don't care
+		return nil
+	}
+
+	// The multipath device UUID contains the LUN serial in hex format
+	lunSerialHex := hex.EncodeToString([]byte(lunSerial))
+
+	multipathDeviceUUID, err := IscsiUtils.GetMultipathDeviceUUID(multipathDevice)
+	if err != nil {
+		if errors.IsNotFoundError(err) {
+			// If the UUID does not exist, then it is hard to verify the DM serial
+			Logc(ctx).WithFields(LogFields{
+				"multipathDevice": multipathDevice,
+				"lunSerialNumber": lunSerial,
+			}).Warn("Unable to verify multipath device serial.")
+
+			return nil
+		}
+
+		Logc(ctx).WithFields(LogFields{
+			"multipathDevice": multipathDevice,
+			"lunSerialNumber": lunSerial,
+			"error":           err,
+		}).Error("Failed to verify multipath device serial.")
+
+		return err
+	}
+
+	if !strings.Contains(multipathDeviceUUID, lunSerialHex) {
+		Logc(ctx).WithFields(LogFields{
+			"multipathDevice":     multipathDevice,
+			"lunSerialNumber":     lunSerial,
+			"lunSerialNumberHex":  lunSerialHex,
+			"multipathDeviceUUID": multipathDeviceUUID,
+		}).Error("Failed to verify multipath device serial.")
+
+		return fmt.Errorf("multipath device '%s' serial check failed", multipathDevice)
+	}
+
+	Logc(ctx).WithFields(LogFields{
+		"multipathDevice":     multipathDevice,
+		"lunSerialNumber":     lunSerial,
+		"lunSerialNumberHex":  lunSerialHex,
+		"multipathDeviceUUID": multipathDeviceUUID,
+	}).Debug("Multipath device serial check passed.")
+
+	return nil
+}
+
+// verifyMultipathDeviceSize compares the size of the DM device with the size
+// of one of its backing devices to ensure the DM device has the correct size.
+func verifyMultipathDeviceSize(
+	ctx context.Context, multipathDevice, device string,
+) (int64, bool, error) {
+	deviceSize, err := getISCSIDiskSize(ctx, "/dev/"+device)
+	if err != nil {
+		return 0, false, err
+	}
+
+	mpathSize, err := getISCSIDiskSize(ctx, "/dev/"+multipathDevice)
+	if err != nil {
+		return 0, false, err
+	}
+
+	if deviceSize != mpathSize {
+		return deviceSize, false, nil
+	}
+
+	Logc(ctx).WithFields(LogFields{
+		"multipathDevice": multipathDevice,
+		"device":          device,
+	}).Debug("Multipath device size check passed.")
+
+	return 0, true, nil
+}
+
 // GetISCSIHostSessionMapForTarget returns a map of iSCSI host numbers to iSCSI session numbers
 // for a given iSCSI target.
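// For example (illustrative): a result of map[int]int{3: 1} means SCSI host 3 carries iSCSI
// session 1 for the given target.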
 func (h *IscsiReconcileHelper) GetISCSIHostSessionMapForTarget(ctx context.Context, iSCSINodeName string) map[int]int {
diff --git a/utils/iscsi_types.go b/utils/iscsi_types.go
index 550a57492..d9c379f31 100644
--- a/utils/iscsi_types.go
+++ b/utils/iscsi_types.go
@@ -9,6 +9,9 @@ import (
 type IscsiReconcileUtils interface {
 	GetISCSIHostSessionMapForTarget(context.Context, string) map[int]int
 	GetSysfsBlockDirsForLUN(int, map[int]int) []string
+	GetMultipathDeviceUUID(string) (string, error)
+	GetMultipathDeviceBySerial(context.Context, string) (string, error)
+	GetMultipathDeviceDisks(context.Context, string) ([]string, error)
 	GetDevicesForLUN(paths []string) ([]string, error)
 	ReconcileISCSIVolumeInfo(ctx context.Context, trackingInfo *VolumeTrackingInfo) (bool, error)
 }
diff --git a/utils/utils.go b/utils/utils.go
index 9a706bc54..aad88c927 100644
--- a/utils/utils.go
+++ b/utils/utils.go
@@ -6,6 +6,8 @@ import (
 	"bytes"
 	"context"
 	"crypto/rand"
+	"encoding/base64"
+	"encoding/json"
 	"fmt"
 	"net"
 	"net/http"
@@ -1049,3 +1051,37 @@ func SlicePtrs[T any](slice []T) []*T {
 	}
 	return result
 }
+
+func EncodeObjectToBase64String(object any) (string, error) {
+	if object == nil {
+		return "", fmt.Errorf("cannot encode nil object")
+	}
+
+	// Serialize the object data to JSON
+	bytes, err := json.Marshal(object)
+	if err != nil {
+		return "", fmt.Errorf("failed to encode object; %v", object)
+	}
+
+	// Encode JSON bytes to a string
+	return base64.StdEncoding.EncodeToString(bytes), nil
+}
+
+func DecodeBase64StringToObject(encodedObject string, destination any) error {
+	if encodedObject == "" {
+		return fmt.Errorf("cannot decode empty encoded string")
+	}
+
+	// Decode the data from a string
+	bytes, err := base64.StdEncoding.DecodeString(encodedObject)
+	if err != nil {
+		return fmt.Errorf("failed to decode string; %s", encodedObject)
+	}
+
+	// Deserialize the bytes into the destination
+	err = json.Unmarshal(bytes, &destination)
+	if err != nil {
+		return fmt.Errorf("failed to unmarshal bytes into destination of type: %v", reflect.TypeOf(destination))
+	}
+	return nil
+}
diff --git a/utils/utils_test.go b/utils/utils_test.go
index 464238930..6e6425c03 100644
--- a/utils/utils_test.go
+++ b/utils/utils_test.go
@@ -1523,3 +1523,122 @@ func TestDNS1123Regexes_MatchString(t *testing.T) {
 		})
 	}
 }
+
+func TestEncodeObjectToBase64String_Fails(t *testing.T) {
+	// Object is nil.
+	encodedObj, err := EncodeObjectToBase64String(nil)
+	assert.Empty(t, encodedObj)
+	assert.Error(t, err)
+
+	// Object is a type that cannot be marshaled.
+	encodedObj, err = EncodeObjectToBase64String(func() {})
+	assert.Empty(t, encodedObj)
+	assert.Error(t, err)
+}
+
+func TestEncodeObjectToBase64String_Succeeds(t *testing.T) {
+	type testObject struct {
+		Foo string `json:"foo"`
+		Bar string `json:"bar"`
+		Baz string `json:"baz,omitempty"`
+	}
+
+	// Object is non-nil, but empty.
+	encodedObj, err := EncodeObjectToBase64String(testObject{})
+	assert.NotNil(t, encodedObj)
+	assert.NoError(t, err)
+
+	// Object is an object with fields filled in.
+	obj := testObject{
+		Foo: "foo_test",
+		Bar: "bar_test",
+		Baz: "baz_test",
+	}
+	encodedObj, err = EncodeObjectToBase64String(obj)
+	assert.NotNil(t, encodedObj)
+	assert.NoError(t, err)
+}
+
+func TestDecodeBase64StringToObject_Fails(t *testing.T) {
+	type testObject struct {
+		Foo string `json:"foo"`
+		Bar string `json:"bar"`
+		Baz string `json:"baz,omitempty"`
+	}
+
+	// Encoded object is an empty string.
+	actualObject := testObject{}
+	err := DecodeBase64StringToObject("", &actualObject)
+	assert.Empty(t, actualObject.Foo)
+	assert.Empty(t, actualObject.Bar)
+	assert.Empty(t, actualObject.Baz)
+	assert.Error(t, err)
+
+	// Encoded object is an invalid value for a base64 string.
+	actualObject = testObject{}
+	err = DecodeBase64StringToObject("%", &actualObject)
+	assert.Empty(t, actualObject.Foo)
+	assert.Empty(t, actualObject.Bar)
+	assert.Empty(t, actualObject.Baz)
+	assert.Error(t, err)
+
+	// Encoded object contains non-ASCII characters for a base64 string.
+	actualObject = testObject{}
+	err = DecodeBase64StringToObject("ß-11234567890987654321234567890", &actualObject)
+	assert.Empty(t, actualObject.Foo)
+	assert.Empty(t, actualObject.Bar)
+	assert.Empty(t, actualObject.Baz)
+	assert.Error(t, err)
+}
+
+func TestDecodeBase64StringToObject_Succeeds(t *testing.T) {
+	type testObject struct {
+		Foo string `json:"foo"`
+		Bar string `json:"bar"`
+		Baz string `json:"baz,omitempty"`
+	}
+
+	// Encoded object is a valid base64-encoded JSON object.
+	actualObject := testObject{}
+	expectedObject := testObject{Foo: "foo_test", Bar: "bar_test", Baz: "baz_test"}
+	err := DecodeBase64StringToObject(
+		"eyJmb28iOiJmb29fdGVzdCIsImJhciI6ImJhcl90ZXN0IiwiYmF6IjoiYmF6X3Rlc3QifQ==",
+		&actualObject,
+	)
+	assert.EqualValues(t, expectedObject, actualObject)
+	assert.NoError(t, err)
+
+	// Decoding the same encoded object into a fresh destination yields the same result.
+	actualObject = testObject{}
+	expectedObject = testObject{Foo: "foo_test", Bar: "bar_test", Baz: "baz_test"}
+	err = DecodeBase64StringToObject(
+		"eyJmb28iOiJmb29fdGVzdCIsImJhciI6ImJhcl90ZXN0IiwiYmF6IjoiYmF6X3Rlc3QifQ==",
+		&actualObject,
+	)
+	assert.EqualValues(t, expectedObject, actualObject)
+	assert.NoError(t, err)
+}
+
+func TestEncodeAndDecodeToAndFromBase64(t *testing.T) {
+	type testObject struct {
+		Foo string `json:"foo"`
+		Bar string `json:"bar"`
+		Baz string `json:"baz,omitempty"`
+	}
+
+	// Create a test object and encode it.
+	originalObject := testObject{Foo: "foo_test", Bar: "bar_test", Baz: "baz_test"}
+	encodedObject, err := EncodeObjectToBase64String(originalObject)
+	assert.NoError(t, err)
+	assert.NotNil(t, encodedObject)
+
+	// Decode the encoded test object and ensure the extracted object and its values are equivalent to
+	// those present in the original object.
+	var actualObject testObject
+	err = DecodeBase64StringToObject(encodedObject, &actualObject)
+	assert.NoError(t, err)
+	assert.NotNil(t, encodedObject)
+	assert.Equal(t, originalObject.Foo, actualObject.Foo)
+	assert.Equal(t, originalObject.Bar, actualObject.Bar)
+	assert.Equal(t, originalObject.Baz, actualObject.Baz)
+}
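Taken together, the new StorageBackendPool constraint in storage_drivers/types.go and the base64 helpers in utils/utils.go suggest the shape of drivers.EncodeStorageBackendPools, whose body does not appear in this diff. The sketch below is illustrative only: it assumes the function simply JSON-encodes each discrete pool into a base64 string, matching its call sites in the ONTAP drivers' Initialize methods and the []string type of CommonStorageDriverConfig.BackendPools. The package name and error handling are assumptions, not the actual implementation.

package storagedrivers // assumed package name; referenced elsewhere in this diff under the alias "drivers"

import (
	"context"

	"github.com/netapp/trident/utils"
)

// EncodeStorageBackendPools (sketch) encodes each non-overlapping backend pool as a
// base64-encoded JSON string suitable for CommonStorageDriverConfig.BackendPools.
// ctx and config mirror the call site but are unused in this minimal sketch.
func EncodeStorageBackendPools[P StorageBackendPool](
	ctx context.Context, config *CommonStorageDriverConfig, pools []P,
) ([]string, error) {
	encodedPools := make([]string, 0, len(pools))
	for _, pool := range pools {
		encoded, err := utils.EncodeObjectToBase64String(pool)
		if err != nil {
			return nil, err
		}
		encodedPools = append(encodedPools, encoded)
	}
	return encodedPools, nil
}

On the read side, a consumer could recover a pool with the matching decode helper, for example: var p OntapStorageBackendPool; err := utils.DecodeBase64StringToObject(encodedPools[0], &p).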