diff --git a/.github/workflows/basic-ci.yaml b/.github/workflows/basic-ci.yaml
index 2896166d..5f5ae869 100644
--- a/.github/workflows/basic-ci.yaml
+++ b/.github/workflows/basic-ci.yaml
@@ -46,7 +46,7 @@ jobs:
           git clone https://github.com/harvester/vagrant-rancherd ndm-vagrant-rancherd
           pushd ndm-vagrant-rancherd
           ./new_cluster.sh
-          yq e -i ".longhorn_version = \"1.4.2\"" settings.yaml
+          yq e -i ".longhorn_version = \"1.5.3\"" settings.yaml
           ./scripts/deploy_longhorn.sh
           cp ../ci/scripts/deploy_ndm.sh ./deploy_ndm.sh
           ./deploy_ndm.sh
diff --git a/ci/scripts/deploy_ndm.sh b/ci/scripts/deploy_ndm.sh
index 76f648af..06739f0c 100755
--- a/ci/scripts/deploy_ndm.sh
+++ b/ci/scripts/deploy_ndm.sh
@@ -37,23 +37,12 @@ ensure_longhorn_ready() {
 
   # ensure instance-manager-e ready
   while [ true ]; do
-    running_num=$(kubectl get pods -n longhorn-system |grep ^instance-manager-e |grep Running |awk '{print $3}' |wc -l)
+    running_num=$(kubectl get pods -n longhorn-system |grep ^instance-manager |grep Running |awk '{print $3}' |wc -l)
     if [[ $running_num -eq ${cluster_nodes} ]]; then
-      echo "instance-manager-e pods are ready!"
+      echo "instance-manager pods are ready!"
       break
     fi
-    echo "check instance-manager-e failure, please deploy longhorn first."
-    exit 1
-  done
-
-  # ensure instance-manager-r ready
-  while [ true ]; do
-    running_num=$(kubectl get pods -n longhorn-system |grep ^instance-manager-r |grep Running |awk '{print $3}' |wc -l)
-    if [[ $running_num -eq ${cluster_nodes} ]]; then
-      echo "instance-manager-r pods are ready!"
-      break
-    fi
-    echo "check instance-manager-r failure, please deploy longhorn first."
+    echo "check instance-manager failure, please deploy longhorn first."
     exit 1
   done
 }
diff --git a/deploy/charts/harvester-node-disk-manager/templates/crds/harvesterhci.io_blockdevices.yaml b/deploy/charts/harvester-node-disk-manager/templates/crds/harvesterhci.io_blockdevices.yaml
index 58c08361..b896b49d 100644
--- a/deploy/charts/harvester-node-disk-manager/templates/crds/harvesterhci.io_blockdevices.yaml
+++ b/deploy/charts/harvester-node-disk-manager/templates/crds/harvesterhci.io_blockdevices.yaml
@@ -84,6 +84,11 @@ spec:
               nodeName:
                 description: name of the node to which the block device is attached
                 type: string
+              tags:
+                description: a list of device tags for the provisioner, e.g. "default,small,ssd"
+                items:
+                  type: string
+                type: array
             required:
             - devPath
             - fileSystem
@@ -277,6 +282,11 @@ spec:
                 - Inactive
                 - Unknown
                 type: string
+              tags:
+                description: The current Tags of the blockdevice
+                items:
+                  type: string
+                type: array
             required:
             - provisionPhase
             - state
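Note: the integration tests below drive these new CRD fields through the Go client as Spec.Tags and Status.Tags. As a rough sketch only (type names, field layout, and json tags are assumptions inferred from the CRD and the tests, not copied from the generated API source), the corresponding Go types would gain something like:

package v1beta1

// Sketch: only the new field is shown; all existing spec/status fields
// (devPath, fileSystem, nodeName, state, provisionPhase, ...) are elided.
type BlockDeviceSpec struct {
	// Tags lists the provisioner tags to apply to the disk, e.g. "default", "small", "ssd".
	Tags []string `json:"tags,omitempty"`
}

type BlockDeviceStatus struct {
	// Tags reports the tags currently set on the block device.
	Tags []string `json:"tags,omitempty"`
}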
"default,small,ssd" + items: + type: string + type: array required: - devPath - fileSystem @@ -277,6 +282,11 @@ spec: - Inactive - Unknown type: string + tags: + description: The current Tags of the blockdevice + items: + type: string + type: array required: - provisionPhase - state diff --git a/tests/integration/test_0_single_disk_test.go b/tests/integration/test_0_single_disk_test.go index dc079e0d..6492387f 100644 --- a/tests/integration/test_0_single_disk_test.go +++ b/tests/integration/test_0_single_disk_test.go @@ -151,15 +151,66 @@ func (s *SingleDiskSuite) Test_2_ManuallyProvisionSingleDisk() { require.Equal(s.T(), curBlockdevice.Status.State, diskv1.BlockDeviceActive, "Block device state should be Active") newBlockdevice := curBlockdevice.DeepCopy() newBlockdevice.Spec.FileSystem.Provisioned = true + targetTags := []string{"default", "test-disk"} + newBlockdevice.Spec.Tags = targetTags bdi.Update(context.TODO(), newBlockdevice, v1.UpdateOptions{}) - // sleep 3 seconds to wait controller handle - time.Sleep(3 * time.Second) + // sleep 30 seconds to wait controller handle + time.Sleep(30 * time.Second) + + // check for the added status + curBlockdevice, err = bdi.Get(context.TODO(), s.targetDiskName, v1.GetOptions{}) + require.Equal(s.T(), err, nil, "Get BlockdevicesList should not get error before we want to check remove") + require.NotEqual(s.T(), curBlockdevice.Status.DeviceStatus.FileSystem.MountPoint, "", "Mountpoint should not be empty after we provision disk!") + require.Equal(s.T(), diskv1.ProvisionPhaseProvisioned, curBlockdevice.Status.ProvisionPhase, "Block device provisionPhase should be Provisioned") + require.Equal(s.T(), diskv1.BlockDeviceActive, curBlockdevice.Status.State, "Block device State should be Active") + require.Equal(s.T(), targetTags, curBlockdevice.Status.Tags, "Block device tags should be the same") +} + +func (s *SingleDiskSuite) Test_3_RemoveTags() { + require.NotEqual(s.T(), s.targetDiskName, "", "target disk name should not be empty before we do the remove test") + bdi := s.clientSet.HarvesterhciV1beta1().BlockDevices("longhorn-system") + curBlockdevice, err := bdi.Get(context.TODO(), s.targetDiskName, v1.GetOptions{}) + require.Equal(s.T(), err, nil, "Get Blockdevices should not get error") + + require.Equal(s.T(), curBlockdevice.Status.State, diskv1.BlockDeviceActive, "Block device state should be Active") + newBlockdevice := curBlockdevice.DeepCopy() + targetTags := []string{"default"} + newBlockdevice.Spec.Tags = targetTags + bdi.Update(context.TODO(), newBlockdevice, v1.UpdateOptions{}) + + // sleep 30 seconds to wait controller handle + time.Sleep(30 * time.Second) + + // check for the added status + curBlockdevice, err = bdi.Get(context.TODO(), s.targetDiskName, v1.GetOptions{}) + require.Equal(s.T(), err, nil, "Get BlockdevicesList should not get error before we want to check remove") + require.NotEqual(s.T(), curBlockdevice.Status.DeviceStatus.FileSystem.MountPoint, "", "Mountpoint should not be empty after we provision disk!") + require.Equal(s.T(), diskv1.ProvisionPhaseProvisioned, curBlockdevice.Status.ProvisionPhase, "Block device provisionPhase should be Provisioned") + require.Equal(s.T(), diskv1.BlockDeviceActive, curBlockdevice.Status.State, "Block device State should be Active") + require.Equal(s.T(), targetTags, curBlockdevice.Status.Tags, "Block device tags should be the same") +} + +func (s *SingleDiskSuite) Test_4_AddTags() { + require.NotEqual(s.T(), s.targetDiskName, "", "target disk name should not be empty before we do the 
remove test") + bdi := s.clientSet.HarvesterhciV1beta1().BlockDevices("longhorn-system") + curBlockdevice, err := bdi.Get(context.TODO(), s.targetDiskName, v1.GetOptions{}) + require.Equal(s.T(), err, nil, "Get Blockdevices should not get error") + + require.Equal(s.T(), curBlockdevice.Status.State, diskv1.BlockDeviceActive, "Block device state should be Active") + newBlockdevice := curBlockdevice.DeepCopy() + targetTags := []string{"default", "test-disk-2"} + newBlockdevice.Spec.Tags = targetTags + bdi.Update(context.TODO(), newBlockdevice, v1.UpdateOptions{}) + + // sleep 30 seconds to wait controller handle + time.Sleep(30 * time.Second) // check for the added status curBlockdevice, err = bdi.Get(context.TODO(), s.targetDiskName, v1.GetOptions{}) require.Equal(s.T(), err, nil, "Get BlockdevicesList should not get error before we want to check remove") - require.NotEqual(s.T(), curBlockdevice.Status.DeviceStatus.FileSystem.MountPoint, "", "Mountpoint should be empty after we remove disk!") + require.NotEqual(s.T(), curBlockdevice.Status.DeviceStatus.FileSystem.MountPoint, "", "Mountpoint should not be empty after we provision disk!") require.Equal(s.T(), diskv1.ProvisionPhaseProvisioned, curBlockdevice.Status.ProvisionPhase, "Block device provisionPhase should be Provisioned") require.Equal(s.T(), diskv1.BlockDeviceActive, curBlockdevice.Status.State, "Block device State should be Active") + require.Equal(s.T(), targetTags, curBlockdevice.Status.Tags, "Block device tags should be the same") }