ci: add disk tags integration tests
    - deploy Longhorn v1.5.3
    - update the deployment CRD
    - add tags during provisioning
    - remove tags from a provisioned blockdevice
    - add tags to a provisioned blockdevice
    - add more info to the upgrade operation

Signed-off-by: Vicente Cheng <[email protected]>
(cherry picked from commit 44f601c)
Vicente-Cheng committed Jan 22, 2024
1 parent 4c3c80d commit 5f3c0c2
Showing 5 changed files with 77 additions and 18 deletions.
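
At a high level, the commit adds a tags field to both the spec and the status of the blockdevice CRD, and the new integration tests set Spec.Tags and wait for the controller to mirror it into Status.Tags. A minimal sketch of that flow, assuming the generated harvester clientset used by the tests (the import path and helper name are assumptions, not code from this commit):

    import (
        "context"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        // assumed location of the generated clientset in this repo:
        clientset "github.com/harvester/node-disk-manager/pkg/generated/clientset/versioned"
    )

    // tagBlockDevice is a hypothetical helper mirroring what the new tests do:
    // set Spec.Tags on a blockdevice and let the controller reconcile them
    // into Status.Tags (and on to the Longhorn disk tags).
    func tagBlockDevice(ctx context.Context, cs *clientset.Clientset, name string, tags []string) error {
        bdi := cs.HarvesterhciV1beta1().BlockDevices("longhorn-system")
        bd, err := bdi.Get(ctx, name, metav1.GetOptions{})
        if err != nil {
            return err
        }
        updated := bd.DeepCopy()
        updated.Spec.Tags = tags // e.g. []string{"default", "ssd"}
        _, err = bdi.Update(ctx, updated, metav1.UpdateOptions{})
        return err
    }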
2 changes: 1 addition & 1 deletion .github/workflows/basic-ci.yaml
@@ -46,7 +46,7 @@ jobs:
       git clone https://github.com/harvester/vagrant-rancherd ndm-vagrant-rancherd
       pushd ndm-vagrant-rancherd
       ./new_cluster.sh
-      yq e -i ".longhorn_version = \"1.4.2\"" settings.yaml
+      yq e -i ".longhorn_version = \"1.5.3\"" settings.yaml
       ./scripts/deploy_longhorn.sh
       cp ../ci/scripts/deploy_ndm.sh ./deploy_ndm.sh
       ./deploy_ndm.sh
17 changes: 3 additions & 14 deletions ci/scripts/deploy_ndm.sh
@@ -37,23 +37,12 @@ ensure_longhorn_ready() {
 
   # ensure instance-manager-e ready
   while [ true ]; do
-    running_num=$(kubectl get pods -n longhorn-system |grep ^instance-manager-e |grep Running |awk '{print $3}' |wc -l)
+    running_num=$(kubectl get pods -n longhorn-system |grep ^instance-manager |grep Running |awk '{print $3}' |wc -l)
     if [[ $running_num -eq ${cluster_nodes} ]]; then
-      echo "instance-manager-e pods are ready!"
+      echo "instance-manager pods are ready!"
       break
     fi
-    echo "check instance-manager-e failure, please deploy longhorn first."
-    exit 1
-  done
-
-  # ensure instance-manager-r ready
-  while [ true ]; do
-    running_num=$(kubectl get pods -n longhorn-system |grep ^instance-manager-r |grep Running |awk '{print $3}' |wc -l)
-    if [[ $running_num -eq ${cluster_nodes} ]]; then
-      echo "instance-manager-r pods are ready!"
-      break
-    fi
-    echo "check instance-manager-r failure, please deploy longhorn first."
+    echo "check instance-manager failure, please deploy longhorn first."
     exit 1
   done
 }
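
Longhorn v1.5 consolidates the separate engine (instance-manager-e) and replica (instance-manager-r) pods into a single instance-manager pod per node, which is why the two readiness loops collapse into one. For reference, the same check can be written against the Kubernetes API instead of grepping kubectl output; a minimal sketch, assuming a standard client-go clientset and one instance-manager pod per node:

    import (
        "context"
        "strings"

        corev1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
    )

    // countRunningInstanceManagers counts Running pods in longhorn-system whose
    // name starts with "instance-manager", mirroring the grep in deploy_ndm.sh.
    func countRunningInstanceManagers(ctx context.Context, cs kubernetes.Interface) (int, error) {
        pods, err := cs.CoreV1().Pods("longhorn-system").List(ctx, metav1.ListOptions{})
        if err != nil {
            return 0, err
        }
        running := 0
        for _, p := range pods.Items {
            if strings.HasPrefix(p.Name, "instance-manager") && p.Status.Phase == corev1.PodRunning {
                running++
            }
        }
        return running, nil
    }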
2 changes: 2 additions & 0 deletions ci/scripts/upgrade_ndm.sh
@@ -54,6 +54,8 @@ rm -rf ndm-override.yaml
 cp -r ../deploy/charts/harvester-node-disk-manager harvester-node-disk-manager
 cp ../ci/charts/ndm-override.yaml ndm-override.yaml
 
+target_img=$(yq -e .image.repository ndm-override.yaml)
+echo "upgrade target image: ${target_img}, upgrading ..."
 $HELM upgrade -f $TOP_DIR/ndm-override.yaml harvester-node-disk-manager harvester-node-disk-manager/ -n harvester-system
 
 sleep 10 # wait 10 seconds for ndm to start respawning pods
10 changes: 10 additions & 0 deletions
@@ -84,6 +84,11 @@ spec:
             nodeName:
               description: name of the node to which the block device is attached
               type: string
+            tags:
+              description: a list of device tags for the provisioner, e.g. "default,small,ssd"
+              items:
+                type: string
+              type: array
           required:
           - devPath
           - fileSystem
@@ -277,6 +282,11 @@ spec:
               - Inactive
               - Unknown
               type: string
+            tags:
+              description: The current Tags of the blockdevice
+              items:
+                type: string
+              type: array
           required:
           - provisionPhase
           - state
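
These schema additions presumably map to Tags []string fields on the Go API types, which is what the tests below exercise via Spec.Tags and Status.Tags. A hypothetical excerpt under that assumption, with the surrounding fields elided:

    // Hypothetical excerpt of the blockdevice API types; only the Tags fields
    // are implied by this commit (via the CRD schema and the tests' accesses),
    // the rest is assumed context.
    type BlockDeviceSpec struct {
        // ...existing fields such as DevPath and FileSystem...

        // Tags are device tags handed to the provisioner, e.g. "default", "small", "ssd".
        Tags []string `json:"tags,omitempty"`
    }

    type BlockDeviceStatus struct {
        // ...existing fields such as ProvisionPhase and State...

        // Tags are the tags currently applied to the blockdevice.
        Tags []string `json:"tags,omitempty"`
    }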
64 changes: 61 additions & 3 deletions tests/integration/test_0_single_disk_test.go
@@ -8,6 +8,7 @@ import (
 	"testing"
 	"time"
 
+	gocommon "github.com/harvester/go-common"
 	"github.com/kevinburke/ssh_config"
 	"github.com/melbahja/goph"
 	"github.com/stretchr/testify/require"
@@ -151,15 +152,72 @@ func (s *SingleDiskSuite) Test_2_ManuallyProvisionSingleDisk() {
 	require.Equal(s.T(), curBlockdevice.Status.State, diskv1.BlockDeviceActive, "Block device state should be Active")
 	newBlockdevice := curBlockdevice.DeepCopy()
 	newBlockdevice.Spec.FileSystem.Provisioned = true
+	targetTags := []string{"default", "test-disk"}
+	newBlockdevice.Spec.Tags = targetTags
 	bdi.Update(context.TODO(), newBlockdevice, v1.UpdateOptions{})
 
-	// sleep 3 seconds to wait controller handle
-	time.Sleep(3 * time.Second)
+	// sleep 30 seconds to wait for the controller to handle the update
+	time.Sleep(30 * time.Second)
 
 	// check for the added status
 	curBlockdevice, err = bdi.Get(context.TODO(), s.targetDiskName, v1.GetOptions{})
 	require.Equal(s.T(), err, nil, "Get BlockdevicesList should not get error before we want to check remove")
 	require.NotEqual(s.T(), curBlockdevice.Status.DeviceStatus.FileSystem.MountPoint, "", "Mountpoint should not be empty after we provision disk!")
 	require.Equal(s.T(), diskv1.ProvisionPhaseProvisioned, curBlockdevice.Status.ProvisionPhase, "Block device provisionPhase should be Provisioned")
 	require.Equal(s.T(), diskv1.BlockDeviceActive, curBlockdevice.Status.State, "Block device State should be Active")
+	require.Eventually(s.T(), func() bool {
+		return gocommon.SliceContentCmp(targetTags, curBlockdevice.Status.Tags)
+	}, 60*time.Second, 3*time.Second, "Block device tags should be the same")
+}
+
+func (s *SingleDiskSuite) Test_3_RemoveTags() {
+	require.NotEqual(s.T(), s.targetDiskName, "", "target disk name should not be empty before we do the remove test")
+	bdi := s.clientSet.HarvesterhciV1beta1().BlockDevices("longhorn-system")
+	curBlockdevice, err := bdi.Get(context.TODO(), s.targetDiskName, v1.GetOptions{})
+	require.Equal(s.T(), err, nil, "Get Blockdevices should not get error")
+
+	require.Equal(s.T(), curBlockdevice.Status.State, diskv1.BlockDeviceActive, "Block device state should be Active")
+	newBlockdevice := curBlockdevice.DeepCopy()
+	targetTags := []string{"default"}
+	newBlockdevice.Spec.Tags = targetTags
+	bdi.Update(context.TODO(), newBlockdevice, v1.UpdateOptions{})
+
+	// sleep 30 seconds to wait for the controller to handle the update
+	time.Sleep(30 * time.Second)
+
+	// check for the updated status
+	curBlockdevice, err = bdi.Get(context.TODO(), s.targetDiskName, v1.GetOptions{})
+	require.Equal(s.T(), err, nil, "Get BlockdevicesList should not get error before we want to check remove")
+	require.NotEqual(s.T(), curBlockdevice.Status.DeviceStatus.FileSystem.MountPoint, "", "Mountpoint should not be empty after we provision disk!")
+	require.Equal(s.T(), diskv1.ProvisionPhaseProvisioned, curBlockdevice.Status.ProvisionPhase, "Block device provisionPhase should be Provisioned")
+	require.Equal(s.T(), diskv1.BlockDeviceActive, curBlockdevice.Status.State, "Block device State should be Active")
+	require.Eventually(s.T(), func() bool {
+		return gocommon.SliceContentCmp(targetTags, curBlockdevice.Status.Tags)
+	}, 60*time.Second, 3*time.Second, "Block device tags should be the same")
+}
+
+func (s *SingleDiskSuite) Test_4_AddTags() {
+	require.NotEqual(s.T(), s.targetDiskName, "", "target disk name should not be empty before we do the add test")
+	bdi := s.clientSet.HarvesterhciV1beta1().BlockDevices("longhorn-system")
+	curBlockdevice, err := bdi.Get(context.TODO(), s.targetDiskName, v1.GetOptions{})
+	require.Equal(s.T(), err, nil, "Get Blockdevices should not get error")
+
+	require.Equal(s.T(), curBlockdevice.Status.State, diskv1.BlockDeviceActive, "Block device state should be Active")
+	newBlockdevice := curBlockdevice.DeepCopy()
+	targetTags := []string{"default", "test-disk-2"}
+	newBlockdevice.Spec.Tags = targetTags
+	bdi.Update(context.TODO(), newBlockdevice, v1.UpdateOptions{})
+
+	// sleep 30 seconds to wait for the controller to handle the update
+	time.Sleep(30 * time.Second)
+
+	// check for the updated status
+	curBlockdevice, err = bdi.Get(context.TODO(), s.targetDiskName, v1.GetOptions{})
+	require.Equal(s.T(), err, nil, "Get BlockdevicesList should not get error before we want to check remove")
+	require.NotEqual(s.T(), curBlockdevice.Status.DeviceStatus.FileSystem.MountPoint, "", "Mountpoint should not be empty after we provision disk!")
+	require.Equal(s.T(), diskv1.ProvisionPhaseProvisioned, curBlockdevice.Status.ProvisionPhase, "Block device provisionPhase should be Provisioned")
+	require.Equal(s.T(), diskv1.BlockDeviceActive, curBlockdevice.Status.State, "Block device State should be Active")
+	require.Eventually(s.T(), func() bool {
+		return gocommon.SliceContentCmp(targetTags, curBlockdevice.Status.Tags)
+	}, 60*time.Second, 3*time.Second, "Block device tags should be the same")
+}
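
One caveat in the assertions above: each require.Eventually closure compares against the curBlockdevice snapshot fetched once before the poll starts, so the poll never observes fresh status. A variant that re-fetches the object on every tick is more robust; a sketch reusing the same suite fields and helpers:

    // Sketch: re-fetch the blockdevice on every poll tick so Eventually sees
    // fresh Status.Tags instead of the snapshot taken before the loop.
    require.Eventually(s.T(), func() bool {
        bd, err := bdi.Get(context.TODO(), s.targetDiskName, v1.GetOptions{})
        if err != nil {
            return false
        }
        return gocommon.SliceContentCmp(targetTags, bd.Status.Tags)
    }, 60*time.Second, 3*time.Second, "Block device tags should converge to spec")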
