ci: add disk tags integration tests
    - Deploy Longhorn v1.5.3
    - Update the deployment CRD
    - Add tags when provisioning
    - Remove tags from a provisioned blockdevice
    - Add tags to a provisioned blockdevice

Signed-off-by: Vicente Cheng <[email protected]>
Vicente-Cheng committed Dec 5, 2023
1 parent 36aad9b commit daef788
Showing 4 changed files with 66 additions and 16 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/basic-ci.yaml
@@ -46,7 +46,7 @@ jobs:
 git clone https://github.com/harvester/vagrant-rancherd ndm-vagrant-rancherd
 pushd ndm-vagrant-rancherd
 ./new_cluster.sh
-yq e -i ".longhorn_version = \"1.4.2\"" settings.yaml
+yq e -i ".longhorn_version = \"1.5.3\"" settings.yaml
 ./scripts/deploy_longhorn.sh
 cp ../ci/scripts/deploy_ndm.sh ./deploy_ndm.sh
 ./deploy_ndm.sh
17 changes: 3 additions & 14 deletions ci/scripts/deploy_ndm.sh
@@ -37,23 +37,12 @@ ensure_longhorn_ready() {

   # ensure instance-manager-e ready
   while [ true ]; do
-    running_num=$(kubectl get pods -n longhorn-system |grep ^instance-manager-e |grep Running |awk '{print $3}' |wc -l)
+    running_num=$(kubectl get pods -n longhorn-system |grep ^instance-manager |grep Running |awk '{print $3}' |wc -l)
     if [[ $running_num -eq ${cluster_nodes} ]]; then
-      echo "instance-manager-e pods are ready!"
+      echo "instance-manager pods are ready!"
       break
     fi
-    echo "check instance-manager-e failure, please deploy longhorn first."
-    exit 1
-  done
-
-  # ensure instance-manager-r ready
-  while [ true ]; do
-    running_num=$(kubectl get pods -n longhorn-system |grep ^instance-manager-r |grep Running |awk '{print $3}' |wc -l)
-    if [[ $running_num -eq ${cluster_nodes} ]]; then
-      echo "instance-manager-r pods are ready!"
-      break
-    fi
-    echo "check instance-manager-r failure, please deploy longhorn first."
+    echo "check instance-manager failure, please deploy longhorn first."
     exit 1
   done
 }
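Longhorn v1.5 consolidates the separate engine (instance-manager-e) and replica (instance-manager-r) instance managers into a single instance-manager pod per node, which is why the readiness loop above now greps for the unified prefix. For illustration only, a minimal Go sketch of the same readiness check using client-go (the instanceManagersReady helper and the use of client-go are assumptions for this sketch, not part of the commit):

package ci

import (
    "context"
    "strings"

    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
)

// instanceManagersReady reports whether the expected number of Running
// instance-manager pods exists in the longhorn-system namespace,
// mirroring the shell loop in deploy_ndm.sh.
func instanceManagersReady(ctx context.Context, cs kubernetes.Interface, expectedNodes int) (bool, error) {
    pods, err := cs.CoreV1().Pods("longhorn-system").List(ctx, metav1.ListOptions{})
    if err != nil {
        return false, err
    }
    running := 0
    for _, pod := range pods.Items {
        if strings.HasPrefix(pod.Name, "instance-manager") && pod.Status.Phase == corev1.PodRunning {
            running++
        }
    }
    return running == expectedNodes, nil
}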
@@ -84,6 +84,11 @@ spec:
           nodeName:
             description: name of the node to which the block device is attached
             type: string
+          tags:
+            description: device tags for the provisioner, e.g. "default,small,ssd"
+            items:
+              type: string
+            type: array
         required:
         - devPath
         - fileSystem
@@ -277,6 +282,11 @@ spec:
             - Inactive
             - Unknown
             type: string
+          tags:
+            description: The current Tags of the blockdevice
+            items:
+              type: string
+            type: array
         required:
         - provisionPhase
         - state
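The new tags entries in the CRD correspond to string-slice fields that the integration tests below exercise through Spec.Tags and Status.Tags. A minimal sketch of how those fields plausibly appear in the Go API types (type names and JSON tags here are inferred from the CRD and the tests, not copied from the node-disk-manager source):

package v1beta1

// Illustrative excerpt only; the other fields (devPath, fileSystem, nodeName,
// provisionPhase, state, ...) are omitted.
type BlockDeviceSpec struct {
    // Tags are device tags consumed by the provisioner,
    // e.g. []string{"default", "small", "ssd"}.
    Tags []string `json:"tags,omitempty"`
}

type BlockDeviceStatus struct {
    // Tags are the tags currently applied to the provisioned block device.
    Tags []string `json:"tags,omitempty"`
}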
53 changes: 52 additions & 1 deletion tests/integration/test_0_single_disk_test.go
@@ -151,6 +151,8 @@ func (s *SingleDiskSuite) Test_2_ManuallyProvisionSingleDisk() {
     require.Equal(s.T(), curBlockdevice.Status.State, diskv1.BlockDeviceActive, "Block device state should be Active")
     newBlockdevice := curBlockdevice.DeepCopy()
     newBlockdevice.Spec.FileSystem.Provisioned = true
+    targetTags := []string{"default", "test-disk"}
+    newBlockdevice.Spec.Tags = targetTags
     bdi.Update(context.TODO(), newBlockdevice, v1.UpdateOptions{})

     // sleep 3 seconds to wait controller handle
@@ -159,7 +161,56 @@ func (s *SingleDiskSuite) Test_2_ManuallyProvisionSingleDisk() {
     // check for the added status
     curBlockdevice, err = bdi.Get(context.TODO(), s.targetDiskName, v1.GetOptions{})
     require.Equal(s.T(), err, nil, "Get BlockdevicesList should not get error before we want to check remove")
-    require.NotEqual(s.T(), curBlockdevice.Status.DeviceStatus.FileSystem.MountPoint, "", "Mountpoint should be empty after we remove disk!")
+    require.NotEqual(s.T(), curBlockdevice.Status.DeviceStatus.FileSystem.MountPoint, "", "Mountpoint should not be empty after we provision disk!")
     require.Equal(s.T(), diskv1.ProvisionPhaseProvisioned, curBlockdevice.Status.ProvisionPhase, "Block device provisionPhase should be Provisioned")
     require.Equal(s.T(), diskv1.BlockDeviceActive, curBlockdevice.Status.State, "Block device State should be Active")
+    require.Equal(s.T(), targetTags, curBlockdevice.Status.Tags, "Block device tags should be the same")
 }
+
+func (s *SingleDiskSuite) Test_3_RemoveTags() {
+    require.NotEqual(s.T(), s.targetDiskName, "", "target disk name should not be empty before we do the remove test")
+    bdi := s.clientSet.HarvesterhciV1beta1().BlockDevices("longhorn-system")
+    curBlockdevice, err := bdi.Get(context.TODO(), s.targetDiskName, v1.GetOptions{})
+    require.Equal(s.T(), err, nil, "Get Blockdevices should not get error")
+
+    require.Equal(s.T(), curBlockdevice.Status.State, diskv1.BlockDeviceActive, "Block device state should be Active")
+    newBlockdevice := curBlockdevice.DeepCopy()
+    targetTags := []string{"default"}
+    newBlockdevice.Spec.Tags = targetTags
+    bdi.Update(context.TODO(), newBlockdevice, v1.UpdateOptions{})
+
+    // sleep 3 seconds to wait controller handle
+    time.Sleep(3 * time.Second)
+
+    // check the tags after removal
+    curBlockdevice, err = bdi.Get(context.TODO(), s.targetDiskName, v1.GetOptions{})
+    require.Equal(s.T(), err, nil, "Get Blockdevices should not get error")
+    require.NotEqual(s.T(), curBlockdevice.Status.DeviceStatus.FileSystem.MountPoint, "", "Mountpoint should not be empty after we provision disk!")
+    require.Equal(s.T(), diskv1.ProvisionPhaseProvisioned, curBlockdevice.Status.ProvisionPhase, "Block device provisionPhase should be Provisioned")
+    require.Equal(s.T(), diskv1.BlockDeviceActive, curBlockdevice.Status.State, "Block device State should be Active")
+    require.Equal(s.T(), targetTags, curBlockdevice.Status.Tags, "Block device tags should be the same")
+}
+
+func (s *SingleDiskSuite) Test_4_AddTags() {
+    require.NotEqual(s.T(), s.targetDiskName, "", "target disk name should not be empty before we do the add test")
+    bdi := s.clientSet.HarvesterhciV1beta1().BlockDevices("longhorn-system")
+    curBlockdevice, err := bdi.Get(context.TODO(), s.targetDiskName, v1.GetOptions{})
+    require.Equal(s.T(), err, nil, "Get Blockdevices should not get error")
+
+    require.Equal(s.T(), curBlockdevice.Status.State, diskv1.BlockDeviceActive, "Block device state should be Active")
+    newBlockdevice := curBlockdevice.DeepCopy()
+    targetTags := []string{"default", "test-disk-2"}
+    newBlockdevice.Spec.Tags = targetTags
+    bdi.Update(context.TODO(), newBlockdevice, v1.UpdateOptions{})
+
+    // sleep 3 seconds to wait controller handle
+    time.Sleep(3 * time.Second)
+
+    // check for the added status
+    curBlockdevice, err = bdi.Get(context.TODO(), s.targetDiskName, v1.GetOptions{})
+    require.Equal(s.T(), err, nil, "Get Blockdevices should not get error")
+    require.NotEqual(s.T(), curBlockdevice.Status.DeviceStatus.FileSystem.MountPoint, "", "Mountpoint should not be empty after we provision disk!")
+    require.Equal(s.T(), diskv1.ProvisionPhaseProvisioned, curBlockdevice.Status.ProvisionPhase, "Block device provisionPhase should be Provisioned")
+    require.Equal(s.T(), diskv1.BlockDeviceActive, curBlockdevice.Status.State, "Block device State should be Active")
+    require.Equal(s.T(), targetTags, curBlockdevice.Status.Tags, "Block device tags should be the same")
+}
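The new tests rely on a fixed time.Sleep(3 * time.Second) for the controller to reconcile, which can be flaky on slow runners. A common alternative is to poll until the tags converge; below is a sketch using k8s.io/apimachinery/pkg/util/wait (the waitForTags helper is an illustrative assumption, not part of this commit):

package integration

import (
    "reflect"
    "time"

    "k8s.io/apimachinery/pkg/util/wait"
)

// waitForTags polls getTags until it returns the expected slice or the
// timeout elapses, replacing a fixed sleep with an explicit condition.
func waitForTags(getTags func() ([]string, error), expected []string, timeout time.Duration) error {
    return wait.PollImmediate(time.Second, timeout, func() (bool, error) {
        got, err := getTags()
        if err != nil {
            // Treat transient errors as "not ready yet" and keep polling.
            return false, nil
        }
        return reflect.DeepEqual(got, expected), nil
    })
}

In the tests above, getTags would simply wrap bdi.Get for the target disk and return curBlockdevice.Status.Tags.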
