From 0aeb8c6351a4007932ae44560cd4ce09885cbfb5 Mon Sep 17 00:00:00 2001 From: rohan2794 Date: Mon, 23 Sep 2024 14:51:05 +0530 Subject: [PATCH] fix: refactor lvm tests and add lvm tests to lvm testplan Signed-off-by: rohan2794 --- common/k8stest/util_fio_app.go | 4 + common/lvm/util.go | 2 +- common/zfs/util.go | 2 +- src/tests/lvm/common/util_resize.go | 128 ++++++----- src/tests/lvm/common/util_snapshot.go | 46 ++++ .../lvm_custom_node_topology_test.go | 213 +++++++++++------- .../lvm_ha_controller_test.go | 64 ++---- .../lvm_thick_volume_resize_test.go | 42 ++-- .../lvm_thin_volume_resize_test.go | 49 ++-- .../lvm_volume_snapshot_test.go | 81 ++++--- .../zfs_custom_node_topology_test.go | 12 +- testplans/lvm.yaml | 7 +- 12 files changed, 388 insertions(+), 262 deletions(-) create mode 100644 src/tests/lvm/common/util_snapshot.go diff --git a/common/k8stest/util_fio_app.go b/common/k8stest/util_fio_app.go index 865a4e59..eb779ddd 100644 --- a/common/k8stest/util_fio_app.go +++ b/common/k8stest/util_fio_app.go @@ -241,6 +241,10 @@ func (dfa *FioApplication) DeployFio(fioArgsSet common.FioAppArgsSet, podPrefix func (dfa *FioApplication) CreateVolume() error { var err error + if dfa.status.createdPVC || dfa.status.importedVolume { + return nil + } + decoration := dfa.OpenEbsEngine.String() if dfa.VolType.String() == "" { diff --git a/common/lvm/util.go b/common/lvm/util.go index e11903c6..c04f3363 100644 --- a/common/lvm/util.go +++ b/common/lvm/util.go @@ -148,7 +148,7 @@ func SetupLvmNodes(vgName string, size int64) (LvmNodesDevicePvVgConfig, error) if strings.Contains(workerNodes[0], "kind-") { imgDir = "/host/host/mnt" } else { - imgDir = "/tmp" + imgDir = "/mnt" } loopDevice := e2e_agent.LoopDevice{ diff --git a/common/zfs/util.go b/common/zfs/util.go index 4879d5f9..66b772a8 100644 --- a/common/zfs/util.go +++ b/common/zfs/util.go @@ -119,7 +119,7 @@ func SetupZfsNodes(poolName string, size int64) (ZfsNodesDevicePoolConfig, error if strings.Contains(workerNodes[0], "kind-") { imgDir = "/host/host/mnt" } else { - imgDir = "/tmp" + imgDir = "/mnt" } loopDevice := e2e_agent.LoopDevice{ Size: size, diff --git a/src/tests/lvm/common/util_resize.go b/src/tests/lvm/common/util_resize.go index 4b3b7c33..5f8b48b5 100644 --- a/src/tests/lvm/common/util_resize.go +++ b/src/tests/lvm/common/util_resize.go @@ -1,10 +1,10 @@ package common import ( + "fmt" "time" "github.com/openebs/openebs-e2e/common/e2e_agent" - "github.com/openebs/openebs-e2e/common/lvm" "github.com/openebs/openebs-e2e/common/mayastor/volume_resize" "github.com/openebs/openebs-e2e/common" @@ -15,85 +15,85 @@ import ( logf "sigs.k8s.io/controller-runtime/pkg/log" ) -var NodeConfig lvm.LvmNodesDevicePvVgConfig +var ResizeApp k8stest.FioApplication +var ResizeApp2 k8stest.FioApplication +var defFioCompletionTime = 240 // in seconds +var ThinPoolNode string -func LvmVolumeResizeTest(decor string, engine common.OpenEbsEngine, volType common.VolumeType, fstype common.FileSystemType, volBindModeWait bool, thinProvisioned common.YesNoVal) { +func LvmVolumeResizeTest(decor string, engine common.OpenEbsEngine, vgName string, volType common.VolumeType, fstype common.FileSystemType, volBindModeWait bool, thinProvisioned common.YesNoVal) { var ftSize1, ftSize2 uint64 - app := k8stest.FioApplication{ - Decor: decor, - VolSizeMb: 4096, - OpenEbsEngine: engine, - VolType: volType, - FsType: fstype, - Loops: 5, - VolWaitForFirstConsumer: volBindModeWait, - SkipPvcVerificationAfterCreate: true, - // after fio completes sleep of a long time - 
PostOpSleep: 600000, - } - - loopDevice := e2e_agent.LoopDevice{ - Size: 10737418240, - ImgDir: "/tmp", + // setup sc parameters + lvmScOptions := k8stest.LvmOptions{ + VolGroup: vgName, + Storage: "lvm", + ThinProvision: thinProvisioned, } - workerNodes, err := lvm.ListLvmNode(common.NSOpenEBS()) - Expect(err).ToNot(HaveOccurred(), "failed to list worker node") - - NodeConfig = lvm.LvmNodesDevicePvVgConfig{ - VgName: "lvmvg", - NodeDeviceMap: make(map[string]e2e_agent.LoopDevice), // Properly initialize the map - } - for _, node := range workerNodes { - NodeConfig.NodeDeviceMap[node] = loopDevice + ResizeApp = k8stest.FioApplication{ + Decor: decor, + VolSizeMb: 1024, + OpenEbsEngine: engine, + VolType: volType, + FsType: fstype, + Loops: 3, + VolWaitForFirstConsumer: volBindModeWait, + Lvm: lvmScOptions, + // after fio completes sleep of a long time + PostOpSleep: 600000, + AllowVolumeExpansion: common.AllowVolumeExpansionEnable, } - logf.Log.Info("setup node with loop device, pv and vg", "node config", NodeConfig) - err = NodeConfig.ConfigureLvmNodesWithDeviceAndVg() - Expect(err).ToNot(HaveOccurred(), "failed to setup node") - - // setup sc parameters - app.Lvm = k8stest.LvmOptions{ - VolGroup: NodeConfig.VgName, - Storage: "lvm", - ThinProvision: thinProvisioned, + if ResizeApp.FsType == common.BtrfsFsType { + ResizeApp.FsPercent = 60 } logf.Log.Info("create sc, pvc, fio pod") - err = app.DeployApplication() + err := ResizeApp.DeployApplication() Expect(err).To(BeNil(), "failed to deploy app") + var node string + if thinProvisioned == common.Yes { + node, err = k8stest.GetNodeNameForScheduledPod(ResizeApp.GetPodName(), common.NSDefault) + Expect(err).To(BeNil(), "failed to get node name for %s app", ResizeApp.GetPodName()) + nodeIp, err := k8stest.GetNodeIPAddress(node) + Expect(err).To(BeNil(), "failed to get node %s ip", node) + ThinPoolNode = *nodeIp + logf.Log.Info("App node", "name", node, "IP", ThinPoolNode) + out, err := e2e_agent.LvmLvChangeMonitor(ThinPoolNode, vgName) + Expect(err).To(BeNil(), "failed to set up lv change monitor on node %s with vg %s, output: %s", node, vgName, out) + } + // sleep for 30 seconds before resizing volume logf.Log.Info("Sleep before resizing volume", "duration", volume_resize.DefSleepTime) time.Sleep(time.Duration(volume_resize.DefSleepTime) * time.Second) - expandedVolumeSizeMb := app.VolSizeMb + 1024 + expandedVolumeSizeMb := ResizeApp.VolSizeMb + 1024 // expand volume by editing pvc size logf.Log.Info("Update volume size", "new size in MiB", expandedVolumeSizeMb) - _, err = k8stest.UpdatePvcSize(app.GetPvcName(), common.NSDefault, expandedVolumeSizeMb) - Expect(err).ToNot(HaveOccurred(), "failed to expand volume %s, error: %v", app.GetPvcName(), err) + _, err = k8stest.UpdatePvcSize(ResizeApp.GetPvcName(), common.NSDefault, expandedVolumeSizeMb) + Expect(err).ToNot(HaveOccurred(), "failed to expand volume %s, error: %v", ResizeApp.GetPvcName(), err) // verify pvc capacity to new size logf.Log.Info("Verify pvc resize status") - pvcResizeStatus, err := volume_resize.WaitForPvcResize(app.GetPvcName(), common.NSDefault, expandedVolumeSizeMb) - Expect(err).ToNot(HaveOccurred(), "failed to verify resized pvc %s, error: %v", app.GetPvcName(), err) - Expect(pvcResizeStatus).To(BeTrue(), "failed to resized pvc %s, error: %v", app.GetPvcName(), err) + pvcResizeStatus, err := volume_resize.WaitForPvcResize(ResizeApp.GetPvcName(), common.NSDefault, expandedVolumeSizeMb) + Expect(err).ToNot(HaveOccurred(), "failed to verify resized pvc %s, error: %v", 
ResizeApp.GetPvcName(), err)
+	Expect(pvcResizeStatus).To(BeTrue(), "failed to resize pvc %s, error: %v", ResizeApp.GetPvcName(), err)
 
 	// Check fio pod status
 	logf.Log.Info("Check fio pod status")
-	phase, _, err := k8stest.CheckFioPodCompleted(app.GetPodName(), common.NSDefault)
+	phase, _, err := k8stest.CheckFioPodCompleted(ResizeApp.GetPodName(), common.NSDefault)
 	Expect(err).To(BeNil(), "CheckPodComplete got error %s", err)
 	Expect(phase).ShouldNot(Equal(coreV1.PodFailed), "fio pod phase is %s", phase)
 
 	// wait for fio completion - monitoring log output
-	exitValue, fErr := app.WaitFioComplete(volume_resize.DefFioCompletionTime, 5)
+	exitValue, fErr := ResizeApp.WaitFioComplete(volume_resize.DefFioCompletionTime, 5)
 	Expect(fErr).ToNot(HaveOccurred())
 	logf.Log.Info("fio complete", "exit value", exitValue)
 	Expect(exitValue == 0).Should(BeTrue(), "fio exit value is not 0")
 
 	// print fio target sizes retrieved by monitoring log output
-	ftSizes, ffErr := app.FioTargetSizes()
+	ftSizes, ffErr := ResizeApp.FioTargetSizes()
 	Expect(ffErr).ToNot(HaveOccurred())
 	for path, size := range ftSizes {
 		logf.Log.Info("ftSize (poc_resize_1)", "path", path, "size", volume_resize.ByteSizeString(size), "bytes", size)
@@ -102,23 +102,26 @@ func LvmVolumeResizeTest(decor string, engine common.OpenEbsEngine, volType comm
 	Expect(len(ftSizes)).To(BeNumerically("==", 1), "unexpected fio target sizes")
 
 	// second instance of e2e-fio, volume parameters should be the same as the 1st app instance
-	app2 := app
-	app2.Decor = app.Decor + "second-app"
+	ResizeApp2 = k8stest.FioApplication{
+		Decor:     fmt.Sprintf("%s-2", ResizeApp.Decor),
+		Loops:     2,
+		FsPercent: ResizeApp.FsPercent,
+	}
 
-	// Before deploying the 2nd app instance - "import" the volume
-	// from the first app
-	err = app2.ImportVolumeFromApp(&app)
+	// Before deploying the 2nd app instance - "import" the volume from the first app
+	err = ResizeApp2.ImportVolumeFromApp(&ResizeApp)
 	Expect(err).ToNot(HaveOccurred(), "import volume failed")
-	// then deploy
-	err = app2.DeployApplication()
+
+	// then deploy second fio app to use resized volume
+	err = ResizeApp2.DeployApplication()
 	Expect(err).ToNot(HaveOccurred(), "deploy 2nd app failed")
 
-	exitValue, fErr = app2.WaitFioComplete(volume_resize.DefFioCompletionTime, 5)
+	exitValue, fErr = ResizeApp2.WaitFioComplete(volume_resize.DefFioCompletionTime, 5)
 	Expect(fErr).ToNot(HaveOccurred())
 	logf.Log.Info("fio complete", "exit value", exitValue)
 	Expect(exitValue == 0).Should(BeTrue(), "fio exit value is not 0")
 
-	ftSizes, ffErr = app2.FioTargetSizes()
+	ftSizes, ffErr = ResizeApp2.FioTargetSizes()
 	Expect(ffErr).ToNot(HaveOccurred())
 	for path, size := range ftSizes {
 		logf.Log.Info("ftSize (poc_resize_2)", "path", path, "size", volume_resize.ByteSizeString(size), "bytes", size)
@@ -131,15 +134,20 @@ func LvmVolumeResizeTest(decor string, engine common.OpenEbsEngine, volType comm
 	Expect(ftSize2).To(BeNumerically(">", ftSize1))
 
 	// second app should complete normally
-	err = app2.WaitComplete(volume_resize.DefFioCompletionTime)
-	Expect(err).ToNot(HaveOccurred(), "app2 did not complete")
+	err = ResizeApp2.WaitComplete(defFioCompletionTime)
+	Expect(err).ToNot(HaveOccurred(), "ResizeApp2 did not complete")
 
 	// cleanup the second instance of e2e-fio app
-	err = app2.Cleanup()
+	err = ResizeApp2.Cleanup()
 	Expect(err).ToNot(HaveOccurred(), "app2 cleanup failed")
 
 	// cleanup the first instance of e2e-fio app
-	err = app.Cleanup()
+	err = ResizeApp.Cleanup()
 	Expect(err).ToNot(HaveOccurred(), "app1 cleanup failed")
+	if 
thinProvisioned == common.Yes {
+		out, err := e2e_agent.LvmLvRemoveThinPool(ThinPoolNode, vgName)
+		Expect(err).To(BeNil(), "failed to remove lv thin pool on node %s with vg %s, output: %s", node, vgName, out)
+		ThinPoolNode = ""
+	}
 }
diff --git a/src/tests/lvm/common/util_snapshot.go b/src/tests/lvm/common/util_snapshot.go
new file mode 100644
index 00000000..2dd4832a
--- /dev/null
+++ b/src/tests/lvm/common/util_snapshot.go
@@ -0,0 +1,46 @@
+package common
+
+import (
+	"fmt"
+
+	"github.com/openebs/openebs-e2e/common/k8stest"
+	"github.com/openebs/openebs-e2e/common/mayastor/snapshot"
+
+	logf "sigs.k8s.io/controller-runtime/pkg/log"
+)
+
+// LvmVolumeSnapshotVerify verifies that the snapshot and the snapshot content are ready.
+// It also verifies that the snapshot restore size is zero.
+func LvmVolumeSnapshotVerify(snapshotName, snapshotContentName, namespace string, skipSnapError bool) (bool, error) {
+
+	logf.Log.Info("Verify lvm snapshot content ready status")
+	contentReady, err := snapshot.WaitForSnapshotContentReadyStatus(snapshotContentName, skipSnapError)
+	if err != nil {
+		return contentReady, err
+	} else if !contentReady {
+		logf.Log.Info("Snapshot content not ready", "VolumeSnapshotContent.status.readyToUse", contentReady)
+		return contentReady, err
+	}
+	logf.Log.Info("Verify snapshot ready status")
+	snapshotReady, err := snapshot.WaitForSnapshotReadyStatus(snapshotName, namespace, skipSnapError)
+	if err != nil {
+		return snapshotReady, err
+	} else if !snapshotReady {
+		logf.Log.Info("Snapshot not ready", "VolumeSnapshot.status.readyToUse", snapshotReady)
+		return snapshotReady, err
+	}
+
+	logf.Log.Info("Verify snapshot restore size is zero")
+	restoreSize, err := k8stest.GetSnapshotRestoreSize(snapshotName, namespace)
+	if err != nil {
+		return false, err
+	}
+
+	restoreSizeInt, conversionStatus := restoreSize.AsInt64()
+	if !conversionStatus {
+		return false, fmt.Errorf("failed to convert snapshot restore size into int, restore size: %v", restoreSize)
+	} else if restoreSizeInt != 0 {
+		return false, fmt.Errorf("snapshot restore size is not zero, restore size: %d", restoreSizeInt)
+	}
+	return true, nil
+}
diff --git a/src/tests/lvm/lvm_custom_node_topology/lvm_custom_node_topology_test.go b/src/tests/lvm/lvm_custom_node_topology/lvm_custom_node_topology_test.go
index 4248c48d..3d2e73c7 100644
--- a/src/tests/lvm/lvm_custom_node_topology/lvm_custom_node_topology_test.go
+++ b/src/tests/lvm/lvm_custom_node_topology/lvm_custom_node_topology_test.go
@@ -5,7 +5,6 @@ import (
 	"testing"
 	"time"
 
-	"github.com/openebs/openebs-e2e/common/e2e_agent"
 	"github.com/openebs/openebs-e2e/common/e2e_config"
 	"github.com/openebs/openebs-e2e/common/e2e_ginkgo"
 	"github.com/openebs/openebs-e2e/common/lvm"
@@ -20,7 +19,9 @@ import (
 )
 
 var nodeConfig lvm.LvmNodesDevicePvVgConfig
-var allowedTopologyEnvName = "ALLOWED_TOPOLOGIES"
+var appInstances []*k8stest.FioApplication
+var targetNode, key string
+var csiNodeUpdateTime = 60 // in seconds
 
 var defDaemonsetReadyTime = 120 // in seconds
 
@@ -35,33 +36,15 @@ Scenario: Node custom node topology for immediate volume binding
 */
 func customTopologyImmediateTest(decor string, engine common.OpenEbsEngine, volType common.VolumeType, fstype common.FileSystemType, volBindModeWait bool) {
-
-	loopDevice := e2e_agent.LoopDevice{
-		Size:   10737418240,
-		ImgDir: "/tmp",
-	}
-
 	workerNodes, err := lvm.ListLvmNode(common.NSOpenEBS())
 	Expect(err).ToNot(HaveOccurred(), "failed to list worker node")
 
-	nodeConfig = lvm.LvmNodesDevicePvVgConfig{
-		VgName:        "lvmvg",
-		NodeDeviceMap: 
make(map[string]e2e_agent.LoopDevice), // Properly initialize the map
-	}
-	for _, node := range workerNodes {
-		nodeConfig.NodeDeviceMap[node] = loopDevice
-	}
-
-	logf.Log.Info("setup node with loop device, pv and vg", "node config", nodeConfig)
-	err = nodeConfig.ConfigureLvmNodesWithDeviceAndVg()
-	Expect(err).ToNot(HaveOccurred(), "failed to setup node")
-
 	// minimum worker nodes in cluster should be two
-	Expect(len(workerNodes)).Should(BeNumerically(">=", 2),
+	Expect(len(workerNodes)).Should(BeNumerically(">=", 1),
 		"test case requires are least 2 worker nodes, %d nodes found", len(workerNodes))
 
-	key := "lvme2e/nodename"
-	targetNode := workerNodes[0]
+	key = "lvme2e/nodename"
+	targetNode = workerNodes[0]
 
 	// label worker node
 	err = k8stest.LabelNode(targetNode, key, targetNode)
	Expect(err).ToNot(HaveOccurred(), "failed to label node %s", targetNode)
@@ -82,11 +65,11 @@ func customTopologyImmediateTest(decor string, engine common.OpenEbsEngine, volT
 		},
 		},
 	}
-	var appInstances []*k8stest.FioApplication
+	appInstances = []*k8stest.FioApplication{}
 
 	for i := 0; i <= len(workerNodes); i++ {
 		app := k8stest.FioApplication{
-			Decor:                   decor,
+			Decor:                   fmt.Sprintf("%s-%d", decor, i),
 			VolSizeMb:               1024,
 			OpenEbsEngine:           engine,
 			VolType:                 volType,
@@ -126,7 +109,53 @@ func customTopologyImmediateTest(decor string, engine common.OpenEbsEngine, volT
 	// Remove the labels from nodes after the end of test
 	err = k8stest.UnlabelNode(targetNode, key)
 	Expect(err).ToNot(HaveOccurred(), "failed to remove label from node %s", targetNode)
+	targetNode = ""
+
+	productConfig := e2e_config.GetConfig().Product
+	label := fmt.Sprintf("%s=%s", productConfig.LocalEngineComponentPodLabelKey,
+		productConfig.LvmEngineComponentDsPodLabelValue)
+
+	// restart lvm daemonset pods so that topology key present in csinode kubernetes object
+	// for local.csi.openebs.io plugin driver should be removed before starting new topology test
+	// and to do so, daemonset pods need to be restarted after removing the node label with the key
+	err = k8stest.DeletePodsByLabel(label, common.NSOpenEBS())
+	Expect(err).To(BeNil(), "failed to restart lvm daemonset pods with label %s", label)
+
+	// verify lvm daemonset to be ready
+	Eventually(func() bool {
+		return k8stest.DaemonSetReady(productConfig.LvmEngineDaemonSetName, common.NSOpenEBS())
+	},
+		defDaemonsetReadyTime,
+		"5s",
+	).Should(BeTrue())
+
+	ready, err := k8stest.OpenEBSReady(10, 540)
+	Expect(err).To(BeNil(), "failed to verify openebs pods running state")
+	Expect(ready).To(BeTrue(), "some of the openebs pods are not running")
+	// verify topology key in csi node
+	var csiNodeErr error
+	Eventually(func() bool {
+		var isKeyFound bool
+		isKeyFound, csiNodeErr = k8stest.CheckCsiNodeTopologyKeysPresent(workerNodes[0],
+			productConfig.LvmEnginePluginDriverName,
+			[]string{
+				key,
+			})
+		if csiNodeErr != nil {
+			logf.Log.Info("Failed to check csinode topology key",
+				"driver", productConfig.LvmEnginePluginDriverName,
+				"key", key,
+				"node", workerNodes[0],
+				"error", csiNodeErr)
+		}
+		return isKeyFound
+	},
+		csiNodeUpdateTime,
+		"5s",
+	).Should(BeFalse())
+
+	Expect(csiNodeErr).ToNot(HaveOccurred(), "failed to get csi node %s, %v", workerNodes[0], csiNodeErr)
 }
 
 /*
@@ -137,7 +166,7 @@ Scenario: Node custom node topology for immediate volume binding
 	And Minimum two worker nodes should exist in cluster
 	When Lvm WaitForFirstConsumer binding volumes and applications (number of worker nodes + 1) are deployed using custom topology
 	Then All volumes should be in pending state
-	When Set the ALLOWED_TOPOLOGIES env in lvm node-daemonset with custom topology key
+	When lvm 
node-daemonset pods are restarted Then Verify topology key is now available in csi node for local.csi.openebs.io plugin driver And All volumes should be transition from pending to bound state And All volumes should get provisioned on only those node which was labeled prior to the provisioning @@ -145,38 +174,22 @@ Scenario: Node custom node topology for immediate volume binding func customTopologyWfcTest(decor string, engine common.OpenEbsEngine, volType common.VolumeType, fstype common.FileSystemType, volBindModeWait bool) { - loopDevice := e2e_agent.LoopDevice{ - Size: 10737418240, - ImgDir: "/tmp", - } - workerNodes, err := lvm.ListLvmNode(common.NSOpenEBS()) Expect(err).ToNot(HaveOccurred(), "failed to list worker node") - nodeConfig = lvm.LvmNodesDevicePvVgConfig{ - VgName: "lvmvg", - NodeDeviceMap: make(map[string]e2e_agent.LoopDevice), // Properly initialize the map - } - for _, node := range workerNodes { - nodeConfig.NodeDeviceMap[node] = loopDevice - } - - logf.Log.Info("setup node with loop device, pv and vg", "node config", nodeConfig) - err = nodeConfig.ConfigureLvmNodesWithDeviceAndVg() - Expect(err).ToNot(HaveOccurred(), "failed to setup node") - // minimum worker nodes in cluster should be two - Expect(len(workerNodes)).Should(BeNumerically(">=", 2), + Expect(len(workerNodes)).Should(BeNumerically(">=", 1), "test case requires are least 2 worker nodes, %d nodes found", len(workerNodes)) - key := "lvme2e/nodename" - targetNode := workerNodes[0] + key = "lvme2e/nodename" + targetNode = workerNodes[0] // label worker node + logf.Log.Info("Label node", "node", targetNode, "key", key) err = k8stest.LabelNode(targetNode, key, targetNode) Expect(err).ToNot(HaveOccurred(), "failed to label node %s", targetNode) - lvmScTopology := k8stest.LvmOptions{ + lvmScOption := k8stest.LvmOptions{ VolGroup: nodeConfig.VgName, Storage: "lvm", ThinProvision: common.No, @@ -191,18 +204,18 @@ func customTopologyWfcTest(decor string, engine common.OpenEbsEngine, volType co }, }, } - var appInstances []*k8stest.FioApplication + appInstances = []*k8stest.FioApplication{} for i := 0; i <= len(workerNodes); i++ { app := k8stest.FioApplication{ - Decor: decor, + Decor: fmt.Sprintf("%s-%d", decor, i), VolSizeMb: 1024, OpenEbsEngine: engine, VolType: volType, FsType: fstype, Loops: 1, VolWaitForFirstConsumer: volBindModeWait, - Lvm: lvmScTopology, + Lvm: lvmScOption, SkipPvcVerificationAfterCreate: true, } appInstances = append(appInstances, &app) @@ -215,6 +228,7 @@ func customTopologyWfcTest(decor string, engine common.OpenEbsEngine, volType co Expect(err).ToNot(HaveOccurred(), "failed to create volume %s, %v", app.Decor, err) } + logf.Log.Info("Sleep for 30 seconds before verifying pvc's pending state") time.Sleep(30 * time.Second) for ix, app := range appInstances { @@ -226,15 +240,14 @@ func customTopologyWfcTest(decor string, engine common.OpenEbsEngine, volType co } productConfig := e2e_config.GetConfig().Product - allowedTopologyEnvValue := fmt.Sprintf("kubernetes.io/hostname,%s", key) - // update csi daemonset with ALLOWED_TOPOLOGIES env with test-specific topology key - dsOldEnvList, err := k8stest.UpdateDemonsetContainerEnv(productConfig.LvmEngineDaemonSetName, - productConfig.LvmEnginePluginContainerName, - common.NSOpenEBS(), - allowedTopologyEnvName, - allowedTopologyEnvValue, - ) - Expect(err).ToNot(HaveOccurred(), "failed to update lvm Daemonset with topology env %s", productConfig.LvmEngineDaemonSetName) + label := fmt.Sprintf("%s=%s", productConfig.LocalEngineComponentPodLabelKey, + 
productConfig.LvmEngineComponentDsPodLabelValue)
+
+	// Restart lvm daemonset pods after applying node label with key
+	// so that csinode kubernetes object for local.csi.openebs.io plugin driver picks
+	// that particular topology key for scheduling volume
+	err = k8stest.DeletePodsByLabel(label, common.NSOpenEBS())
+	Expect(err).To(BeNil(), "failed to restart lvm daemonset pods with label %s", label)
 
 	// verify lvm daemonset to be ready
 	Eventually(func() bool {
@@ -249,17 +262,39 @@ func customTopologyWfcTest(decor string, engine common.OpenEbsEngine, volType co
 	Expect(ready).To(BeTrue(), "some of the openebs pods are not running")
 
 	// verify topology key in csi node
-	isKeyFound, err := k8stest.CheckCsiNodeTopologyKeysPresent(targetNode,
-		productConfig.LvmEnginePluginDriverName,
-		[]string{
-			"kubernetes.io/hostname",
-			key,
-		})
-	Expect(err).ToNot(HaveOccurred(), "failed to get csi node %s, %v", targetNode, err)
-	Expect(isKeyFound).To(BeTrue(), "failed to get csi node %s key, %v", targetNode, key)
+	logf.Log.Info("verify topology key in csi node", "key", key, "node", targetNode)
+	var csiNodeErr error
+	Eventually(func() bool {
+		var isKeyFound bool
+		isKeyFound, csiNodeErr = k8stest.CheckCsiNodeTopologyKeysPresent(targetNode,
+			productConfig.LvmEnginePluginDriverName,
+			[]string{
+				key,
+			})
+		if csiNodeErr != nil {
+			logf.Log.Info("Failed to check csinode topology key",
+				"driver", productConfig.LvmEnginePluginDriverName,
+				"key", key,
+				"node", targetNode,
+				"error", csiNodeErr)
+		}
+		return isKeyFound
+	},
+		csiNodeUpdateTime,
+		"5s",
+	).Should(BeTrue())
+
+	Expect(csiNodeErr).ToNot(HaveOccurred(), "failed to get csi node %s, %v", targetNode, csiNodeErr)
+
 	// deploy fio pods for created lvm volumes
 	for ix, app := range appInstances {
 		logf.Log.Info(fmt.Sprintf("%d)", ix), "fio-pod", app.Decor)
+
+		// deploy fio pod with created volume
+		logf.Log.Info("deploy fio pod with created volume")
+		err = app.DeployApplication()
+		Expect(err).To(BeNil(), "failed to deploy app")
+
 		//verify pvc and pv to be bound
 		volUuid, err := k8stest.VerifyVolumeProvision(app.GetPvcName(), common.NSDefault)
 		Expect(err).ToNot(HaveOccurred())
@@ -268,25 +303,22 @@ func customTopologyWfcTest(decor string, engine common.OpenEbsEngine, volType co
 		// use created PVC which is deployed as part of restore app
 		err = app.RefreshVolumeState()
 		Expect(err).ToNot(HaveOccurred())
-
-		// deploy fio pod with created volume
-		logf.Log.Info("deploy fio pod with created volume")
-		err = app.DeployApplication()
-		Expect(err).To(BeNil(), "failed to deploy app")
 	}
 
 	// verify all fio application are deployed on same labeled node
+	logf.Log.Info("verify all fio applications are deployed on the same labeled node", "labeled node", targetNode)
 	for ix, app := range appInstances {
 		logf.Log.Info(fmt.Sprintf("%d)", ix), "fio-pod", app.Decor)
 		appPodName := app.GetPodName()
 		logf.Log.Info("app pod", "fio-pod", appPodName)
 		//get node name where where app is deployed
 		node, err := k8stest.GetNodeForPodByPrefix(appPodName, common.NSDefault)
-		Expect(err).ToNot(HaveOccurred(), "failed to node for app pod %s, %v", appPodName, err)
-		Expect(node).Should(Equal(targetNode), "app pod %s does not scheduled on node %s", targetNode)
+		Expect(err).ToNot(HaveOccurred(), "failed to get node for app pod %s, %v", appPodName, err)
+		Expect(node).Should(Equal(targetNode), "app pod %s is not scheduled on node %s", appPodName, targetNode)
 	}
 
 	// remove all fio application
+	logf.Log.Info("remove all fio applications")
 	for ix, app := range appInstances {
 		logf.Log.Info(fmt.Sprintf("%d)", ix), 
"fio-pod", app.Decor, "err", err) // remove app pod, pvc,sc @@ -298,13 +330,12 @@ func customTopologyWfcTest(decor string, engine common.OpenEbsEngine, volType co err = k8stest.UnlabelNode(targetNode, key) Expect(err).ToNot(HaveOccurred(), "failed to remove label from node %s", targetNode) - // update csi daemonset with ALLOWED_TOPOLOGIES env with test-specific topology key - err = k8stest.UpdateDemonsetContainerAllEnv(productConfig.LvmEngineDaemonSetName, - productConfig.LvmEnginePluginContainerName, - common.NSOpenEBS(), - dsOldEnvList, - ) - Expect(err).ToNot(HaveOccurred(), "failed to reset lvm Daemonset with old env %s", productConfig.LvmEngineDaemonSetName) + // restart lvm daemonset pods so that topology key present in csinode kubernetes object + // for local.csi.openebs.io plugin driver should be removed before starting new topology test + // and to do so , daemonset pods needs to restated after removing node label with the key + err = k8stest.DeletePodsByLabel(label, common.NSOpenEBS()) + Expect(err).To(BeNil(), "failed to restart lvm daemonset pods with label %s", label) + // verify lvm daemonset to be ready Eventually(func() bool { return k8stest.DaemonSetReady(productConfig.LvmEngineDaemonSetName, common.NSOpenEBS()) @@ -333,8 +364,21 @@ var _ = Describe("lvm_custom_node_topology", func() { AfterEach(func() { // Check resource leakage. - err := e2e_ginkgo.AfterEachK8sCheck() - Expect(err).ToNot(HaveOccurred()) + afterErr := e2e_ginkgo.AfterEachK8sCheck() + // remove all fio application + for ix, app := range appInstances { + logf.Log.Info(fmt.Sprintf("%d)", ix), "fio-pod", app.Decor) + // remove app pod, pvc,sc + err := app.Cleanup() + Expect(err).To(BeNil(), "failed to clean resources") + } + if targetNode != "" { + // Remove the labels from nodes after the end of test + err := k8stest.UnlabelNode(targetNode, key) + Expect(err).ToNot(HaveOccurred(), "failed to remove label from node %s", targetNode) + targetNode = "" + } + Expect(afterErr).ToNot(HaveOccurred()) }) // immediate binding @@ -353,6 +397,9 @@ var _ = BeforeSuite(func() { err := e2e_ginkgo.SetupTestEnv() Expect(err).ToNot(HaveOccurred(), "failed to setup test environment in BeforeSuite : SetupTestEnv %v", err) + //setup nodes with lvm pv and vg + nodeConfig, err = lvm.SetupLvmNodes("lvmvg", 10737418240) + Expect(err).ToNot(HaveOccurred(), "failed to setup lvm pv and vg") }) var _ = AfterSuite(func() { diff --git a/src/tests/lvm/lvm_ha_controller/lvm_ha_controller_test.go b/src/tests/lvm/lvm_ha_controller/lvm_ha_controller_test.go index e6df71ee..f1d6d72d 100644 --- a/src/tests/lvm/lvm_ha_controller/lvm_ha_controller_test.go +++ b/src/tests/lvm/lvm_ha_controller/lvm_ha_controller_test.go @@ -4,7 +4,6 @@ import ( "testing" "time" - "github.com/openebs/openebs-e2e/common/e2e_agent" "github.com/openebs/openebs-e2e/common/e2e_config" "github.com/openebs/openebs-e2e/common/e2e_ginkgo" "github.com/openebs/openebs-e2e/common/k8sinstall" @@ -39,42 +38,23 @@ var nodeConfig lvm.LvmNodesDevicePvVgConfig var defLeaseSwitchTime = 120 // in seconds var nodesWithoutTaint []string var lvmControllerOrgReplica int32 +var app k8stest.FioApplication var volumeProvisionErrorMsg = "waiting for a volume to be created" func controllerHaTest(decor string, engine common.OpenEbsEngine, volType common.VolumeType, fstype common.FileSystemType, volBindModeWait bool) { - + var err error e2e_config := e2e_config.GetConfig().Product - app := k8stest.FioApplication{ + app = k8stest.FioApplication{ Decor: decor, - VolSizeMb: 4096, + VolSizeMb: 1024, 
OpenEbsEngine: engine, VolType: volType, FsType: fstype, - Loops: 5, + Loops: 7, VolWaitForFirstConsumer: volBindModeWait, SkipPvcVerificationAfterCreate: true, } - loopDevice := e2e_agent.LoopDevice{ - Size: 10737418240, - ImgDir: "/tmp", - } - - workerNodes, err := lvm.ListLvmNode(common.NSOpenEBS()) - Expect(err).ToNot(HaveOccurred(), "failed to list worker node") - - nodeConfig = lvm.LvmNodesDevicePvVgConfig{ - VgName: "lvmvg", - NodeDeviceMap: make(map[string]e2e_agent.LoopDevice), // Properly initialize the map - } - for _, node := range workerNodes { - nodeConfig.NodeDeviceMap[node] = loopDevice - } - - logf.Log.Info("setup node with loop device, pv and vg", "node config", nodeConfig) - err = nodeConfig.ConfigureLvmNodesWithDeviceAndVg() - Expect(err).ToNot(HaveOccurred(), "failed to setup node") - // setup sc parameters app.Lvm = k8stest.LvmOptions{ VolGroup: nodeConfig.VgName, @@ -102,6 +82,10 @@ func controllerHaTest(decor string, engine common.OpenEbsEngine, volType common. err = app.CreateVolume() Expect(err).To(BeNil(), "failed to create pvc") + // sleep for 30 seconds + logf.Log.Info("Sleep for 30 seconds") + time.Sleep(30 * time.Second) + // verify pvc to be in pending state pvcPhase, err := k8stest.GetPvcStatusPhase(app.GetPvcName(), common.NSDefault) Expect(err).ToNot(HaveOccurred(), "failed to get pvc phase") @@ -127,11 +111,6 @@ func controllerHaTest(decor string, engine common.OpenEbsEngine, volType common. Expect(isEventPresent).To(BeTrue()) // Scale up the lvm-controller deployment replica to initial replica + 1 - err = k8stest.RestoreDeploymentReplicas(lvmControllerName, common.NSOpenEBS(), 120, lvmControllerOrgReplica+1) - Expect(err).To(BeNil(), "failed to scale deployment %s, error: %v", lvmControllerName, err) - - // get the no of replicas in lvm-controller deployment - // Scale up the lvm-controller deployment logf.Log.Info("Scale up lvm-controller deployment") _, err = k8sinstall.ScaleLvmControllerViaHelm(lvmControllerOrgReplica + 1) Expect(err).To(BeNil(), "failed to scale deployment %s, error: %v", lvmControllerName, err) @@ -227,12 +206,11 @@ var _ = Describe("lvm_ha_controller", func() { AfterEach(func() { // Check resource leakage. 
-		err := e2e_ginkgo.AfterEachK8sCheck()
-		Expect(err).ToNot(HaveOccurred())
+		afterErr := e2e_ginkgo.AfterEachK8sCheck()
 		if len(nodesWithoutTaint) != 0 {
 			// remove taints form nodes
 			for _, node := range nodesWithoutTaint {
-				err = k8stest.RemoveNoScheduleTaintFromNode(node)
+				err := k8stest.RemoveNoScheduleTaintFromNode(node)
 				Expect(err).To(BeNil(), "failed to taint node %s", node)
 			}
 			ready, err := k8stest.OpenEBSReady(10, 540)
@@ -240,31 +218,31 @@ var _ = Describe("lvm_ha_controller", func() {
 			Expect(ready).To(BeTrue(), "some of the openebs pods are not running")
 		}
 
+		// cleanup k8s resources if exist
+		logf.Log.Info("cleanup k8s resources if exist")
+		err := app.Cleanup()
+		Expect(err).ToNot(HaveOccurred(), "failed to cleanup k8s resources")
+
 		// Scale up the lvm-controller deployment replica to initial replica
 		logf.Log.Info("Scale up lvm-controller deployment")
 		_, err = k8sinstall.ScaleLvmControllerViaHelm(lvmControllerOrgReplica)
 		Expect(err).To(BeNil(), "failed to scale deployment %s, error: %v", e2e_config.GetConfig().Product.LvmEngineControllerDeploymentName, err)
-	})
-
-	It("lvm ext4: should verify high availability mode", func() {
-		controllerHaTest("lvm-ha", common.Lvm, common.VolFileSystem, common.Ext4FsType, true)
-	})
-	It("lvm block: should verify high availability mode", func() {
-		controllerHaTest("lvm-ha", common.Lvm, common.VolRawBlock, common.NoneFsType, true)
+		Expect(afterErr).ToNot(HaveOccurred())
 	})
 
 	// immediate binding
 	It("lvm ext4 immediate binding: should verify high availability mode", func() {
 		controllerHaTest("lvm-ha", common.Lvm, common.VolFileSystem, common.Ext4FsType, false)
 	})
-	It("lvm block immediate binding: should verify high availability mode", func() {
-		controllerHaTest("lvm-ha", common.Lvm, common.VolRawBlock, common.NoneFsType, false)
-	})
+
 })
 
 var _ = BeforeSuite(func() {
 	err := e2e_ginkgo.SetupTestEnv()
 	Expect(err).ToNot(HaveOccurred(), "failed to setup test environment in BeforeSuite : SetupTestEnv %v", err)
 
+	//setup nodes with lvm pv and vg
+	nodeConfig, err = lvm.SetupLvmNodes("lvmvg", 10737418240)
+	Expect(err).ToNot(HaveOccurred(), "failed to setup lvm pv and vg")
 })
 
diff --git a/src/tests/lvm/lvm_thick_volume_resize/lvm_thick_volume_resize_test.go b/src/tests/lvm/lvm_thick_volume_resize/lvm_thick_volume_resize_test.go
index 0381e0d9..ba334b58 100644
--- a/src/tests/lvm/lvm_thick_volume_resize/lvm_thick_volume_resize_test.go
+++ b/src/tests/lvm/lvm_thick_volume_resize/lvm_thick_volume_resize_test.go
@@ -6,6 +6,7 @@ import (
 	"github.com/openebs/openebs-e2e/common"
 	"github.com/openebs/openebs-e2e/common/e2e_ginkgo"
 	"github.com/openebs/openebs-e2e/common/k8stest"
+	"github.com/openebs/openebs-e2e/common/lvm"
 	volumeResize "github.com/openebs/openebs-e2e/src/tests/lvm/common"
 
 	. "github.com/onsi/ginkgo/v2"
@@ -24,6 +25,8 @@ import (
 // And pvc and pv objects should verify that capacity
 // And application should be able to use that resized space
 
+var nodeConfig lvm.LvmNodesDevicePvVgConfig
+
 func TestLvmThickVolumeResizeTest(t *testing.T) {
 	// Initialise test and set class and file names for reports
 	e2e_ginkgo.InitTesting(t, "lvm_thick_volume_resize", "lvm_thick_volume_resize")
@@ -39,36 +42,43 @@ var _ = Describe("lvm_thick_volume_resize", func() {
 
 	AfterEach(func() {
 		// Check resource leakage.
-		err := e2e_ginkgo.AfterEachK8sCheck()
-		Expect(err).ToNot(HaveOccurred())
+		afterErr := e2e_ginkgo.AfterEachK8sCheck()
+		// cleanup k8s resources if exist
+		logf.Log.Info("cleanup k8s resources if exist")
+		err := volumeResize.ResizeApp.Cleanup()
+		Expect(err).ToNot(HaveOccurred(), "failed to cleanup k8s resources")
+		err = volumeResize.ResizeApp2.Cleanup()
+		Expect(err).ToNot(HaveOccurred(), "failed to cleanup k8s resources")
+
+		Expect(afterErr).ToNot(HaveOccurred())
 	})
 
 	It("lvm ext4: should verify thick volume resize", func() {
-		volumeResize.LvmVolumeResizeTest("lvm-volume-resize", common.Lvm, common.VolFileSystem, common.Ext4FsType, true, common.No)
+		volumeResize.LvmVolumeResizeTest("lvm-volume-resize", common.Lvm, nodeConfig.VgName, common.VolFileSystem, common.Ext4FsType, true, common.No)
 	})
 	It("lvm xfs: should verify thick volume resize", func() {
-		volumeResize.LvmVolumeResizeTest("lvm-volume-resize", common.Lvm, common.VolFileSystem, common.XfsFsType, true, common.No)
+		volumeResize.LvmVolumeResizeTest("lvm-volume-resize", common.Lvm, nodeConfig.VgName, common.VolFileSystem, common.XfsFsType, true, common.No)
 	})
 	It("lvm btrfs: should verify thick volume resize", func() {
-		volumeResize.LvmVolumeResizeTest("lvm-volume-resize", common.Lvm, common.VolFileSystem, common.BtrfsFsType, true, common.No)
+		volumeResize.LvmVolumeResizeTest("lvm-volume-resize", common.Lvm, nodeConfig.VgName, common.VolFileSystem, common.BtrfsFsType, true, common.No)
 	})
 	It("lvm block: should verify thick volume resize", func() {
-		volumeResize.LvmVolumeResizeTest("lvm-volume-resize", common.Lvm, common.VolRawBlock, common.NoneFsType, true, common.No)
+		volumeResize.LvmVolumeResizeTest("lvm-volume-resize", common.Lvm, nodeConfig.VgName, common.VolRawBlock, common.NoneFsType, true, common.No)
 	})
 
 	// immediate binding
 	It("lvm ext4: should verify thick volume resize", func() {
-		volumeResize.LvmVolumeResizeTest("lvm-volume-resize", common.Lvm, common.VolFileSystem, common.Ext4FsType, false, common.No)
+		volumeResize.LvmVolumeResizeTest("lvm-volume-resize", common.Lvm, nodeConfig.VgName, common.VolFileSystem, common.Ext4FsType, false, common.No)
 	})
 	It("lvm xfs: should verify thick volume resize", func() {
-		volumeResize.LvmVolumeResizeTest("lvm-volume-resize", common.Lvm, common.VolFileSystem, common.XfsFsType, false, common.No)
+		volumeResize.LvmVolumeResizeTest("lvm-volume-resize", common.Lvm, nodeConfig.VgName, common.VolFileSystem, common.XfsFsType, false, common.No)
 	})
 	It("lvm btrfs: should verify thick volume resize", func() {
-		volumeResize.LvmVolumeResizeTest("lvm-volume-resize", common.Lvm, common.VolFileSystem, common.BtrfsFsType, false, common.No)
+		volumeResize.LvmVolumeResizeTest("lvm-volume-resize", common.Lvm, nodeConfig.VgName, common.VolFileSystem, common.BtrfsFsType, false, common.No)
 	})
 	It("lvm block: should verify thick volume resize", func() {
-		volumeResize.LvmVolumeResizeTest("lvm-volume-resize", common.Lvm, common.VolRawBlock, common.NoneFsType, false, common.No)
+		volumeResize.LvmVolumeResizeTest("lvm-volume-resize", common.Lvm, nodeConfig.VgName, common.VolRawBlock, common.NoneFsType, false, common.No)
 	})
 })
 
@@ -76,15 +86,19 @@ var _ = BeforeSuite(func() {
 	err := e2e_ginkgo.SetupTestEnv()
 	Expect(err).ToNot(HaveOccurred(), "failed to setup test environment in BeforeSuite : SetupTestEnv %v", err)
 
+	//setup nodes with lvm pv and vg
+	nodeConfig, err = lvm.SetupLvmNodes("lvmvg", 10737418240)
+	Expect(err).ToNot(HaveOccurred(), "failed to setup lvm pv and vg")
+
 })
 
 var _ = AfterSuite(func() {
-	// NB This 
only tears down the local structures for talking to the cluster, - // not the kubernetes cluster itself. By("tearing down the test environment") - logf.Log.Info("remove node with device and vg", "node config", volumeResize.NodeConfig) - err := volumeResize.NodeConfig.RemoveConfiguredLvmNodesWithDeviceAndVg() + logf.Log.Info("remove node with device and vg", "node config", nodeConfig) + err := nodeConfig.RemoveConfiguredLvmNodesWithDeviceAndVg() Expect(err).ToNot(HaveOccurred(), "failed to cleanup node with device") + // NB This only tears down the local structures for talking to the cluster, + // not the kubernetes cluster itself. By("tearing down the test environment") err = k8stest.TeardownTestEnv() Expect(err).ToNot(HaveOccurred(), "failed to tear down test environment in AfterSuite : TeardownTestEnv %v", err) }) diff --git a/src/tests/lvm/lvm_thin_volume_resize/lvm_thin_volume_resize_test.go b/src/tests/lvm/lvm_thin_volume_resize/lvm_thin_volume_resize_test.go index 0e18979e..83fee0ae 100644 --- a/src/tests/lvm/lvm_thin_volume_resize/lvm_thin_volume_resize_test.go +++ b/src/tests/lvm/lvm_thin_volume_resize/lvm_thin_volume_resize_test.go @@ -4,8 +4,10 @@ import ( "testing" "github.com/openebs/openebs-e2e/common" + "github.com/openebs/openebs-e2e/common/e2e_agent" "github.com/openebs/openebs-e2e/common/e2e_ginkgo" "github.com/openebs/openebs-e2e/common/k8stest" + "github.com/openebs/openebs-e2e/common/lvm" volumeResize "github.com/openebs/openebs-e2e/src/tests/lvm/common" . "github.com/onsi/ginkgo/v2" @@ -24,6 +26,8 @@ import ( // And pvc and pv objects should verify that capacity // And application should be able to use that resized space +var nodeConfig lvm.LvmNodesDevicePvVgConfig + func TestLvmThinVolumeResizeTest(t *testing.T) { // Initialise test and set class and file names for reports e2e_ginkgo.InitTesting(t, "lvm_thin_volume_resize", "lvm_thin_volume_resize") @@ -39,36 +43,47 @@ var _ = Describe("lvm_thin_volume_resize", func() { AfterEach(func() { // Check resource leakage. 
-		err := e2e_ginkgo.AfterEachK8sCheck()
-		Expect(err).ToNot(HaveOccurred())
+		afterErr := e2e_ginkgo.AfterEachK8sCheck()
+		// cleanup k8s resources if exist
+		logf.Log.Info("cleanup k8s resources if exist")
+		err := volumeResize.ResizeApp.Cleanup()
+		Expect(err).ToNot(HaveOccurred(), "failed to cleanup k8s resources")
+		err = volumeResize.ResizeApp2.Cleanup()
+		Expect(err).ToNot(HaveOccurred(), "failed to cleanup k8s resources")
+		if volumeResize.ThinPoolNode != "" {
+			out, err := e2e_agent.LvmLvRemoveThinPool(volumeResize.ThinPoolNode, "lvmvg")
+			Expect(err).To(BeNil(), "failed to remove lv thin pool on node %s with vg %s, output: %s", volumeResize.ThinPoolNode, "lvmvg", out)
+		}
+
+		Expect(afterErr).ToNot(HaveOccurred())
 	})
 
 	It("lvm ext4: should verify thin volume resize", func() {
-		volumeResize.LvmVolumeResizeTest("lvm-thin-volume-resize", common.Lvm, common.VolFileSystem, common.Ext4FsType, true, common.Yes)
+		volumeResize.LvmVolumeResizeTest("lvm-thin-volume-resize", common.Lvm, nodeConfig.VgName, common.VolFileSystem, common.Ext4FsType, true, common.Yes)
 	})
 	It("lvm xfs: should verify thin volume resize", func() {
-		volumeResize.LvmVolumeResizeTest("lvm-thin-volume-resize", common.Lvm, common.VolFileSystem, common.XfsFsType, true, common.Yes)
+		volumeResize.LvmVolumeResizeTest("lvm-thin-volume-resize", common.Lvm, nodeConfig.VgName, common.VolFileSystem, common.XfsFsType, true, common.Yes)
 	})
 	It("lvm btrfs: should verify thin volume resize", func() {
-		volumeResize.LvmVolumeResizeTest("lvm-thin-volume-resize", common.Lvm, common.VolFileSystem, common.BtrfsFsType, true, common.Yes)
+		volumeResize.LvmVolumeResizeTest("lvm-thin-volume-resize", common.Lvm, nodeConfig.VgName, common.VolFileSystem, common.BtrfsFsType, true, common.Yes)
 	})
 	It("lvm block: should verify thin volume resize", func() {
-		volumeResize.LvmVolumeResizeTest("lvm-thin-volume-resize", common.Lvm, common.VolRawBlock, common.NoneFsType, true, common.Yes)
+		volumeResize.LvmVolumeResizeTest("lvm-thin-volume-resize", common.Lvm, nodeConfig.VgName, common.VolRawBlock, common.NoneFsType, true, common.Yes)
 	})
 
 	// immediate binding
 	It("lvm ext4: should verify thin volume resize", func() {
-		volumeResize.LvmVolumeResizeTest("lvm-thin-volume-resize", common.Lvm, common.VolFileSystem, common.Ext4FsType, false, common.Yes)
+		volumeResize.LvmVolumeResizeTest("lvm-thin-volume-resize", common.Lvm, nodeConfig.VgName, common.VolFileSystem, common.Ext4FsType, false, common.Yes)
 	})
 	It("lvm xfs: should verify thin volume resize", func() {
-		volumeResize.LvmVolumeResizeTest("lvm-thin-volume-resize", common.Lvm, common.VolFileSystem, common.XfsFsType, false, common.Yes)
+		volumeResize.LvmVolumeResizeTest("lvm-thin-volume-resize", common.Lvm, nodeConfig.VgName, common.VolFileSystem, common.XfsFsType, false, common.Yes)
 	})
 	It("lvm btrfs: should verify thin volume resize", func() {
-		volumeResize.LvmVolumeResizeTest("lvm-thin-volume-resize", common.Lvm, common.VolFileSystem, common.BtrfsFsType, false, common.Yes)
+		volumeResize.LvmVolumeResizeTest("lvm-thin-volume-resize", common.Lvm, nodeConfig.VgName, common.VolFileSystem, common.BtrfsFsType, false, common.Yes)
 	})
 	It("lvm block: should verify thin volume resize", func() {
-		volumeResize.LvmVolumeResizeTest("lvm-thin-volume-resize", common.Lvm, common.VolRawBlock, common.NoneFsType, false, common.Yes)
+		volumeResize.LvmVolumeResizeTest("lvm-thin-volume-resize", common.Lvm, nodeConfig.VgName, common.VolRawBlock, common.NoneFsType, false, common.Yes)
 	})
 })
 
@@ -76,15 +91,21 @@ var _ = BeforeSuite(func() {
 	err := 
e2e_ginkgo.SetupTestEnv() Expect(err).ToNot(HaveOccurred(), "failed to setup test environment in BeforeSuite : SetupTestEnv %v", err) + //setup nodes with lvm pv and vg + nodeConfig, err = lvm.SetupLvmNodes("lvmvg", 10737418240) + Expect(err).ToNot(HaveOccurred(), "failed to setup lvm pv and vg") + err = lvm.EnableLvmThinPoolAutoExpansion(75, 20) + Expect(err).ToNot(HaveOccurred(), "failed to update thin_pool_autoextend_threshold and thin_pool_autoextend_percent in lvm.conf") + }) var _ = AfterSuite(func() { - // NB This only tears down the local structures for talking to the cluster, - // not the kubernetes cluster itself. By("tearing down the test environment") - logf.Log.Info("remove node with device and vg", "node config", volumeResize.NodeConfig) - err := volumeResize.NodeConfig.RemoveConfiguredLvmNodesWithDeviceAndVg() + // logf.Log.Info("remove node with device and vg", "node config", nodeConfig) + err := nodeConfig.RemoveConfiguredLvmNodesWithDeviceAndVg() Expect(err).ToNot(HaveOccurred(), "failed to cleanup node with device") + // NB This only tears down the local structures for talking to the cluster, + // not the kubernetes cluster itself. By("tearing down the test environment") err = k8stest.TeardownTestEnv() Expect(err).ToNot(HaveOccurred(), "failed to tear down test environment in AfterSuite : TeardownTestEnv %v", err) }) diff --git a/src/tests/lvm/lvm_volume_snapshot/lvm_volume_snapshot_test.go b/src/tests/lvm/lvm_volume_snapshot/lvm_volume_snapshot_test.go index 0b9c893c..877cdc4c 100644 --- a/src/tests/lvm/lvm_volume_snapshot/lvm_volume_snapshot_test.go +++ b/src/tests/lvm/lvm_volume_snapshot/lvm_volume_snapshot_test.go @@ -5,11 +5,11 @@ import ( "testing" "time" - "github.com/openebs/openebs-e2e/common/e2e_agent" "github.com/openebs/openebs-e2e/common/e2e_config" "github.com/openebs/openebs-e2e/common/e2e_ginkgo" "github.com/openebs/openebs-e2e/common/lvm" "github.com/openebs/openebs-e2e/common/mayastor/snapshot" + lvmCommon "github.com/openebs/openebs-e2e/src/tests/lvm/common" "github.com/openebs/openebs-e2e/common" "github.com/openebs/openebs-e2e/common/k8stest" @@ -31,63 +31,49 @@ import ( Then the snapshot should be successfully created And the snapshot object should be ready And the snapshot content object associated with snapshot should be ready + And snapshot content restore size should be zero */ var nodeConfig lvm.LvmNodesDevicePvVgConfig +var app k8stest.FioApplication +var snapshotClassName, snapshotName, snapshotNamespace string func volumeSnapshotTest(decor string, engine common.OpenEbsEngine, volType common.VolumeType, fstype common.FileSystemType, volBindModeWait bool) { - - app := k8stest.FioApplication{ + app = k8stest.FioApplication{} + app = k8stest.FioApplication{ Decor: decor, - VolSizeMb: 4096, + VolSizeMb: 1024, OpenEbsEngine: engine, VolType: volType, FsType: fstype, - Loops: 5, + Loops: 10, VolWaitForFirstConsumer: volBindModeWait, } - loopDevice := e2e_agent.LoopDevice{ - Size: 10737418240, - ImgDir: "/tmp", - } - - workerNodes, err := lvm.ListLvmNode(common.NSMayastor()) - Expect(err).ToNot(HaveOccurred(), "failed to list worker node") - - nodeConfig = lvm.LvmNodesDevicePvVgConfig{ - VgName: "lvmvg", - NodeDeviceMap: make(map[string]e2e_agent.LoopDevice), // Properly initialize the map - } - for _, node := range workerNodes { - nodeConfig.NodeDeviceMap[node] = loopDevice - } - - logf.Log.Info("setup node with loop device, pv and vg", "node config", nodeConfig) - err = nodeConfig.ConfigureLvmNodesWithDeviceAndVg() - 
Expect(err).ToNot(HaveOccurred(), "failed to setup node") - // setup sc parameters app.Lvm = k8stest.LvmOptions{ - VolGroup: nodeConfig.VgName, + // VolGroup: nodeConfig.VgName, + VolGroup: "lvmvg", Storage: "lvm", ThinProvision: common.No, } - + if app.FsType == common.BtrfsFsType { + app.FsPercent = 60 + } logf.Log.Info("create sc, pvc, fio pod") - err = app.DeployApplication() + err := app.DeployApplication() Expect(err).To(BeNil(), "failed to deploy app") time.Sleep(30 * time.Second) // snapshot steps - snapshotClassName := fmt.Sprintf("snapshotclass-%s", app.GetPvcName()) - snapshotName := fmt.Sprintf("snapshot-%s", app.GetPvcName()) - snapshotNamespace := common.NSDefault + snapshotClassName = fmt.Sprintf("snapshotclass-%s", app.GetPvcName()) + snapshotName = fmt.Sprintf("snapshot-%s", app.GetPvcName()) + snapshotNamespace = common.NSDefault logf.Log.Info("Create Snapshot", "Snapshot class", snapshotClassName, "Snapshot", snapshotName, "Namespace", snapshotNamespace) csiDriver := e2e_config.GetConfig().Product.LvmEngineProvisioner // create snapshot for volume - snapshotObj, snapshotContentName, err := k8stest.CreateVolumeSnapshot(snapshotClassName, snapshotName, app.GetPvcName(), common.NSDefault, csiDriver) + snapshotObj, snapshotContentName, err := k8stest.CreateVolumeSnapshot(snapshotClassName, snapshotName, app.GetPvcName(), snapshotNamespace, csiDriver) Expect(err).ToNot(HaveOccurred()) logf.Log.Info("Snapshot Created ", "Snapshot", snapshotObj, "Snapshot Content Name", snapshotContentName) @@ -95,9 +81,9 @@ func volumeSnapshotTest(decor string, engine common.OpenEbsEngine, volType commo Expect(snapshotObj).ShouldNot(BeNil()) // verify Snapshot CR - status, err := snapshot.VerifySuccessfulSnapshotCreation(snapshotName, snapshotContentName, snapshotNamespace, true) + status, err := lvmCommon.LvmVolumeSnapshotVerify(snapshotName, snapshotContentName, snapshotNamespace, false) Expect(err).ToNot(HaveOccurred(), "error while verifying snapshot creation") - Expect(status).Should(BeTrue(), "failed to verify successful snapshot %s creation", snapshotName) + Expect(status).Should(BeTrue(), "failed to verify successful lvm snapshot %s creation", snapshotName) // Check fio pod status phase, podLogSysnopsis, err := k8stest.CheckFioPodCompleted(app.GetPodName(), common.NSDefault) @@ -107,11 +93,11 @@ func volumeSnapshotTest(decor string, engine common.OpenEbsEngine, volType commo // remove snapshot and snapshot class err = snapshot.DeleteVolumeSnapshot(snapshotClassName, snapshotName, common.NSDefault) Expect(err).ToNot(HaveOccurred()) + snapshotName = "" // remove app pod, pvc,sc err = app.Cleanup() Expect(err).To(BeNil(), "failed to clean resources") - } func TestLvmVolumeSnapshotTest(t *testing.T) { @@ -129,11 +115,20 @@ var _ = Describe("lvm_volume_snapshot", func() { AfterEach(func() { // Check resource leakage. 
-		err := e2e_ginkgo.AfterEachK8sCheck()
-		Expect(err).ToNot(HaveOccurred())
+		afterErr := e2e_ginkgo.AfterEachK8sCheck()
+		// cleanup k8s resources if exist
+		logf.Log.Info("cleanup k8s resources if exist")
+		// remove snapshot and snapshot class
+		if snapshotName != "" {
+			err := snapshot.DeleteVolumeSnapshot(snapshotClassName, snapshotName, snapshotNamespace)
+			Expect(err).ToNot(HaveOccurred())
+		}
+		err := app.Cleanup()
+		Expect(err).ToNot(HaveOccurred(), "failed to cleanup k8s resources")
+
+		Expect(afterErr).ToNot(HaveOccurred())
 	})
 
-	// immediate binding
 	It("lvm ext4 immediate binding: should verify a volume snapshot", func() {
 		volumeSnapshotTest("lvm-ext4", common.Lvm, common.VolFileSystem, common.Ext4FsType, false)
 	})
@@ -152,15 +147,19 @@ var _ = BeforeSuite(func() {
 	err := e2e_ginkgo.SetupTestEnv()
 	Expect(err).ToNot(HaveOccurred(), "failed to setup test environment in BeforeSuite : SetupTestEnv %v", err)
 
+	//setup nodes with lvm pv and vg
+	nodeConfig, err = lvm.SetupLvmNodes("lvmvg", 10737418240)
+	Expect(err).ToNot(HaveOccurred(), "failed to setup lvm pv and vg")
+
 })
 
 var _ = AfterSuite(func() {
-	// NB This only tears down the local structures for talking to the cluster,
-	// not the kubernetes cluster itself. By("tearing down the test environment")
-	logf.Log.Info("remove node with device and vg", "node config", nodeConfig)
+	// logf.Log.Info("remove node with device and vg", "node config", nodeConfig)
 	err := nodeConfig.RemoveConfiguredLvmNodesWithDeviceAndVg()
 	Expect(err).ToNot(HaveOccurred(), "failed to cleanup node with device")
+	// NB This only tears down the local structures for talking to the cluster,
+	// not the kubernetes cluster itself. By("tearing down the test environment")
 	err = k8stest.TeardownTestEnv()
 	Expect(err).ToNot(HaveOccurred(), "failed to tear down test environment in AfterSuite : TeardownTestEnv %v", err)
 })
diff --git a/src/tests/zfs/zfs_custom_node_topology/zfs_custom_node_topology_test.go b/src/tests/zfs/zfs_custom_node_topology/zfs_custom_node_topology_test.go
index 5c84b7ce..7d836251 100644
--- a/src/tests/zfs/zfs_custom_node_topology/zfs_custom_node_topology_test.go
+++ b/src/tests/zfs/zfs_custom_node_topology/zfs_custom_node_topology_test.go
@@ -3,6 +3,7 @@ package zfs_custom_node_topology
 import (
 	"fmt"
 	"testing"
+	"time"
 
 	"github.com/openebs/openebs-e2e/common/e2e_config"
 	"github.com/openebs/openebs-e2e/common/e2e_ginkgo"
@@ -119,7 +120,7 @@ func customTopologyImmediateTest(decor string, engine common.OpenEbsEngine, volT
 	productConfig.ZfsEngineComponentDsPodLabelValue)
 
 	// restart zfs daemonset pods so that topology key present in csinode kubernetes object
-	// for local.csi.openebs.io plugin driver should be removed before starting new topology test
+	// for zfs.csi.openebs.io plugin driver should be removed before starting new topology test
 	// and to do so , daemonset pods need to restarted after removing node label with the key
 	err = k8stest.DeletePodsByLabel(label, common.NSOpenEBS())
 	Expect(err).To(BeNil(), "failed to restart zfs daemonset pods with label %s", label)
@@ -158,7 +159,7 @@
 		"5s",
 	).Should(BeFalse())
 
-	Expect(csiNodeErr).ToNot(HaveOccurred(), "failed to get csi node %s, %v", targetNode, csiNodeErr)
+	Expect(csiNodeErr).ToNot(HaveOccurred(), "failed to get csi node %s, %v", workerNodes[0], csiNodeErr)
 }
 
 /*
@@ -232,6 +233,9 @@ func customTopologyWfcTest(decor string, engine common.OpenEbsEngine, volType co
 		Expect(err).ToNot(HaveOccurred(), "failed to create 
volume %s, %v", app.Decor, err)
 	}
 
+	logf.Log.Info("Sleep for 30 seconds before verifying pvc's pending state")
+	time.Sleep(30 * time.Second)
+
 	for ix, app := range appInstances {
 		logf.Log.Info(fmt.Sprintf("%d)", ix), "zfs-volume", app.Decor)
 		// verify pvc to be in pending state
@@ -245,7 +249,7 @@ func customTopologyWfcTest(decor string, engine common.OpenEbsEngine, volType co
 		productConfig.ZfsEngineComponentDsPodLabelValue)
 
 	// Restart zfs daemonset pods after applying node label with key
-	// so that csinode kubernetes object for local.csi.openebs.io plugin driver picks
+	// so that csinode kubernetes object for zfs.csi.openebs.io plugin driver picks
 	// that particular topology key for scheduling volume
 	err = k8stest.DeletePodsByLabel(label, common.NSOpenEBS())
 	Expect(err).To(BeNil(), "failed to restart zfs daemonset pods with label %s", label)
@@ -330,7 +334,7 @@ func customTopologyWfcTest(decor string, engine common.OpenEbsEngine, volType co
 	targetNode = ""
 
 	// restart zfs daemonset pods so that topology key present in csinode kubernetes object
-	// for local.csi.openebs.io plugin driver should be removed before starting new topology test
+	// for zfs.csi.openebs.io plugin driver should be removed before starting new topology test
 	// and to do so , daemonset pods needs to restated after removing node label with the key
 	err = k8stest.DeletePodsByLabel(label, common.NSOpenEBS())
 	Expect(err).To(BeNil(), "failed to restart zfs daemonset pods with label %s", label)
diff --git a/testplans/lvm.yaml b/testplans/lvm.yaml
index 3f1c85db..ba9c7038 100644
--- a/testplans/lvm.yaml
+++ b/testplans/lvm.yaml
@@ -6,4 +6,9 @@ meta:
   - common
 testsuites:
   - lvm_volume_provisioning
-  - lvm_shared_mount
\ No newline at end of file
+  - lvm_shared_mount
+  - lvm_volume_snapshot
+  - lvm_thick_volume_resize
+  # - lvm_ha_controller  # this test requires a minimum of two worker nodes in the cluster
+  - lvm_custom_node_topology
+  - lvm_thin_volume_resize
\ No newline at end of file
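
For reviewers trying the refactor locally, the sketch below shows how the helpers touched by this patch are intended to compose: `lvm.SetupLvmNodes` now owns the loop-device/PV/VG boilerplate that each suite previously built by hand, and a `FioApplication` consumes the resulting volume group through `LvmOptions`. This is a minimal sketch and not part of the patch; the test function name, namespace handling, and literal values are illustrative, and it assumes only the signatures visible in this diff (`SetupLvmNodes(vgName string, size int64)`, `RemoveConfiguredLvmNodesWithDeviceAndVg()`, `DeployApplication()`, `Cleanup()`).

```go
package example

import (
	"testing"

	"github.com/openebs/openebs-e2e/common"
	"github.com/openebs/openebs-e2e/common/k8stest"
	"github.com/openebs/openebs-e2e/common/lvm"
)

// TestLvmHelperFlow is a hypothetical, self-contained walk-through of the
// refactored setup path; it mirrors what the suites above now do across
// BeforeSuite, the test body, and AfterSuite.
func TestLvmHelperFlow(t *testing.T) {
	// One call replaces the per-test loop-device, PV and VG setup that this
	// patch deletes from the individual test files (10 GiB backing file).
	nodeConfig, err := lvm.SetupLvmNodes("lvmvg", 10737418240)
	if err != nil {
		t.Fatalf("failed to set up lvm pv and vg: %v", err)
	}
	// Mirror of the suites' AfterSuite: tear the node configuration down.
	defer func() {
		if err := nodeConfig.RemoveConfiguredLvmNodesWithDeviceAndVg(); err != nil {
			t.Errorf("failed to clean up node devices: %v", err)
		}
	}()

	// Storage-class parameters reference the VG created above instead of a
	// hard-coded name scattered through each test.
	app := k8stest.FioApplication{
		Decor:                   "lvm-example",
		VolSizeMb:               1024,
		OpenEbsEngine:           common.Lvm,
		VolType:                 common.VolFileSystem,
		FsType:                  common.Ext4FsType,
		Loops:                   3,
		VolWaitForFirstConsumer: true,
		Lvm: k8stest.LvmOptions{
			VolGroup:      nodeConfig.VgName,
			Storage:       "lvm",
			ThinProvision: common.No, // common.Yes exercises the thin-pool paths
		},
	}
	if err := app.DeployApplication(); err != nil {
		t.Fatalf("failed to deploy fio app: %v", err)
	}
	// Mirror of the suites' AfterEach cleanup.
	defer func() {
		if err := app.Cleanup(); err != nil {
			t.Errorf("failed to clean up fio app: %v", err)
		}
	}()
}
```

The same shape carries over to the snapshot suite: after `DeployApplication`, a test can create a snapshot with `k8stest.CreateVolumeSnapshot` and then gate on the new `LvmVolumeSnapshotVerify` helper, which bundles the snapshot/content readiness checks and the zero-restore-size assertion into a single call.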