ci: add integration for fresh installation
    - Also, split the CI into two jobs so they can run in parallel

Signed-off-by: Vicente Cheng <[email protected]>
(cherry picked from commit e35cbb3)
Vicente-Cheng authored and WebberHuang1118 committed Dec 25, 2024
1 parent cf0d04b commit bb82d79
Showing 3 changed files with 120 additions and 32 deletions.
92 changes: 83 additions & 9 deletions .github/workflows/basic-ci.yaml
@@ -25,7 +25,7 @@ jobs:
run: |
make validate
make validate-ci
main_jobs:
job-new-installation:
needs: validation
runs-on:
- self-hosted
@@ -42,9 +42,82 @@ jobs:
- name: "Local Deployment (Harvester+Longhorn+Node-Disk-Manager) for testing"
id: vm_deploy
run: |
rm -rf ndm-vagrant-k3s
git clone https://github.com/bk201/vagrant-k3s ndm-vagrant-k3s
pushd ndm-vagrant-k3s
rm -rf ndm-new-vagrant-k3s
git clone https://github.com/bk201/vagrant-k3s ndm-new-vagrant-k3s
pushd ndm-new-vagrant-k3s
yq e -i ".cluster_size = 1" settings.yaml
./new-cluster.sh
echo "VM_DEPLOYED=true" >> "$GITHUB_ENV"
yq e -i ".longhorn_version = \"1.6.3\"" settings.yaml
./scripts/deploy_longhorn.sh
popd
- name: "Patch Image target"
run: |
./ci/scripts/patch-ttl-repo.sh
echo "NDM override result as below:"
cat ci/charts/ndm-override.yaml
- name: "Deploy NDM"
run: |
pushd ndm-new-vagrant-k3s
cp ../ci/scripts/deploy_ndm.sh ./deploy_ndm.sh
cp ../ci/charts/ndm-override.yaml ./ndm-override.yaml
./deploy_ndm.sh
popd
- name: "Add disk"
run: |
pushd ndm-new-vagrant-k3s
./scripts/attach-disk.sh node1 ndm-new-vagrant-k3s
sleep 30
popd
- name: "Run Basic Test"
id: basic-test
run: |
pushd ndm-new-vagrant-k3s
vagrant ssh-config node1 > ../ssh-config
cp kubeconfig ../kubeconfig
popd
echo Running integration tests
NDM_HOME=`pwd` go test -v ./tests/...
- name: "Get NDM logs"
if: always()
run: |
if [ ${{ env.VM_DEPLOYED }} != 'true' ]; then
echo "VM is not deployed, skip getting logs"
exit 0
fi
./ci/scripts/get-debug-info.sh
- name: "Tear Down / Cleanup"
if: always()
run: |
if [ ${{ env.VM_DEPLOYED }} != 'true' ]; then
echo "VM is not deployed, skip VM destroy"
exit 0
fi
rm -rf /tmp/hotplug_disks/ndm-new-vagrant-k3s
pushd ndm-new-vagrant-k3s
vagrant destroy -f --parallel
popd
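The uniquely named clone directories are what let these two jobs share a self-hosted runner: vagrant-libvirt derives the libvirt domain from the project directory (the Go tests below compute `<dir>_node1` the same way), and each job's hotplug disk images live under a matching path. A minimal sketch of that naming chain; the `virsh` check is illustrative, not part of the workflow:

```bash
# Illustrative: the clone directory name namespaces everything for one job.
WORKSPACE=ndm-new-vagrant-k3s     # the upgrade job uses ndm-upgrade-vagrant-k3s
git clone https://github.com/bk201/vagrant-k3s "$WORKSPACE"
# vagrant-libvirt derives the domain name from the directory:
virsh list --all --name | grep "^${WORKSPACE}_node1"
# and this job's hotplug disk images live under a matching path:
ls "/tmp/hotplug_disks/${WORKSPACE}"
```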
jobs-upgrade:
needs: validation
runs-on:
- self-hosted
- golang
steps:
- name: "Clone and check"
uses: actions/checkout@v3
- name: "Build the Image for the Integration Test"
run: |
BUILD_FOR_CI=true make
./ci/scripts/patch-ttl-repo.sh
echo "NDM override result as below:"
cat ci/charts/ndm-override.yaml
- name: "Local Deployment (Harvester+Longhorn+Node-Disk-Manager) for testing"
id: vm_deploy
run: |
rm -rf ndm-upgrade-vagrant-k3s
git clone https://github.com/bk201/vagrant-k3s ndm-upgrade-vagrant-k3s
pushd ndm-upgrade-vagrant-k3s
yq e -i ".cluster_size = 1" settings.yaml
./new-cluster.sh
echo "VM_DEPLOYED=true" >> "$GITHUB_ENV"
@@ -55,8 +128,8 @@ jobs:
popd
- name: "Add disk"
run: |
pushd ndm-vagrant-k3s
./scripts/attach-disk.sh node1
pushd ndm-upgrade-vagrant-k3s
./scripts/attach-disk.sh node1 ndm-upgrade-vagrant-k3s
sleep 30
popd
- name: "Patch Image target (for upgrade)"
@@ -66,14 +139,14 @@
cat ci/charts/ndm-override.yaml
- name: "Upgrade NDM"
run: |
pushd ndm-vagrant-k3s
pushd ndm-upgrade-vagrant-k3s
cp ../ci/scripts/upgrade_ndm.sh ./upgrade_ndm.sh
./upgrade_ndm.sh
popd
- name: "Run Basic Test"
id: basic-test
run: |
pushd ndm-vagrant-k3s
pushd ndm-upgrade-vagrant-k3s
vagrant ssh-config node1 > ../ssh-config
cp kubeconfig ../kubeconfig
popd
@@ -94,6 +167,7 @@ jobs:
echo "VM is not deployed, skip VM destroy"
exit 0
fi
pushd ndm-vagrant-k3s
rm -rf /tmp/hotplug_disks/ndm-upgrade-vagrant-k3s
pushd ndm-upgrade-vagrant-k3s
vagrant destroy -f --parallel
popd
6 changes: 5 additions & 1 deletion ci/scripts/deploy_ndm.sh
@@ -73,10 +73,14 @@ ensure_longhorn_ready

pushd $TOP_DIR

cat >> ndm-override.yaml << 'EOF'
cat >> ndm-override.yaml.default << 'EOF'
autoProvisionFilter: [/dev/sd*]
EOF

if [ ! -f ndm-override.yaml ]; then
mv ndm-override.yaml.default ndm-override.yaml
fi

$HELM pull harvester-node-disk-manager --repo https://charts.harvesterhci.io --untar
$HELM install -f $TOP_DIR/ndm-override.yaml harvester-node-disk-manager ./harvester-node-disk-manager --create-namespace -n harvester-system
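The `.default` indirection above gives a staged override precedence: deploy_ndm.sh appends its stock `autoProvisionFilter` to `ndm-override.yaml.default`, and that file is only promoted when the caller has not already provided an `ndm-override.yaml`. A sketch of the two call paths, assuming the CI layout from this commit:

```bash
# CI path: the workflow stages a patched override first, so the default is unused.
cp ../ci/charts/ndm-override.yaml ./ndm-override.yaml
./deploy_ndm.sh     # [ ! -f ndm-override.yaml ] is false -> the staged file wins

# Bare path: nothing staged, so the generated defaults are promoted.
rm -f ndm-override.yaml
./deploy_ndm.sh     # ndm-override.yaml.default -> ndm-override.yaml
```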

54 changes: 32 additions & 22 deletions tests/integration/test_1_disk_hotplug_test.go
@@ -34,18 +34,18 @@ import (
*/

const (
hotplugTargetNodeName = "ndm-vagrant-k3s_node1"
hotplugDiskXMLFileName = "/tmp/hotplug_disks/node1-sda.xml"
hotplugTargetDiskName = "sda"
)

type HotPlugTestSuite struct {
suite.Suite
SSHClient *goph.Client
clientSet *clientset.Clientset
targetNodeName string
targetDiskName string
curBusPath string // to make sure which path we deployed
SSHClient *goph.Client
clientSet *clientset.Clientset
targetNodeName string
targetDiskName string
hotplugTargetNodeName string
hotplugTargetBaseDir string
}

func (s *HotPlugTestSuite) SetupSuite() {
@@ -84,6 +84,14 @@ func (s *HotPlugTestSuite) SetupSuite() {

s.clientSet, err = clientset.NewForConfig(config)
require.Equal(s.T(), err, nil, "New clientset should not get error")

cmd := fmt.Sprintf("ls %s |grep vagrant-k3s", os.Getenv("NDM_HOME"))
targetDirDomain, _, err := doCommand(cmd)
require.Equal(s.T(), err, nil, "Running command `%s` should not get error : %v", cmd, err)

s.hotplugTargetNodeName = fmt.Sprintf("%s_node1", strings.TrimSpace(targetDirDomain))
s.hotplugTargetBaseDir = fmt.Sprintf("/tmp/hotplug_disks/%s", strings.TrimSpace(targetDirDomain))

}
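SetupSuite now discovers its target instead of hardcoding `ndm-vagrant-k3s_node1`: it lists NDM_HOME for the single `*vagrant-k3s` checkout the current job created, then derives the libvirt domain and the disk-image directory from that name. The same resolution written as shell, assuming exactly one matching directory:

```bash
NDM_HOME=$(pwd)
target_dir=$(ls "$NDM_HOME" | grep vagrant-k3s)   # ndm-new-... or ndm-upgrade-...
domain="${target_dir}_node1"                      # virsh target for attach/detach
base_dir="/tmp/hotplug_disks/${target_dir}"       # holds node1-sda.xml / .qcow2
echo "$domain" "$base_dir"
```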

func (s *HotPlugTestSuite) AfterTest(_, _ string) {
@@ -117,9 +125,9 @@ func (s *HotPlugTestSuite) Test_0_PreCheckForDiskCount() {

func (s *HotPlugTestSuite) Test_1_HotPlugRemoveDisk() {
// remove disk dynamically
cmd := fmt.Sprintf("virsh detach-disk %s %s --live", hotplugTargetNodeName, hotplugTargetDiskName)
cmd := fmt.Sprintf("virsh detach-disk %s %s --live", s.hotplugTargetNodeName, hotplugTargetDiskName)
_, _, err := doCommand(cmd)
require.Equal(s.T(), err, nil, "Running command `virsh detach-disk` should not get error")
require.Equal(s.T(), err, nil, "Running command `%s` should not get error", cmd)

// wait for controller handling
time.Sleep(5 * time.Second)
@@ -136,9 +144,10 @@

func (s *HotPlugTestSuite) Test_2_HotPlugAddDisk() {
// add disk dynamically
cmd := fmt.Sprintf("virsh attach-device --domain %s --file %s --live", hotplugTargetNodeName, hotplugDiskXMLFileName)
hotplugDiskXMLFileName := fmt.Sprintf("%s/node1-sda.xml", s.hotplugTargetBaseDir)
cmd := fmt.Sprintf("virsh attach-device --domain %s --file %s --live", s.hotplugTargetNodeName, hotplugDiskXMLFileName)
_, _, err := doCommand(cmd)
require.Equal(s.T(), err, nil, "Running command `virsh attach-device` should not get error")
require.Equal(s.T(), err, nil, "Running command `%s` should not get error", cmd)

// wait for controller handling; a device change needs more time for the controller to process
time.Sleep(30 * time.Second)
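The fixed 30-second sleep leaves headroom for the controller but keeps the test timing-sensitive; a hedged alternative is to poll until the blockdevice count settles. A sketch only; the `blockdevices` resource name and `longhorn-system` namespace are assumptions from NDM's conventions, not taken from this diff:

```bash
expected=1
for _ in $(seq 1 30); do
  count=$(kubectl get blockdevices -n longhorn-system --no-headers 2>/dev/null | wc -l)
  [ "$count" -eq "$expected" ] && break   # controller caught up
  sleep 2                                 # retry, bounded at ~60s total
done
```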
@@ -154,15 +163,16 @@

func (s *HotPlugTestSuite) Test_3_AddDuplicatedWWNDsik() {
// create another disk raw file and xml
const (
originalDeviceRaw = "/tmp/hotplug_disks/node1-sda.qcow2"
duplicatedDeviceXML = "/tmp/hotplug_disks/node1-sdb.xml"
duplicatedDeviceRaw = "/tmp/hotplug_disks/node1-sdb.qcow2"
)

originalDeviceRaw := fmt.Sprintf("%s/node1-sda.qcow2", s.hotplugTargetBaseDir)
duplicatedDeviceXML := fmt.Sprintf("%s/node1-sdb.xml", s.hotplugTargetBaseDir)
duplicatedDeviceRaw := fmt.Sprintf("%s/node1-sdb.qcow2", s.hotplugTargetBaseDir)

cmdCpyRawFile := fmt.Sprintf("cp %s %s", originalDeviceRaw, duplicatedDeviceRaw)
_, _, err := doCommand(cmdCpyRawFile)
require.Equal(s.T(), err, nil, "Running command `cp the raw device file` should not get error")
require.Equal(s.T(), err, nil, "Running command `%s` should not get error", cmdCpyRawFile)

hotplugDiskXMLFileName := fmt.Sprintf("%s/node1-sda.xml", s.hotplugTargetBaseDir)
disk, err := utils.DiskXMLReader(hotplugDiskXMLFileName)
require.Equal(s.T(), err, nil, "Read xml file should not get error")
disk.Source.File = duplicatedDeviceRaw
@@ -171,9 +181,9 @@
err = utils.XMLWriter(duplicatedDeviceXML, disk)
require.Equal(s.T(), err, nil, "Write xml file should not get error")

cmd := fmt.Sprintf("virsh attach-device --domain %s --file %s --live", hotplugTargetNodeName, duplicatedDeviceXML)
cmd := fmt.Sprintf("virsh attach-device --domain %s --file %s --live", s.hotplugTargetNodeName, duplicatedDeviceXML)
_, _, err = doCommand(cmd)
require.Equal(s.T(), err, nil, "Running command `virsh attach-device` should not get error")
require.Equal(s.T(), err, nil, "Running command `%s` should not get error", cmd)

// wait for controller handling
time.Sleep(5 * time.Second)
@@ -186,19 +196,19 @@
require.Equal(s.T(), 1, len(blockdeviceList.Items), "We should have one disk because a duplicated WWN should not be added")

// cleanup this disk
cmd = fmt.Sprintf("virsh detach-disk %s %s --live", hotplugTargetNodeName, "sdb")
cmd = fmt.Sprintf("virsh detach-disk %s %s --live", s.hotplugTargetNodeName, "sdb")
_, _, err = doCommand(cmd)
require.Equal(s.T(), err, nil, "Running command `virsh detach-disk` should not get error")
require.Equal(s.T(), err, nil, "Running command `%s` should not get error", cmd)

// wait for controller handling
time.Sleep(5 * time.Second)
}
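Read as shell, Test_3 does three things: copy the raw image, attach a duplicated XML that keeps the original `<wwn>`, and assert that NDM still tracks exactly one blockdevice. Illustrative one-liners, reusing `$domain` and `$base_dir` from the earlier sketch:

```bash
cd "$base_dir"
cp node1-sda.qcow2 node1-sdb.qcow2   # second disk backed by a copy of the image
# Edit a copy of node1-sda.xml: point the source at node1-sdb.qcow2 and the
# target at sdb, but keep <wwn> (the Go test does this via utils.DiskXMLReader/XMLWriter).
virsh attach-device --domain "$domain" --file node1-sdb.xml --live
# Expected: the blockdevice count stays at 1 (duplicate WWN rejected); then clean up:
virsh detach-disk "$domain" sdb --live
```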

func (s *HotPlugTestSuite) Test_4_RemoveInactiveDisk() {
// remove disk dynamically
cmd := fmt.Sprintf("virsh detach-disk %s %s --live", hotplugTargetNodeName, hotplugTargetDiskName)
cmd := fmt.Sprintf("virsh detach-disk %s %s --live", s.hotplugTargetNodeName, hotplugTargetDiskName)
_, _, err := doCommand(cmd)
require.Equal(s.T(), err, nil, "Running command `virsh detach-disk` should not get error")
require.Equal(s.T(), err, nil, "Running command `%s` should not get error", cmd)

// wait for controller handling
time.Sleep(5 * time.Second)
