diff --git a/.gitignore b/.gitignore
index 5cc8311a6e..01f04e5b3d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -53,4 +53,7 @@ tilt-resources/cloud
 # test generated files
 test/e2e/report.xml
 coverage.out
-__debug_bin*
\ No newline at end of file
+__debug_bin*
+
+# make lint cache
+.cache/
diff --git a/Makefile b/Makefile
index 4c9030e5cb..b30b985a1b 100644
--- a/Makefile
+++ b/Makefile
@@ -471,7 +471,7 @@ go-generate:
 # make new-changelog CHANGELOG_BODY="Changes you have made"
 new-changelog: GH_LOGIN ?= $(shell gh pr view --json author --jq .author.login 2> /dev/null)
 new-changelog: GH_PR_NUMBER ?= $(shell gh pr view --json number --jq .number 2> /dev/null)
-new-changelog: CHANGELOG_BODY ?= "$(shell gh pr view --json title --jq .title)"
+new-changelog: CHANGELOG_BODY ?= '$(shell gh pr view --json title --jq .title)'
 new-changelog:
 	@if [ "$(GH_LOGIN)" = "" ]; then \
 		echo "branch does not have PR or cli not logged in, try 'gh auth login' or 'gh pr create'"; \
@@ -479,4 +479,4 @@ new-changelog:
 	fi
 	@mkdir -p ./changelogs/unreleased/ && \
 	echo $(CHANGELOG_BODY) > ./changelogs/unreleased/$(GH_PR_NUMBER)-$(GH_LOGIN) && \
-	echo "\"$(CHANGELOG_BODY)\" added to ./changelogs/unreleased/$(GH_PR_NUMBER)-$(GH_LOGIN)"
+	echo \"$(CHANGELOG_BODY)\" added to "./changelogs/unreleased/$(GH_PR_NUMBER)-$(GH_LOGIN)"
diff --git a/changelogs/unreleased/8383-mayankagg9722 b/changelogs/unreleased/8383-mayankagg9722
new file mode 100644
index 0000000000..bd7581bee0
--- /dev/null
+++ b/changelogs/unreleased/8383-mayankagg9722
@@ -0,0 +1 @@
+Add support in Velero resource policies for filtering PVs based on additional volumeAttributes properties under CSI PVs
\ No newline at end of file
diff --git a/changelogs/unreleased/8459-Lyndon-Li b/changelogs/unreleased/8459-Lyndon-Li
new file mode 100644
index 0000000000..f4f6347566
--- /dev/null
+++ b/changelogs/unreleased/8459-Lyndon-Li
@@ -0,0 +1 @@
+For issue #8429: add the design for multi-arch build and Windows build
\ No newline at end of file
diff --git a/design/Implemented/supporting-volumeattributes-resource-policy.md b/design/Implemented/supporting-volumeattributes-resource-policy.md
new file mode 100644
index 0000000000..a4f4d1bfc4
--- /dev/null
+++ b/design/Implemented/supporting-volumeattributes-resource-policy.md
@@ -0,0 +1,84 @@
+# Adding Support For VolumeAttributes in Resource Policy
+
+## Abstract
+Currently, [Velero resource policies](https://velero.io/docs/main/resource-filtering/#creating-resource-policies) only support filtering on `driver` in [CSI volume conditions](https://github.com/vmware-tanzu/velero/blob/8e23752a6ea83f101bd94a69dcf17f519a805388/internal/resourcepolicies/volume_resources_validator.go#L28).
+
+If users want to skip certain CSI volumes based on other volume attributes, such as protocol or SKU, they cannot do so with the current resource policies. It would be convenient if resource policies could be extended to filter on volume attributes alongside the existing driver filter in the policy `conditions`, so that volumes can be handled during backup by specific volume-attribute conditions.
+
+## Background
+Today, Velero resource policies already provide a way to filter volumes by `driver` name, but that alone is not enough to handle volumes that differ only in other volume attributes, such as protocol or SKU.
+
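+For context, a policy using today's driver-only filter looks like the sketch below (the Azure Files CSI driver is just an example). Such a policy can only skip *all* volumes of a given driver; it cannot distinguish them by their attributes:
+```yaml
+version: v1
+volumePolicies:
+  - conditions:
+      csi:
+        driver: file.csi.azure.com
+    action:
+      type: skip
+```
+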
+## Example:
+ - Provision Azure NFS: define the storage class with `protocol: nfs` under the storage class parameters to provision [CSI NFS Azure File Shares](https://learn.microsoft.com/en-us/azure/aks/azure-files-csi#nfs-file-shares).
+ - A user wants to back up AFS (Azure File Shares), but only the `SMB` file share volumes and not the `NFS` file share volumes.
+
+## Goals
+- Add filtering support to resource policies; the scope remains handling volumes during backup.
+- Introduce a `VolumeAttributes` filter alongside the `driver` filter in CSI volume conditions.
+
+## Non-Goals
+- This change only handles volumes; it does not add filtering for other resources.
+
+## Use-cases/Scenarios
+### Skip backup volumes by some volume attributes:
+Users want to skip PVs with the following requirement:
+- an option to skip specified PVs by volume attribute values (such as protocol: NFS or SMB)
+
+### Sample Storage Class Used to create such Volumes
+```
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  name: azurefile-csi-nfs
+provisioner: file.csi.azure.com
+allowVolumeExpansion: true
+parameters:
+  protocol: nfs
+```
+
+## High-Level Design
+Modify the existing resource policies code for [csiVolumeSource](https://github.com/vmware-tanzu/velero/blob/8e23752a6ea83f101bd94a69dcf17f519a805388/internal/resourcepolicies/volume_resources_validator.go#L28C6-L28C22) to add the new `VolumeAttributes` filter for CSI volumes, and extend the existing [csiCondition](https://github.com/vmware-tanzu/velero/blob/8e23752a6ea83f101bd94a69dcf17f519a805388/internal/resourcepolicies/volume_resources.go#L150) matching so that the volume attributes in the conditions from the resource policy config map are compared against those of the original persistent volume.
+
+## Detailed Design
+The volume resource policies contain a list of policies, each a combination of conditions and a related `action`; when a target volume meets the conditions, the related `action` takes effect.
+
+Below is the API design for the user configuration:
+
+### API Design
+```go
+type csiVolumeSource struct {
+	Driver string `yaml:"driver,omitempty"`
+	// [NEW] CSI volume attributes
+	VolumeAttributes map[string]string `yaml:"volumeAttributes,omitempty"`
+}
+```
+
+The policies YAML config file would look like this:
+```yaml
+version: v1
+volumePolicies:
+  - conditions:
+      csi:
+        driver: disk.csi.azure.com
+    action:
+      type: skip
+  - conditions:
+      csi:
+        driver: file.csi.azure.com
+        volumeAttributes:
+          protocol: nfs
+    action:
+      type: skip
+```
+
+### New Supported Conditions
+#### VolumeAttributes
+The existing CSI volume condition can now include `volumeAttributes`, a set of key-value pairs.
+
+  Specify details for the related volume source (previously, the CSI `driver` was the only supported filter):
+  ```yaml
+  csi: # match volumes of `file.csi.azure.com` whose volumeAttributes include protocol: nfs
+    driver: file.csi.azure.com
+    volumeAttributes:
+      protocol: nfs
+  ```
\ No newline at end of file
diff --git a/design/multiple-arch-build-with-windows.md b/design/multiple-arch-build-with-windows.md
new file mode 100644
index 0000000000..a6bdc381f2
--- /dev/null
+++ b/design/multiple-arch-build-with-windows.md
@@ -0,0 +1,121 @@
+# Multi-arch Build and Windows Build Support
+
+## Background
+
+At present, Velero images can be built for linux-amd64 and linux-arm64. We need to support other platforms, e.g., windows-amd64.
+At present, for the linux image build, we leverage Buildkit's `--platform` option to create the image manifest list in one build call. However, this is a limited approach and doesn't fully support all multi-arch scenarios. Specifically, since the build is done in one call with the same parameters, it is impossible to build images with different configurations (e.g., the Windows build requires a different Dockerfile).
+At present, Velero by default builds images locally, i.e., no image or manifest is pushed to a registry. However, docker doesn't support multi-arch builds locally, so we need to clarify the behavior of local builds.
+
+## Goals
+- Refactor the `make container` process to fully support multi-arch builds
+- Add Windows build to the existing build process
+- Clarify the behavior of local builds with multi-arch build capabilities
+- Don't change the pattern of the final image tag used by users
+
+## Non-Goals
+- There may be some workarounds to make the multi-arch image/manifest fully available locally. These workarounds will not be adopted, so a local build always builds single-arch images
+
+## Local Build
+
+For local builds, two values of the `--output` parameter for `docker buildx build` are supported:
+- `docker`: a docker-format image is built, but only for the same platform (`<os>/<arch>`) as the build env. E.g., when building from a linux-amd64 env, a single manifest of linux-amd64 is created regardless of how the input parameters are configured.
+- `tar`: one or more images are built as tarballs according to the input platform (`<os>/<arch>`) parameters. Specifically, one tarball is generated for each platform. The build process is the same as `Build Separate Manifests` of `Push Build` as detailed below; only the `--output` parameter differs, as `type=tar;dest=<path>`. The tarball is generated in the `_output` folder and named with the platform info, e.g., `_output/velero-main-linux-amd64.tar`.
+
+## Push Build
+
+For push builds, the `--output` parameter for `docker buildx build` is always `registry`, and the build goes according to the input parameters and creates multi-arch manifest lists.
+
+### Step 1: Build Separate Manifests
+
+Instead of specifying multiple platforms (`<os>/<arch>`) in the `--platform` option, we add multiple `container-%` targets in the Makefile, and each target builds one platform respectively.
+
+The goal here is to build multiple manifests through the multiple targets. However, `docker buildx build` by default creates a manifest list even when there is only one element in `--platform`. Therefore, two flags, `--provenance=false` and `--sbom=false`, will be set additionally to force `docker buildx build` to create plain manifests.
+
+Each manifest has a unique tag; the OS type and arch are added to the tag, in the pattern `$(REGISTRY)/$(BIN):$(VERSION)-$(OS)-$(ARCH)`. For example, `velero/velero:main-linux-amd64`.
+
+All the created manifests are pushed to the registry so that the all-in-one manifest list can be created.
+
+### Step 2: Create All-In-One Manifest List
+
+The next step is to create a manifest list that includes all the created manifests. This is done by the `docker manifest create` command; the tags created and pushed at Step 1 are passed to this command.
+A tag is also created for the manifest list, in the pattern `$(REGISTRY)/$(BIN):$(VERSION)`. For example, `velero/velero:main`.
+
+### Step 3: Push All-In-One Manifest List
+
+The created manifest list is pushed to the registry by the `docker manifest push` command.
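+
+Putting the steps together, a push build for linux-amd64 and windows-amd64 is roughly equivalent to the commands below. This is only a sketch: the real flow is driven by the Makefile targets, the Windows Dockerfile name and the `--os-version` value are placeholders, and the annotation step is described in the Windows Build section:
+```
+docker buildx build --platform linux/amd64 --provenance=false --sbom=false \
+  --output type=registry --tag velero/velero:main-linux-amd64 .
+docker buildx build --platform windows/amd64 --provenance=false --sbom=false \
+  --output type=registry --tag velero/velero:main-windows-ltsc2022-amd64 \
+  --file <windows-dockerfile> .
+docker manifest create velero/velero:main \
+  velero/velero:main-linux-amd64 velero/velero:main-windows-ltsc2022-amd64
+docker manifest annotate --os windows --arch amd64 --os-version <windows-os-version> \
+  velero/velero:main velero/velero:main-windows-ltsc2022-amd64
+docker manifest push velero/velero:main
+```
+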
+## Input Parameters
+
+Below are the input parameters that are configurable to meet different build purposes during the dev and release cycles:
+- BUILDX_OUTPUT_TYPE: the type of output for the build, i.e., `docker`, `tar`, or `registry`; `docker` and `tar` are for local builds, while `registry` means a push build. The default value is `docker`
+- BUILD_OS: the OS types to build for. Multiple values are accepted, e.g., `linux,windows`. The default value is `linux`
+- BUILD_ARCH: the architectures to build for. Multiple values are accepted, e.g., `amd64,arm64`. The default value is `amd64`
+
+## Windows Build
+
+Windows container images vary by Windows OS version, e.g., `ltsc2022` for Windows Server 2022 and `1809` for Windows Server 2019. Images for different OS versions should be built separately.
+Therefore, separate build targets are added for each OS version, like `container-windows-%`.
+For the same reason, a new input parameter is added, `BUILD_WINDOWS_VERSION`. The default value is `ltsc2022`. Windows Server 2022 is the only base image we will deliver officially; Windows Server 2019 is not supported. In the future, we may need to support the Windows Server 2025 base image.
+For local builds to tar, the Windows OS version is also added to the name of the tarball, e.g., `_output/velero-main-windows-ltsc2022-amd64.tar`.
+
+At present, Windows container images only support `amd64` as the architecture, so `BUILD_ARCH` is ignored for Windows.
+
+The Windows manifests need to be annotated with the OS type, arch, and OS version. This is done through the `docker manifest annotate` command.
+
+## Use Multi-arch Images
+
+In order to use the images, the manifest list's tag should be provided to the `velero install` command or Helm; the individual manifests are covered by the manifest list. At launch time, the container engine will load the right image to the container according to the platform of the running node.
+
+## Build Samples
+
+**Local build to docker**
+```
+make container
+```
+The built image can be listed by `docker image ls`.
+
+**Local build for linux-amd64 and windows-amd64 to tar**
+```
+BUILDX_OUTPUT_TYPE=tar BUILD_OS=linux,windows make container
+```
+Under the `_output` directory, the following files are generated:
+```
+velero-main-linux-amd64.tar
+velero-main-windows-ltsc2022-amd64.tar
+```
+
+**Local build for linux-amd64, linux-arm64 and windows-amd64 to tar**
+```
+BUILDX_OUTPUT_TYPE=tar BUILD_OS=linux,windows BUILD_ARCH=amd64,arm64 make container
+```
+Under the `_output` directory, the following files are generated:
+```
+velero-main-linux-amd64.tar
+velero-main-linux-arm64.tar
+velero-main-windows-ltsc2022-amd64.tar
+```
+
+**Push build for linux-amd64 and windows-amd64**
+Prerequisite: log in to the registry, e.g., through `docker login`
+```
+BUILDX_OUTPUT_TYPE=registry REGISTRY=<registry> BUILD_OS=linux,windows make container
+```
+Nothing is available locally; in the registry, 3 tags are available:
+```
+velero/velero:main
+velero/velero:main-windows-ltsc2022-amd64
+velero/velero:main-linux-amd64
+```
+
+**Push build for linux-amd64, linux-arm64 and windows-amd64**
+Prerequisite: log in to the registry, e.g., through `docker login`
+```
+BUILDX_OUTPUT_TYPE=registry REGISTRY=<registry> BUILD_OS=linux,windows BUILD_ARCH=amd64,arm64 make container
+```
+Nothing is available locally; in the registry, 4 tags are available:
+```
+velero/velero:main
+velero/velero:main-windows-ltsc2022-amd64
+velero/velero:main-linux-amd64
+velero/velero:main-linux-arm64
+```
diff --git a/internal/hook/wait_exec_hook_handler_test.go b/internal/hook/wait_exec_hook_handler_test.go
index 4a3b2b716d..a97fa75a6f 100644
--- a/internal/hook/wait_exec_hook_handler_test.go
+++ b/internal/hook/wait_exec_hook_handler_test.go
@@ -999,11 +999,6 @@ func TestMaxHookWait(t *testing.T) {
 }
 
 func TestRestoreHookTrackerUpdate(t *testing.T) {
-	type change struct {
-		// delta to wait since last change applied or pod added
-		wait    time.Duration
-		updated *v1.Pod
-	}
 	type expectedExecution struct {
 		hook  *velerov1api.ExecHook
 		name  string
diff --git a/internal/resourcepolicies/resource_policies_test.go b/internal/resourcepolicies/resource_policies_test.go
index e21458b2d1..53e79a1342 100644
--- a/internal/resourcepolicies/resource_policies_test.go
+++ b/internal/resourcepolicies/resource_policies_test.go
@@ -93,20 +93,32 @@ func TestLoadResourcePolicies(t *testing.T) {
 			wantErr: true,
 		},
 		{
-			name: "supported formart volume policies",
+			name: "supported format volume policies",
 			yamlData: `version: v1
-	volumePolicies:
-	- conditions:
-		capacity: "0,100Gi"
-		csi:
-			driver: aws.efs.csi.driver
-		nfs: {}
-		storageClass:
-		- gp2
-		- ebs-sc
-	action:
-		type: skip`,
-			wantErr: true,
+volumePolicies:
+  - conditions:
+      capacity: '0,100Gi'
+      csi:
+        driver: aws.efs.csi.driver
+    action:
+      type: skip
+`,
+			wantErr: false,
+		},
+		{
+			name: "supported format csi driver with volumeAttributes for volume policies",
+			yamlData: `version: v1
+volumePolicies:
+  - conditions:
+      capacity: '0,100Gi'
+      csi:
+        driver: aws.efs.csi.driver
+        volumeAttributes:
+          key1: value1
+    action:
+      type: skip
+`,
+			wantErr: false,
 		},
 	}
 	for _, tc := range testCases {
@@ -135,6 +147,16 @@ func TestGetResourceMatchedAction(t *testing.T) {
 				}),
 			},
 		},
+		{
+			Action: Action{Type: "skip"},
+			Conditions: map[string]interface{}{
+				"csi": interface{}(
+					map[string]interface{}{
+						"driver":           "files.csi.driver",
+						"volumeAttributes": map[string]string{"protocol": "nfs"},
+					}),
+			},
+		},
 		{
 			Action: Action{Type: "snapshot"},
 			Conditions: map[string]interface{}{
@@ -172,6 +194,24 @@
 			},
 			expectedAction:
&Action{Type: "skip"}, }, + { + name: "match policy AFS NFS", + volume: &structuredVolume{ + capacity: *resource.NewQuantity(5<<30, resource.BinarySI), + storageClass: "afs-nfs", + csi: &csiVolumeSource{Driver: "files.csi.driver", VolumeAttributes: map[string]string{"protocol": "nfs"}}, + }, + expectedAction: &Action{Type: "skip"}, + }, + { + name: "match policy AFS SMB", + volume: &structuredVolume{ + capacity: *resource.NewQuantity(5<<30, resource.BinarySI), + storageClass: "afs-smb", + csi: &csiVolumeSource{Driver: "files.csi.driver"}, + }, + expectedAction: nil, + }, { name: "both matches return the first policy", volume: &structuredVolume{ @@ -226,7 +266,7 @@ func TestGetResourcePoliciesFromConfig(t *testing.T) { Namespace: "test-namespace", }, Data: map[string]string{ - "test-data": "version: v1\nvolumePolicies:\n- conditions:\n capacity: '0,10Gi'\n action:\n type: skip", + "test-data": "version: v1\nvolumePolicies:\n - conditions:\n capacity: '0,10Gi'\n csi:\n driver: disks.csi.driver\n action:\n type: skip\n - conditions:\n csi:\n driver: files.csi.driver\n volumeAttributes:\n protocol: nfs\n action:\n type: skip", }, } @@ -236,13 +276,27 @@ func TestGetResourcePoliciesFromConfig(t *testing.T) { // Check that the returned resourcePolicies object contains the expected data assert.Equal(t, "v1", resPolicies.version) - assert.Len(t, resPolicies.volumePolicies, 1) + assert.Len(t, resPolicies.volumePolicies, 2) policies := ResourcePolicies{ Version: "v1", VolumePolicies: []VolumePolicy{ { Conditions: map[string]interface{}{ "capacity": "0,10Gi", + "csi": map[string]interface{}{ + "driver": "disks.csi.driver", + }, + }, + Action: Action{ + Type: Skip, + }, + }, + { + Conditions: map[string]interface{}{ + "csi": map[string]interface{}{ + "driver": "files.csi.driver", + "volumeAttributes": map[string]string{"protocol": "nfs"}, + }, }, Action: Action{ Type: Skip, @@ -298,7 +352,173 @@ volumePolicies: skip: false, }, { - name: "csi not configured", + name: "Skip AFS CSI condition with Disk volumes", + yamlData: `version: v1 +volumePolicies: + - conditions: + csi: + driver: files.csi.driver + action: + type: skip`, + vol: &v1.PersistentVolume{ + Spec: v1.PersistentVolumeSpec{ + PersistentVolumeSource: v1.PersistentVolumeSource{ + CSI: &v1.CSIPersistentVolumeSource{Driver: "disks.csi.driver"}, + }}, + }, + skip: false, + }, + { + name: "Skip AFS CSI condition with AFS volumes", + yamlData: `version: v1 +volumePolicies: + - conditions: + csi: + driver: files.csi.driver + action: + type: skip`, + vol: &v1.PersistentVolume{ + Spec: v1.PersistentVolumeSpec{ + PersistentVolumeSource: v1.PersistentVolumeSource{ + CSI: &v1.CSIPersistentVolumeSource{Driver: "files.csi.driver"}, + }}, + }, + skip: true, + }, + { + name: "Skip AFS NFS CSI condition with Disk volumes", + yamlData: `version: v1 +volumePolicies: + - conditions: + csi: + driver: files.csi.driver + volumeAttributes: + protocol: nfs + action: + type: skip +`, + vol: &v1.PersistentVolume{ + Spec: v1.PersistentVolumeSpec{ + PersistentVolumeSource: v1.PersistentVolumeSource{ + CSI: &v1.CSIPersistentVolumeSource{Driver: "disks.csi.driver"}, + }}, + }, + skip: false, + }, + { + name: "Skip AFS NFS CSI condition with AFS SMB volumes", + yamlData: `version: v1 +volumePolicies: + - conditions: + csi: + driver: files.csi.driver + volumeAttributes: + protocol: nfs + action: + type: skip +`, + vol: &v1.PersistentVolume{ + Spec: v1.PersistentVolumeSpec{ + PersistentVolumeSource: v1.PersistentVolumeSource{ + CSI: &v1.CSIPersistentVolumeSource{Driver: 
"files.csi.driver", VolumeAttributes: map[string]string{"key1": "val1"}}, + }}, + }, + skip: false, + }, + { + name: "Skip AFS NFS CSI condition with AFS NFS volumes", + yamlData: `version: v1 +volumePolicies: + - conditions: + csi: + driver: files.csi.driver + volumeAttributes: + protocol: nfs + action: + type: skip +`, + vol: &v1.PersistentVolume{ + Spec: v1.PersistentVolumeSpec{ + PersistentVolumeSource: v1.PersistentVolumeSource{ + CSI: &v1.CSIPersistentVolumeSource{Driver: "files.csi.driver", VolumeAttributes: map[string]string{"protocol": "nfs"}}, + }}, + }, + skip: true, + }, + { + name: "Skip Disk and AFS NFS CSI condition with Disk volumes", + yamlData: `version: v1 +volumePolicies: + - conditions: + csi: + driver: disks.csi.driver + action: + type: skip + - conditions: + csi: + driver: files.csi.driver + volumeAttributes: + protocol: nfs + action: + type: skip`, + vol: &v1.PersistentVolume{ + Spec: v1.PersistentVolumeSpec{ + PersistentVolumeSource: v1.PersistentVolumeSource{ + CSI: &v1.CSIPersistentVolumeSource{Driver: "disks.csi.driver", VolumeAttributes: map[string]string{"key1": "val1"}}, + }}, + }, + skip: true, + }, + { + name: "Skip Disk and AFS NFS CSI condition with AFS SMB volumes", + yamlData: `version: v1 +volumePolicies: + - conditions: + csi: + driver: disks.csi.driver + action: + type: skip + - conditions: + csi: + driver: files.csi.driver + volumeAttributes: + protocol: nfs + action: + type: skip`, + vol: &v1.PersistentVolume{ + Spec: v1.PersistentVolumeSpec{ + PersistentVolumeSource: v1.PersistentVolumeSource{ + CSI: &v1.CSIPersistentVolumeSource{Driver: "files.csi.driver", VolumeAttributes: map[string]string{"key1": "val1"}}, + }}, + }, + skip: false, + }, + { + name: "Skip Disk and AFS NFS CSI condition with AFS NFS volumes", + yamlData: `version: v1 +volumePolicies: + - conditions: + csi: + driver: disks.csi.driver + action: + type: skip + - conditions: + csi: + driver: files.csi.driver + volumeAttributes: + protocol: nfs + action: + type: skip`, + vol: &v1.PersistentVolume{ + Spec: v1.PersistentVolumeSpec{ + PersistentVolumeSource: v1.PersistentVolumeSource{ + CSI: &v1.CSIPersistentVolumeSource{Driver: "files.csi.driver", VolumeAttributes: map[string]string{"key1": "val1", "protocol": "nfs"}}, + }}, + }, + skip: true, + }, + { + name: "csi not configured and testing capacity condition", yamlData: `version: v1 volumePolicies: - conditions: diff --git a/internal/resourcepolicies/volume_resources.go b/internal/resourcepolicies/volume_resources.go index fd1b8182ac..6abdb0648e 100644 --- a/internal/resourcepolicies/volume_resources.go +++ b/internal/resourcepolicies/volume_resources.go @@ -60,7 +60,7 @@ func (s *structuredVolume) parsePV(pv *corev1api.PersistentVolume) { csi := pv.Spec.CSI if csi != nil { - s.csi = &csiVolumeSource{Driver: csi.Driver} + s.csi = &csiVolumeSource{Driver: csi.Driver, VolumeAttributes: csi.VolumeAttributes} } s.volumeType = getVolumeTypeFromPV(pv) @@ -74,7 +74,7 @@ func (s *structuredVolume) parsePodVolume(vol *corev1api.Volume) { csi := vol.CSI if csi != nil { - s.csi = &csiVolumeSource{Driver: csi.Driver} + s.csi = &csiVolumeSource{Driver: csi.Driver, VolumeAttributes: csi.VolumeAttributes} } s.volumeType = getVolumeTypeFromVolume(vol) @@ -160,7 +160,25 @@ func (c *csiCondition) match(v *structuredVolume) bool { return false } - return c.csi.Driver == v.csi.Driver + if c.csi.Driver != v.csi.Driver { + return false + } + + if len(c.csi.VolumeAttributes) == 0 { + return true + } + + if len(v.csi.VolumeAttributes) == 0 { + return false 
+ } + + for key, value := range c.csi.VolumeAttributes { + if value != v.csi.VolumeAttributes[key] { + return false + } + } + + return true } // parseCapacity parse string into capacity format diff --git a/internal/resourcepolicies/volume_resources_test.go b/internal/resourcepolicies/volume_resources_test.go index 4d5d7a743a..9eca07ef75 100644 --- a/internal/resourcepolicies/volume_resources_test.go +++ b/internal/resourcepolicies/volume_resources_test.go @@ -201,23 +201,47 @@ func TestCSIConditionMatch(t *testing.T) { expectedMatch bool }{ { - name: "match csi condition", + name: "match csi driver condition", condition: &csiCondition{&csiVolumeSource{Driver: "test"}}, volume: setStructuredVolume(*resource.NewQuantity(0, resource.BinarySI), "", nil, &csiVolumeSource{Driver: "test"}), expectedMatch: true, }, { - name: "empty csi condition", + name: "empty csi driver condition", condition: &csiCondition{nil}, volume: setStructuredVolume(*resource.NewQuantity(0, resource.BinarySI), "", nil, &csiVolumeSource{Driver: "test"}), expectedMatch: true, }, { - name: "empty csi volume", + name: "empty csi driver volume", condition: &csiCondition{&csiVolumeSource{Driver: "test"}}, volume: setStructuredVolume(*resource.NewQuantity(0, resource.BinarySI), "", nil, &csiVolumeSource{}), expectedMatch: false, }, + { + name: "match csi volumeAttributes condition", + condition: &csiCondition{&csiVolumeSource{Driver: "test", VolumeAttributes: map[string]string{"protocol": "nfs"}}}, + volume: setStructuredVolume(*resource.NewQuantity(0, resource.BinarySI), "", nil, &csiVolumeSource{Driver: "test", VolumeAttributes: map[string]string{"protocol": "nfs"}}), + expectedMatch: true, + }, + { + name: "empty csi volumeAttributes condition", + condition: &csiCondition{&csiVolumeSource{Driver: "test"}}, + volume: setStructuredVolume(*resource.NewQuantity(0, resource.BinarySI), "", nil, &csiVolumeSource{Driver: "test", VolumeAttributes: map[string]string{"protocol": "nfs"}}), + expectedMatch: true, + }, + { + name: "empty csi volumeAttributes volume", + condition: &csiCondition{&csiVolumeSource{Driver: "test", VolumeAttributes: map[string]string{"protocol": "nfs"}}}, + volume: setStructuredVolume(*resource.NewQuantity(0, resource.BinarySI), "", nil, &csiVolumeSource{Driver: "test", VolumeAttributes: map[string]string{"protocol": ""}}), + expectedMatch: false, + }, + { + name: "empty csi volumeAttributes volume", + condition: &csiCondition{&csiVolumeSource{Driver: "test", VolumeAttributes: map[string]string{"protocol": "nfs"}}}, + volume: setStructuredVolume(*resource.NewQuantity(0, resource.BinarySI), "", nil, &csiVolumeSource{Driver: "test"}), + expectedMatch: false, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -302,7 +326,8 @@ func TestParsePodVolume(t *testing.T) { } csiVolume := corev1api.Volume{} csiVolume.CSI = &corev1api.CSIVolumeSource{ - Driver: "csi.example.com", + Driver: "csi.example.com", + VolumeAttributes: map[string]string{"protocol": "nfs"}, } emptyVolume := corev1api.Volume{} @@ -321,7 +346,7 @@ func TestParsePodVolume(t *testing.T) { { name: "CSI volume", inputVolume: &csiVolume, - expectedCSI: &csiVolumeSource{Driver: "csi.example.com"}, + expectedCSI: &csiVolumeSource{Driver: "csi.example.com", VolumeAttributes: map[string]string{"protocol": "nfs"}}, }, { name: "Empty volume", @@ -348,9 +373,19 @@ func TestParsePodVolume(t *testing.T) { if tc.expectedCSI != nil { if structuredVolume.csi == nil { t.Errorf("Expected a non-nil CSI volume source") - } else if *tc.expectedCSI != 
*structuredVolume.csi { + } else if tc.expectedCSI.Driver != structuredVolume.csi.Driver { t.Errorf("CSI volume source does not match expected value") } + // Check volumeAttributes + if len(tc.expectedCSI.VolumeAttributes) != len(structuredVolume.csi.VolumeAttributes) { + t.Errorf("CSI volume attributes does not match expected value") + } else { + for k, v := range tc.expectedCSI.VolumeAttributes { + if structuredVolume.csi.VolumeAttributes[k] != v { + t.Errorf("CSI volume attributes does not match expected value") + } + } + } } }) } @@ -363,7 +398,7 @@ func TestParsePV(t *testing.T) { nfsVolume.Spec.NFS = &corev1api.NFSVolumeSource{Server: "nfs.example.com", Path: "/exports/data"} csiVolume := corev1api.PersistentVolume{} csiVolume.Spec.Capacity = corev1api.ResourceList{corev1api.ResourceStorage: resource.MustParse("2Gi")} - csiVolume.Spec.CSI = &corev1api.CSIPersistentVolumeSource{Driver: "csi.example.com"} + csiVolume.Spec.CSI = &corev1api.CSIPersistentVolumeSource{Driver: "csi.example.com", VolumeAttributes: map[string]string{"protocol": "nfs"}} emptyVolume := corev1api.PersistentVolume{} // Test cases @@ -383,7 +418,7 @@ func TestParsePV(t *testing.T) { name: "CSI volume", inputVolume: &csiVolume, expectedNFS: nil, - expectedCSI: &csiVolumeSource{Driver: "csi.example.com"}, + expectedCSI: &csiVolumeSource{Driver: "csi.example.com", VolumeAttributes: map[string]string{"protocol": "nfs"}}, }, { name: "Empty volume", @@ -415,9 +450,19 @@ func TestParsePV(t *testing.T) { if tc.expectedCSI != nil { if structuredVolume.csi == nil { t.Errorf("Expected a non-nil CSI volume source") - } else if *tc.expectedCSI != *structuredVolume.csi { + } else if tc.expectedCSI.Driver != structuredVolume.csi.Driver { t.Errorf("CSI volume source does not match expected value") } + // Check volumeAttributes + if len(tc.expectedCSI.VolumeAttributes) != len(structuredVolume.csi.VolumeAttributes) { + t.Errorf("CSI volume attributes does not match expected value") + } else { + for k, v := range tc.expectedCSI.VolumeAttributes { + if structuredVolume.csi.VolumeAttributes[k] != v { + t.Errorf("CSI volume attributes does not match expected value") + } + } + } } }) } diff --git a/internal/resourcepolicies/volume_resources_validator.go b/internal/resourcepolicies/volume_resources_validator.go index cf9a40c0fc..f2ca97a300 100644 --- a/internal/resourcepolicies/volume_resources_validator.go +++ b/internal/resourcepolicies/volume_resources_validator.go @@ -27,6 +27,8 @@ const currentSupportDataVersion = "v1" type csiVolumeSource struct { Driver string `yaml:"driver,omitempty"` + // CSI volume attributes + VolumeAttributes map[string]string `yaml:"volumeAttributes,omitempty"` } type nFSVolumeSource struct { @@ -68,7 +70,10 @@ func (c *nfsCondition) validate() error { } func (c *csiCondition) validate() error { - // validate by yamlv3 + if c != nil && c.csi != nil && c.csi.Driver == "" && c.csi.VolumeAttributes != nil { + return errors.New("csi driver should not be empty when filtering by volume attributes") + } + return nil } diff --git a/internal/resourcepolicies/volume_resources_validator_test.go b/internal/resourcepolicies/volume_resources_validator_test.go index 1cbc6d7325..a74bbc52fa 100644 --- a/internal/resourcepolicies/volume_resources_validator_test.go +++ b/internal/resourcepolicies/volume_resources_validator_test.go @@ -165,6 +165,47 @@ func TestValidate(t *testing.T) { }, wantErr: true, }, + { + name: "error format of csi driver", + res: &ResourcePolicies{ + Version: "v1", + VolumePolicies: []VolumePolicy{ + { 
+ Action: Action{Type: "skip"}, + Conditions: map[string]interface{}{ + "capacity": "0,10Gi", + "storageClass": []string{"gp2", "ebs-sc"}, + "csi": interface{}( + map[string]interface{}{ + "driver": []string{"aws.efs.csi.driver"}, + }), + }, + }, + }, + }, + wantErr: true, + }, + { + name: "error format of csi driver volumeAttributes", + res: &ResourcePolicies{ + Version: "v1", + VolumePolicies: []VolumePolicy{ + { + Action: Action{Type: "skip"}, + Conditions: map[string]interface{}{ + "capacity": "0,10Gi", + "storageClass": []string{"gp2", "ebs-sc"}, + "csi": interface{}( + map[string]interface{}{ + "driver": "aws.efs.csi.driver", + "volumeAttributes": "test", + }), + }, + }, + }, + }, + wantErr: true, + }, { name: "unsupported version", res: &ResourcePolicies{ @@ -220,6 +261,65 @@ func TestValidate(t *testing.T) { }, wantErr: true, }, + { + name: "supported format volume policies only csi driver", + res: &ResourcePolicies{ + Version: "v1", + VolumePolicies: []VolumePolicy{ + { + Action: Action{Type: "skip"}, + Conditions: map[string]interface{}{ + "csi": interface{}( + map[string]interface{}{ + "driver": "aws.efs.csi.driver", + }), + }, + }, + }, + }, + wantErr: false, + }, + { + name: "unsupported format volume policies only csi volumeattributes", + res: &ResourcePolicies{ + Version: "v1", + VolumePolicies: []VolumePolicy{ + { + Action: Action{Type: "skip"}, + Conditions: map[string]interface{}{ + "csi": interface{}( + map[string]interface{}{ + "volumeAttributes": map[string]string{ + "key1": "value1", + }, + }), + }, + }, + }, + }, + wantErr: true, + }, + { + name: "supported format volume policies with csi driver and volumeattributes", + res: &ResourcePolicies{ + Version: "v1", + VolumePolicies: []VolumePolicy{ + { + Action: Action{Type: "skip"}, + Conditions: map[string]interface{}{ + "csi": interface{}( + map[string]interface{}{ + "driver": "aws.efs.csi.driver", + "volumeAttributes": map[string]string{ + "key1": "value1", + }, + }), + }, + }, + }, + }, + wantErr: false, + }, { name: "supported format volume policies", res: &ResourcePolicies{ diff --git a/internal/volume/volumes_information.go b/internal/volume/volumes_information.go index 7d793425c4..9af6d7fe34 100644 --- a/internal/volume/volumes_information.go +++ b/internal/volume/volumes_information.go @@ -49,13 +49,7 @@ const ( const ( FieldValueIsUnknown string = "unknown" - kopia string = "kopia" veleroDatamover string = "velero" - - //TODO reuse these constants from csi-plugin-for-velero after it's merged into the same repo - - CSIDriverNameAnnotation = "velero.io/csi-driver-name" - VolumeSnapshotHandleAnnotation = "velero.io/csi-volumesnapshot-handle" ) type BackupVolumeInfo struct { @@ -647,7 +641,7 @@ func (v *BackupVolumesInformation) generateVolumeInfoFromDataUpload() { }, SnapshotDataMovementInfo: &SnapshotDataMovementInfo{ DataMover: dataMover, - UploaderType: kopia, + UploaderType: velerov1api.BackupRepositoryTypeKopia, OperationID: operation.Spec.OperationID, Phase: dataUpload.Status.Phase, }, @@ -850,9 +844,9 @@ func (t *RestoreVolumeInfoTracker) Result() []*RestoreVolumeInfo { SnapshotDataMoved: false, RestoreMethod: CSISnapshot, CSISnapshotInfo: &CSISnapshotInfo{ - SnapshotHandle: csiSnapshot.Annotations[VolumeSnapshotHandleAnnotation], + SnapshotHandle: csiSnapshot.Annotations[velerov1api.VolumeSnapshotHandleAnnotation], Size: restoreSize, - Driver: csiSnapshot.Annotations[CSIDriverNameAnnotation], + Driver: csiSnapshot.Annotations[velerov1api.DriverNameAnnotation], VSCName: vscName, }, } @@ -889,7 +883,7 @@ 
func (t *RestoreVolumeInfoTracker) Result() []*RestoreVolumeInfo { RestoreMethod: CSISnapshot, SnapshotDataMovementInfo: &SnapshotDataMovementInfo{ DataMover: dataMover, - UploaderType: kopia, + UploaderType: velerov1api.BackupRepositoryTypeKopia, SnapshotHandle: dd.Spec.SnapshotID, OperationID: operationID, }, diff --git a/internal/volume/volumes_information_test.go b/internal/volume/volumes_information_test.go index 8afd44dde6..3233e98ce0 100644 --- a/internal/volume/volumes_information_test.go +++ b/internal/volume/volumes_information_test.go @@ -1170,8 +1170,8 @@ func TestRestoreVolumeInfoResult(t *testing.T) { pvcCSISnapshotMap: map[string]snapshotv1api.VolumeSnapshot{ "testNS/testPVC": *builder.ForVolumeSnapshot("sourceNS", "testCSISnapshot"). ObjectMeta( - builder.WithAnnotations(VolumeSnapshotHandleAnnotation, "csi-snap-001", - CSIDriverNameAnnotation, "test-csi-driver"), + builder.WithAnnotations(velerov1api.VolumeSnapshotHandleAnnotation, "csi-snap-001", + velerov1api.DriverNameAnnotation, "test-csi-driver"), ).SourceVolumeSnapshotContentName("test-vsc-001"). Status().RestoreSize("1Gi").Result(), }, @@ -1269,7 +1269,7 @@ func TestRestoreVolumeInfoResult(t *testing.T) { SnapshotDataMoved: true, SnapshotDataMovementInfo: &SnapshotDataMovementInfo{ DataMover: "velero", - UploaderType: kopia, + UploaderType: velerov1api.BackupRepositoryTypeKopia, SnapshotHandle: "dd-snap-001", OperationID: "dd-operation-001", }, @@ -1282,7 +1282,7 @@ func TestRestoreVolumeInfoResult(t *testing.T) { SnapshotDataMoved: true, SnapshotDataMovementInfo: &SnapshotDataMovementInfo{ DataMover: "velero", - UploaderType: kopia, + UploaderType: velerov1api.BackupRepositoryTypeKopia, SnapshotHandle: "dd-snap-002", OperationID: "dd-operation-002", }, diff --git a/site/content/docs/main/api-types/restore.md b/site/content/docs/main/api-types/restore.md index 7b388d8828..ec3e19511c 100644 --- a/site/content/docs/main/api-types/restore.md +++ b/site/content/docs/main/api-types/restore.md @@ -111,6 +111,11 @@ spec: # existingResourcePolicy specifies the restore behaviour # for the Kubernetes resource to be restored. Optional existingResourcePolicy: none + # ResourceModifier specifies the reference to JSON resource patches + # that should be applied to resources before restoration. Optional + resourceModifier: + kind: ConfigMap + name: resource-modifier-configmap # Actions to perform during or post restore. The only hooks currently supported are # adding an init container to a pod before it can be restored and executing a command in a # restored pod's container. Optional. 
diff --git a/site/content/docs/main/supported-providers.md b/site/content/docs/main/supported-providers.md index 6fc53f8e6b..d83a4c563a 100644 --- a/site/content/docs/main/supported-providers.md +++ b/site/content/docs/main/supported-providers.md @@ -27,6 +27,7 @@ Contact: [#Velero Slack](https://kubernetes.slack.com/messages/velero), [GitHub | [AlibabaCloud](https://www.alibabacloud.com/) | Alibaba Cloud OSS | Alibaba Cloud | [AlibabaCloud](https://github.com/AliyunContainerService/velero-plugin) | [GitHub Issue](https://github.com/AliyunContainerService/velero-plugin/issues) | | [DigitalOcean](https://www.digitalocean.com/) | DigitalOcean Object Storage | DigitalOcean Volumes Block Storage | [StackPointCloud](https://github.com/StackPointCloud/ark-plugin-digitalocean) | | | [Hewlett Packard](https://www.hpe.com/us/en/storage.html) | 🚫 | HPE Storage | [Hewlett Packard](https://github.com/hpe-storage/velero-plugin) | [Slack](https://slack.hpedev.io/), [GitHub Issue](https://github.com/hpe-storage/velero-plugin/issues) | +| [HuaweiCloud](https://www.huaweicloud.com) | HuaweiCloud OBS | 🚫 | [HuaweiCloud](https://github.com/setoru/velero-plugin-for-huaweicloud) | [GitHub Issue](https://github.com/setoru/velero-plugin-for-huaweicloud/issues) | | [OpenEBS](https://openebs.io/) | 🚫 | OpenEBS CStor Volume | [OpenEBS](https://github.com/openebs/velero-plugin) | [Slack](https://openebs-community.slack.com/), [GitHub Issue](https://github.com/openebs/velero-plugin/issues) | | [OpenStack](https://www.openstack.org/) | Swift | Cinder | [OpenStack](https://github.com/Lirt/velero-plugin-for-openstack) | [GitHub Issue](https://github.com/Lirt/velero-plugin-for-openstack/issues) | | [Portworx](https://portworx.com/) | 🚫 | Portworx Volume | [Portworx](https://docs.portworx.com/scheduler/kubernetes/ark.html) | [Slack](https://portworx.slack.com/messages/px-k8s), [GitHub Issue](https://github.com/portworx/ark-plugin/issues) | diff --git a/site/content/docs/v1.15/api-types/restore.md b/site/content/docs/v1.15/api-types/restore.md index 7b388d8828..ec3e19511c 100644 --- a/site/content/docs/v1.15/api-types/restore.md +++ b/site/content/docs/v1.15/api-types/restore.md @@ -111,6 +111,11 @@ spec: # existingResourcePolicy specifies the restore behaviour # for the Kubernetes resource to be restored. Optional existingResourcePolicy: none + # ResourceModifier specifies the reference to JSON resource patches + # that should be applied to resources before restoration. Optional + resourceModifier: + kind: ConfigMap + name: resource-modifier-configmap # Actions to perform during or post restore. The only hooks currently supported are # adding an init container to a pod before it can be restored and executing a command in a # restored pod's container. Optional. 
diff --git a/site/content/docs/v1.15/supported-providers.md b/site/content/docs/v1.15/supported-providers.md index 6fc53f8e6b..d83a4c563a 100644 --- a/site/content/docs/v1.15/supported-providers.md +++ b/site/content/docs/v1.15/supported-providers.md @@ -27,6 +27,7 @@ Contact: [#Velero Slack](https://kubernetes.slack.com/messages/velero), [GitHub | [AlibabaCloud](https://www.alibabacloud.com/) | Alibaba Cloud OSS | Alibaba Cloud | [AlibabaCloud](https://github.com/AliyunContainerService/velero-plugin) | [GitHub Issue](https://github.com/AliyunContainerService/velero-plugin/issues) | | [DigitalOcean](https://www.digitalocean.com/) | DigitalOcean Object Storage | DigitalOcean Volumes Block Storage | [StackPointCloud](https://github.com/StackPointCloud/ark-plugin-digitalocean) | | | [Hewlett Packard](https://www.hpe.com/us/en/storage.html) | 🚫 | HPE Storage | [Hewlett Packard](https://github.com/hpe-storage/velero-plugin) | [Slack](https://slack.hpedev.io/), [GitHub Issue](https://github.com/hpe-storage/velero-plugin/issues) | +| [HuaweiCloud](https://www.huaweicloud.com) | HuaweiCloud OBS | 🚫 | [HuaweiCloud](https://github.com/setoru/velero-plugin-for-huaweicloud) | [GitHub Issue](https://github.com/setoru/velero-plugin-for-huaweicloud/issues) | | [OpenEBS](https://openebs.io/) | 🚫 | OpenEBS CStor Volume | [OpenEBS](https://github.com/openebs/velero-plugin) | [Slack](https://openebs-community.slack.com/), [GitHub Issue](https://github.com/openebs/velero-plugin/issues) | | [OpenStack](https://www.openstack.org/) | Swift | Cinder | [OpenStack](https://github.com/Lirt/velero-plugin-for-openstack) | [GitHub Issue](https://github.com/Lirt/velero-plugin-for-openstack/issues) | | [Portworx](https://portworx.com/) | 🚫 | Portworx Volume | [Portworx](https://docs.portworx.com/scheduler/kubernetes/ark.html) | [Slack](https://portworx.slack.com/messages/px-k8s), [GitHub Issue](https://github.com/portworx/ark-plugin/issues) | diff --git a/test/e2e/backup/backup.go b/test/e2e/backup/backup.go index cfdc897244..2f252171b1 100644 --- a/test/e2e/backup/backup.go +++ b/test/e2e/backup/backup.go @@ -110,7 +110,7 @@ func BackupRestoreTest(backupRestoreTestConfig BackupRestoreTestConfig) { if InstallVelero { ctx, ctxCancel := context.WithTimeout(context.Background(), time.Minute*5) defer ctxCancel() - err = VeleroUninstall(ctx, veleroCfg.VeleroCLI, veleroCfg.VeleroNamespace) + err = VeleroUninstall(ctx, veleroCfg) Expect(err).To(Succeed()) } } diff --git a/test/e2e/backups/sync_backups.go b/test/e2e/backups/sync_backups.go index e4532fea62..447fc735b9 100644 --- a/test/e2e/backups/sync_backups.go +++ b/test/e2e/backups/sync_backups.go @@ -72,7 +72,7 @@ func BackupsSyncTest() { if InstallVelero { ctx, ctxCancel := context.WithTimeout(context.Background(), time.Minute*5) defer ctxCancel() - Expect(VeleroUninstall(ctx, veleroCfg.VeleroCLI, veleroCfg.VeleroNamespace)).To(Succeed()) + Expect(VeleroUninstall(ctx, veleroCfg)).To(Succeed()) } } }) @@ -108,7 +108,7 @@ func BackupsSyncTest() { }) By("Uninstall velero", func() { - Expect(VeleroUninstall(ctx, veleroCfg.VeleroCLI, veleroCfg.VeleroNamespace)).To(Succeed()) + Expect(VeleroUninstall(ctx, veleroCfg)).To(Succeed()) }) By("Install velero", func() { diff --git a/test/e2e/backups/ttl.go b/test/e2e/backups/ttl.go index 5135c4b0b4..c2c2224e9d 100644 --- a/test/e2e/backups/ttl.go +++ b/test/e2e/backups/ttl.go @@ -84,7 +84,7 @@ func TTLTest() { ctx, ctxCancel := context.WithTimeout(context.Background(), time.Minute*5) defer ctxCancel() if InstallVelero { - 
Expect(VeleroUninstall(ctx, veleroCfg.VeleroCLI, veleroCfg.VeleroNamespace)).To(Succeed()) + Expect(VeleroUninstall(ctx, veleroCfg)).To(Succeed()) } Expect(DeleteNamespace(ctx, client, test.testNS, false)).To(Succeed(), fmt.Sprintf("Failed to delete the namespace %s", test.testNS)) } diff --git a/test/e2e/basic/api-group/enable_api_group_extentions.go b/test/e2e/basic/api-group/enable_api_group_extentions.go index 586fdeafa1..546d2f7212 100644 --- a/test/e2e/basic/api-group/enable_api_group_extentions.go +++ b/test/e2e/basic/api-group/enable_api_group_extentions.go @@ -83,13 +83,11 @@ func APIExtensionsVersionsTest() { ctx, ctxCancel := context.WithTimeout(context.Background(), time.Minute*5) defer ctxCancel() Expect(KubectlConfigUseContext(context.Background(), veleroCfg.DefaultClusterContext)).To(Succeed()) - Expect(VeleroUninstall(ctx, veleroCfg.VeleroCLI, - veleroCfg.VeleroNamespace)).To(Succeed()) + Expect(VeleroUninstall(ctx, veleroCfg)).To(Succeed()) Expect(DeleteCRDByName(context.Background(), crdName)).To(Succeed()) Expect(KubectlConfigUseContext(context.Background(), veleroCfg.StandbyClusterContext)).To(Succeed()) - Expect(VeleroUninstall(ctx, veleroCfg.VeleroCLI, - veleroCfg.VeleroNamespace)).To(Succeed()) + Expect(VeleroUninstall(ctx, veleroCfg)).To(Succeed()) Expect(DeleteCRDByName(context.Background(), crdName)).To(Succeed()) }) } diff --git a/test/e2e/basic/api-group/enable_api_group_versions.go b/test/e2e/basic/api-group/enable_api_group_versions.go index cb6606672c..c0eb770626 100644 --- a/test/e2e/basic/api-group/enable_api_group_versions.go +++ b/test/e2e/basic/api-group/enable_api_group_versions.go @@ -100,7 +100,7 @@ func APIGroupVersionsTest() { }) if InstallVelero { By("Uninstall Velero in api group version case", func() { - Expect(VeleroUninstall(ctx, veleroCfg.VeleroCLI, veleroCfg.VeleroNamespace)).NotTo(HaveOccurred()) + Expect(VeleroUninstall(ctx, veleroCfg)).NotTo(HaveOccurred()) }) } } diff --git a/test/e2e/basic/backup-volume-info/base.go b/test/e2e/basic/backup-volume-info/base.go index af54763198..15e92699e6 100644 --- a/test/e2e/basic/backup-volume-info/base.go +++ b/test/e2e/basic/backup-volume-info/base.go @@ -31,7 +31,6 @@ import ( . "github.com/vmware-tanzu/velero/test/e2e/test" . "github.com/vmware-tanzu/velero/test/util/common" . "github.com/vmware-tanzu/velero/test/util/k8s" - . "github.com/vmware-tanzu/velero/test/util/velero" ) type BackupVolumeInfo struct { @@ -108,9 +107,6 @@ func (v *BackupVolumeInfo) CreateResources() error { return errors.Wrapf(err, "Failed to create namespace %s", createNSName) } - // Install StorageClass - Expect(InstallTestStorageClasses(fmt.Sprintf("../testdata/storage-class/%s-csi.yaml", v.VeleroCfg.CloudProvider))).To(Succeed(), "Failed to install StorageClass") - // Create deployment fmt.Printf("Creating deployment in namespaces ...%s\n", createNSName) // Make sure PVC count is great than 3 to allow both empty volumes and file populated volumes exist per pod @@ -120,7 +116,7 @@ func (v *BackupVolumeInfo) CreateResources() error { var vols []*v1.Volume for i := 0; i <= pvcCount-1; i++ { pvcName := fmt.Sprintf("volume-info-pvc-%d", i) - pvc, err := CreatePVC(v.Client, createNSName, pvcName, CSIStorageClassName, nil) + pvc, err := CreatePVC(v.Client, createNSName, pvcName, StorageClassName, nil) Expect(err).To(Succeed()) volumeName := fmt.Sprintf("volume-info-pv-%d", i) vols = append(vols, CreateVolumes(pvc.Name, []string{volumeName})...) 
@@ -159,11 +155,3 @@ func (v *BackupVolumeInfo) Destroy() error { return WaitAllSelectedNSDeleted(v.Ctx, v.Client, "ns-test=true") } - -func (v *BackupVolumeInfo) cleanResource() error { - if err := DeleteStorageClass(v.Ctx, v.Client, CSIStorageClassName); err != nil { - return errors.Wrap(err, "fail to delete the StorageClass") - } - - return nil -} diff --git a/test/e2e/basic/backup-volume-info/csi_data_mover.go b/test/e2e/basic/backup-volume-info/csi_data_mover.go index 7e08114b5c..de1ca5c423 100644 --- a/test/e2e/basic/backup-volume-info/csi_data_mover.go +++ b/test/e2e/basic/backup-volume-info/csi_data_mover.go @@ -61,6 +61,5 @@ func (c *CSIDataMoverVolumeInfo) Verify() error { Expect(len(volumeInfo) > 0).To(BeIdenticalTo(true)) Expect(volumeInfo[0].SnapshotDataMovementInfo).NotTo(BeNil()) - // Clean SC and VSC - return c.cleanResource() + return nil } diff --git a/test/e2e/basic/backup-volume-info/csi_snapshot.go b/test/e2e/basic/backup-volume-info/csi_snapshot.go index 0bc3a7111b..ef476421c0 100644 --- a/test/e2e/basic/backup-volume-info/csi_snapshot.go +++ b/test/e2e/basic/backup-volume-info/csi_snapshot.go @@ -60,6 +60,5 @@ func (c *CSISnapshotVolumeInfo) Verify() error { Expect(len(volumeInfo) > 0).To(BeIdenticalTo(true)) Expect(volumeInfo[0].CSISnapshotInfo).NotTo(BeNil()) - // Clean SC and VSC - return c.cleanResource() + return nil } diff --git a/test/e2e/basic/backup-volume-info/filesystem_upload.go b/test/e2e/basic/backup-volume-info/filesystem_upload.go index bab85ca126..d58eee5ada 100644 --- a/test/e2e/basic/backup-volume-info/filesystem_upload.go +++ b/test/e2e/basic/backup-volume-info/filesystem_upload.go @@ -60,6 +60,5 @@ func (f *FilesystemUploadVolumeInfo) Verify() error { Expect(len(volumeInfo) > 0).To(BeIdenticalTo(true)) Expect(volumeInfo[0].PVBInfo).NotTo(BeNil()) - // Clean SC and VSC - return f.cleanResource() + return nil } diff --git a/test/e2e/basic/backup-volume-info/native_snapshot.go b/test/e2e/basic/backup-volume-info/native_snapshot.go index c8ec0be1b5..b4200f85ca 100644 --- a/test/e2e/basic/backup-volume-info/native_snapshot.go +++ b/test/e2e/basic/backup-volume-info/native_snapshot.go @@ -61,6 +61,5 @@ func (n *NativeSnapshotVolumeInfo) Verify() error { Expect(len(volumeInfo) > 0).To(BeIdenticalTo(true)) Expect(volumeInfo[0].NativeSnapshotInfo).NotTo(BeNil()) - // Clean SC and VSC - return n.cleanResource() + return nil } diff --git a/test/e2e/basic/backup-volume-info/skipped_volumes.go b/test/e2e/basic/backup-volume-info/skipped_volumes.go index f6fc2e226b..0f1ccec0c2 100644 --- a/test/e2e/basic/backup-volume-info/skipped_volumes.go +++ b/test/e2e/basic/backup-volume-info/skipped_volumes.go @@ -60,6 +60,5 @@ func (s *SkippedVolumeInfo) Verify() error { Expect(len(volumeInfo) > 0).To(BeIdenticalTo(true)) Expect(volumeInfo[0].Skipped).To(BeIdenticalTo(true)) - // Clean SC and VSC - return s.cleanResource() + return nil } diff --git a/test/e2e/basic/pvc-selected-node-changing.go b/test/e2e/basic/pvc-selected-node-changing.go index 65806d04bc..e200929c11 100644 --- a/test/e2e/basic/pvc-selected-node-changing.go +++ b/test/e2e/basic/pvc-selected-node-changing.go @@ -68,14 +68,6 @@ func (p *PVCSelectedNodeChanging) CreateResources() error { fmt.Sprintf("Failed to create namespace %s", p.namespace)) }) - By(fmt.Sprintf("Create a storage class %s.", StorageClassName), func() { - Expect(InstallStorageClass(context.Background(), fmt.Sprintf("../testdata/storage-class/%s.yaml", p.VeleroCfg.CloudProvider))).To(Succeed()) - }) - - By(fmt.Sprintf("Create a storage 
class %s.", StorageClassName), func() { - Expect(InstallTestStorageClasses(fmt.Sprintf("../testdata/storage-class/%s.yaml", p.VeleroCfg.CloudProvider))).To(Succeed(), "Failed to install storage class") - }) - By(fmt.Sprintf("Create pod %s in namespace %s", p.podName, p.namespace), func() { nodeNameList, err := GetWorkerNodes(p.Ctx) Expect(err).To(Succeed()) diff --git a/test/e2e/basic/storage-class-changing.go b/test/e2e/basic/storage-class-changing.go index fc20d09f5b..e2e5a02967 100644 --- a/test/e2e/basic/storage-class-changing.go +++ b/test/e2e/basic/storage-class-changing.go @@ -18,7 +18,7 @@ type StorageClasssChanging struct { TestCase labels map[string]string data map[string]string - configmaptName string + cmName string namespace string srcStorageClass string desStorageClass string @@ -51,7 +51,7 @@ func (s *StorageClasssChanging) Init() error { s.labels = map[string]string{"velero.io/change-storage-class": "RestoreItemAction", "velero.io/plugin-config": ""} s.data = map[string]string{s.srcStorageClass: s.desStorageClass} - s.configmaptName = "change-storage-class-config" + s.cmName = "change-storage-class-config" s.volume = "volume-1" s.pvcName = fmt.Sprintf("pvc-%s", s.volume) s.podName = "pod-1" @@ -72,10 +72,6 @@ func (s *StorageClasssChanging) CreateResources() error { "app": "test", } - By(("Installing storage class..."), func() { - Expect(InstallTestStorageClasses(fmt.Sprintf("../testdata/storage-class/%s.yaml", s.VeleroCfg.CloudProvider))).To(Succeed(), "Failed to install storage class") - }) - By(fmt.Sprintf("Create namespace %s", s.namespace), func() { Expect(CreateNamespace(s.Ctx, s.Client, s.namespace)).To(Succeed(), fmt.Sprintf("Failed to create namespace %s", s.namespace)) @@ -94,8 +90,8 @@ func (s *StorageClasssChanging) CreateResources() error { Expect(err).To(Succeed()) }) - By(fmt.Sprintf("Create ConfigMap %s in namespace %s", s.configmaptName, s.VeleroCfg.VeleroNamespace), func() { - _, err := CreateConfigMap(s.Client.ClientGo, s.VeleroCfg.VeleroNamespace, s.configmaptName, s.labels, s.data) + By(fmt.Sprintf("Create ConfigMap %s in namespace %s", s.cmName, s.VeleroCfg.VeleroNamespace), func() { + _, err := CreateConfigMap(s.Client.ClientGo, s.VeleroCfg.VeleroNamespace, s.cmName, s.labels, s.data) Expect(err).To(Succeed(), fmt.Sprintf("failed to create configmap in the namespace %q", s.VeleroCfg.VeleroNamespace)) }) return nil @@ -149,8 +145,7 @@ func (s *StorageClasssChanging) Clean() error { Expect(CleanupNamespacesWithPoll(s.Ctx, s.Client, s.CaseBaseName)).To(Succeed(), fmt.Sprintf("Failed to delete namespace %s", s.CaseBaseName)) }) - DeleteConfigmap(s.Client.ClientGo, s.VeleroCfg.VeleroNamespace, s.configmaptName) - DeleteStorageClass(s.Ctx, s.Client, s.desStorageClass) + DeleteConfigMap(s.Client.ClientGo, s.VeleroCfg.VeleroNamespace, s.cmName) s.TestCase.Clean() } diff --git a/test/e2e/e2e_suite_test.go b/test/e2e/e2e_suite_test.go index 36cb0b0e5d..8e9e9bc0b4 100644 --- a/test/e2e/e2e_suite_test.go +++ b/test/e2e/e2e_suite_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package e2e_test +package e2e import ( "context" @@ -22,6 +22,7 @@ import ( "flag" "fmt" "slices" + "strings" "testing" "time" @@ -29,7 +30,7 @@ import ( . "github.com/onsi/gomega" "github.com/vmware-tanzu/velero/pkg/cmd/cli/install" - . "github.com/vmware-tanzu/velero/test" + "github.com/vmware-tanzu/velero/test" . "github.com/vmware-tanzu/velero/test/e2e/backup" . 
"github.com/vmware-tanzu/velero/test/e2e/backups" . "github.com/vmware-tanzu/velero/test/e2e/basic" @@ -48,62 +49,300 @@ import ( . "github.com/vmware-tanzu/velero/test/e2e/scale" . "github.com/vmware-tanzu/velero/test/e2e/schedule" . "github.com/vmware-tanzu/velero/test/e2e/upgrade" - . "github.com/vmware-tanzu/velero/test/util/k8s" - . "github.com/vmware-tanzu/velero/test/util/velero" + "github.com/vmware-tanzu/velero/test/util/k8s" + veleroutil "github.com/vmware-tanzu/velero/test/util/velero" ) func init() { - VeleroCfg.Options = install.Options{} - flag.StringVar(&VeleroCfg.CloudProvider, "cloud-provider", "", "cloud that Velero will be installed into. Required.") - flag.StringVar(&VeleroCfg.ObjectStoreProvider, "object-store-provider", "", "provider of object store plugin. Required if cloud-provider is kind, otherwise ignored.") - flag.StringVar(&VeleroCfg.BSLBucket, "bucket", "", "name of the object storage bucket where backups from e2e tests should be stored. Required.") - flag.StringVar(&VeleroCfg.CloudCredentialsFile, "credentials-file", "", "file containing credentials for backup and volume provider. Required.") - flag.StringVar(&VeleroCfg.VeleroCLI, "velerocli", "velero", "path to the velero application to use.") - flag.StringVar(&VeleroCfg.VeleroImage, "velero-image", "velero/velero:main", "image for the velero server to be tested.") - flag.StringVar(&VeleroCfg.Plugins, "plugins", "", "provider plugins to be tested.") - flag.StringVar(&VeleroCfg.AddBSLPlugins, "additional-bsl-plugins", "", "additional plugins to be tested.") - flag.StringVar(&VeleroCfg.VeleroVersion, "velero-version", "main", "image version for the velero server to be tested with.") - flag.StringVar(&VeleroCfg.RestoreHelperImage, "restore-helper-image", "", "image for the velero restore helper to be tested.") - flag.StringVar(&VeleroCfg.UpgradeFromVeleroCLI, "upgrade-from-velero-cli", "", "comma-separated list of velero application for the pre-upgrade velero server.") - flag.StringVar(&VeleroCfg.UpgradeFromVeleroVersion, "upgrade-from-velero-version", "v1.7.1", "comma-separated list of Velero version to be tested with for the pre-upgrade velero server.") - flag.StringVar(&VeleroCfg.MigrateFromVeleroCLI, "migrate-from-velero-cli", "", "comma-separated list of velero application on source cluster.") - flag.StringVar(&VeleroCfg.MigrateFromVeleroVersion, "migrate-from-velero-version", "self", "comma-separated list of Velero version to be tested with on source cluster.") - flag.StringVar(&VeleroCfg.BSLConfig, "bsl-config", "", "configuration to use for the backup storage location. Format is key1=value1,key2=value2") - flag.StringVar(&VeleroCfg.BSLPrefix, "prefix", "", "prefix under which all Velero data should be stored within the bucket. Optional.") - flag.StringVar(&VeleroCfg.VSLConfig, "vsl-config", "", "configuration to use for the volume snapshot location. Format is key1=value1,key2=value2") - flag.StringVar(&VeleroCfg.VeleroNamespace, "velero-namespace", "velero", "namespace to install Velero into") - flag.BoolVar(&InstallVelero, "install-velero", true, "install/uninstall velero during the test. Optional.") - flag.BoolVar(&VeleroCfg.UseNodeAgent, "use-node-agent", true, "whether deploy node agent daemonset velero during the test. Optional.") - flag.BoolVar(&VeleroCfg.UseVolumeSnapshots, "use-volume-snapshots", true, "whether or not to create snapshot location automatically. 
Set to false if you do not plan to create volume snapshots via a storage provider.") - flag.StringVar(&VeleroCfg.RegistryCredentialFile, "registry-credential-file", "", "file containing credential for the image registry, follows the same format rules as the ~/.docker/config.json file. Optional.") - flag.StringVar(&VeleroCfg.KibishiiDirectory, "kibishii-directory", "github.com/vmware-tanzu-experiments/distributed-data-generator/kubernetes/yaml/", "file directory or URL path to install Kibishii. Optional.") - //vmware-tanzu-experiments + test.VeleroCfg.Options = install.Options{} + flag.StringVar( + &test.VeleroCfg.CloudProvider, + "cloud-provider", + "", + "cloud that Velero will be installed into. Required.", + ) + flag.StringVar( + &test.VeleroCfg.ObjectStoreProvider, + "object-store-provider", + "", + "provider of object store plugin. Required if cloud-provider is kind, otherwise ignored.", + ) + flag.StringVar( + &test.VeleroCfg.BSLBucket, + "bucket", + "", + "name of the object storage bucket where backups from e2e tests should be stored. Required.", + ) + flag.StringVar( + &test.VeleroCfg.CloudCredentialsFile, + "credentials-file", + "", + "file containing credentials for backup and volume provider. Required.", + ) + flag.StringVar( + &test.VeleroCfg.VeleroCLI, + "velerocli", + "velero", + "path to the velero application to use.", + ) + flag.StringVar( + &test.VeleroCfg.VeleroImage, + "velero-image", + "velero/velero:main", + "image for the velero server to be tested.", + ) + flag.StringVar( + &test.VeleroCfg.Plugins, + "plugins", + "", + "provider plugins to be tested.", + ) + flag.StringVar( + &test.VeleroCfg.AddBSLPlugins, + "additional-bsl-plugins", + "", + "additional plugins to be tested.", + ) + flag.StringVar( + &test.VeleroCfg.VeleroVersion, + "velero-version", + "main", + "image version for the velero server to be tested with.", + ) + flag.StringVar( + &test.VeleroCfg.RestoreHelperImage, + "restore-helper-image", + "", + "image for the velero restore helper to be tested.", + ) + flag.StringVar( + &test.VeleroCfg.UpgradeFromVeleroCLI, + "upgrade-from-velero-cli", + "", + "comma-separated list of velero application for the pre-upgrade velero server.", + ) + flag.StringVar( + &test.VeleroCfg.UpgradeFromVeleroVersion, + "upgrade-from-velero-version", + "v1.7.1", + "comma-separated list of Velero version to be tested with for the pre-upgrade velero server.", + ) + flag.StringVar( + &test.VeleroCfg.MigrateFromVeleroCLI, + "migrate-from-velero-cli", + "", + "comma-separated list of velero application on source cluster.", + ) + flag.StringVar( + &test.VeleroCfg.MigrateFromVeleroVersion, + "migrate-from-velero-version", + "self", + "comma-separated list of Velero version to be tested with on source cluster.", + ) + flag.StringVar( + &test.VeleroCfg.BSLConfig, + "bsl-config", + "", "configuration to use for the backup storage location. Format is key1=value1,key2=value2") + flag.StringVar( + &test.VeleroCfg.BSLPrefix, + "prefix", + "", + "prefix under which all Velero data should be stored within the bucket. Optional.", + ) + flag.StringVar( + &test.VeleroCfg.VSLConfig, + "vsl-config", + "", + "configuration to use for the volume snapshot location. Format is key1=value1,key2=value2", + ) + flag.StringVar( + &test.VeleroCfg.VeleroNamespace, + "velero-namespace", + "velero", + "namespace to install Velero into", + ) + flag.BoolVar( + &test.InstallVelero, + "install-velero", + true, + "install/uninstall velero during the test. 
Optional.", + ) + flag.BoolVar( + &test.VeleroCfg.UseNodeAgent, + "use-node-agent", + true, + "whether deploy node agent daemonset velero during the test. Optional.", + ) + flag.BoolVar( + &test.VeleroCfg.UseVolumeSnapshots, + "use-volume-snapshots", + true, + "whether or not to create snapshot location automatically. Set to false if you do not plan to create volume snapshots via a storage provider.", + ) + flag.StringVar( + &test.VeleroCfg.RegistryCredentialFile, + "registry-credential-file", + "", + "file containing credential for the image registry, follows the same format rules as the ~/.docker/config.json file. Optional.", + ) + flag.StringVar( + &test.VeleroCfg.KibishiiDirectory, + "kibishii-directory", + "github.com/vmware-tanzu-experiments/distributed-data-generator/kubernetes/yaml/", + "file directory or URL path to install Kibishii. Optional.", + ) + // Flags to create an additional BSL for multiple credentials test - flag.StringVar(&VeleroCfg.AdditionalBSLProvider, "additional-bsl-object-store-provider", "", "provider of object store plugin for additional backup storage location. Required if testing multiple credentials support.") - flag.StringVar(&VeleroCfg.AdditionalBSLBucket, "additional-bsl-bucket", "", "name of the object storage bucket for additional backup storage location. Required if testing multiple credentials support.") - flag.StringVar(&VeleroCfg.AdditionalBSLPrefix, "additional-bsl-prefix", "", "prefix under which all Velero data should be stored within the bucket for additional backup storage location. Optional.") - flag.StringVar(&VeleroCfg.AdditionalBSLConfig, "additional-bsl-config", "", "configuration to use for the additional backup storage location. Format is key1=value1,key2=value2") - flag.StringVar(&VeleroCfg.AdditionalBSLCredentials, "additional-bsl-credentials-file", "", "file containing credentials for additional backup storage location provider. 
Required if testing multiple credentials support.") - flag.StringVar(&VeleroCfg.Features, "features", "", "comma-separated list of features to enable for this Velero process.") - flag.StringVar(&VeleroCfg.GCFrequency, "garbage-collection-frequency", "", "frequency of garbage collection.") - flag.StringVar(&VeleroCfg.DefaultClusterContext, "default-cluster-context", "", "default cluster's kube config context, it's for migration test.") - flag.StringVar(&VeleroCfg.StandbyClusterContext, "standby-cluster-context", "", "standby cluster's kube config context, it's for migration test.") - flag.StringVar(&VeleroCfg.UploaderType, "uploader-type", "", "type of uploader for persistent volume backup.") - flag.BoolVar(&VeleroCfg.VeleroServerDebugMode, "velero-server-debug-mode", false, "a switch for enable or disable having debug log of Velero server.") - flag.BoolVar(&VeleroCfg.SnapshotMoveData, "snapshot-move-data", false, "a Switch for taking backup with Velero's data mover, if data-mover-plugin is not provided, using built-in plugin") - flag.StringVar(&VeleroCfg.DataMoverPlugin, "data-mover-plugin", "", "customized plugin for data mover.") - flag.StringVar(&VeleroCfg.StandbyClusterCloudProvider, "standby-cluster-cloud-provider", "", "cloud provider for standby cluster.") - flag.StringVar(&VeleroCfg.StandbyClusterPlugins, "standby-cluster-plugins", "", "plugins provider for standby cluster.") - flag.StringVar(&VeleroCfg.StandbyClusterObjectStoreProvider, "standby-cluster-object-store-provider", "", "object store provider for standby cluster.") - flag.BoolVar(&VeleroCfg.DebugVeleroPodRestart, "debug-velero-pod-restart", false, "a switch for debugging velero pod restart.") - flag.BoolVar(&VeleroCfg.DisableInformerCache, "disable-informer-cache", false, "a switch for disable informer cache.") - flag.StringVar(&VeleroCfg.DefaultClusterName, "default-cluster-name", "", "default cluster's name in kube config file, it's for EKS IRSA test.") - flag.StringVar(&VeleroCfg.StandbyClusterName, "standby-cluster-name", "", "standby cluster's name in kube config file, it's for EKS IRSA test.") - flag.StringVar(&VeleroCfg.EKSPolicyARN, "eks-policy-arn", "", "EKS plicy ARN for creating AWS IAM service account.") - flag.StringVar(&VeleroCfg.DefaultCLSServiceAccountName, "default-cls-service-account-name", "", "default cluster service account name.") - flag.StringVar(&VeleroCfg.StandbyCLSServiceAccountName, "standby-cls-service-account-name", "", "standby cluster service account name.") - flag.BoolVar(&VeleroCfg.FailFast, "fail-fast", true, "a switch for failing fast on meeting error.") - flag.BoolVar(&VeleroCfg.HasVspherePlugin, "has-vsphere-plugin", false, "a switch for installing vSphere plugin.") + flag.StringVar( + &test.VeleroCfg.AdditionalBSLProvider, + "additional-bsl-object-store-provider", + "", + "provider of object store plugin for additional backup storage location. Required if testing multiple credentials support.", + ) + flag.StringVar( + &test.VeleroCfg.AdditionalBSLBucket, + "additional-bsl-bucket", + "", + "name of the object storage bucket for additional backup storage location. Required if testing multiple credentials support.", + ) + flag.StringVar( + &test.VeleroCfg.AdditionalBSLPrefix, + "additional-bsl-prefix", + "", + "prefix under which all Velero data should be stored within the bucket for additional backup storage location. 
Optional.", + ) + flag.StringVar( + &test.VeleroCfg.AdditionalBSLConfig, + "additional-bsl-config", + "", + "configuration to use for the additional backup storage location. Format is key1=value1,key2=value2", + ) + flag.StringVar( + &test.VeleroCfg.AdditionalBSLCredentials, + "additional-bsl-credentials-file", + "", + "file containing credentials for additional backup storage location provider. Required if testing multiple credentials support.", + ) + flag.StringVar( + &test.VeleroCfg.Features, + "features", + "", + "comma-separated list of features to enable for this Velero process.", + ) + flag.StringVar( + &test.VeleroCfg.GCFrequency, + "garbage-collection-frequency", + "", + "frequency of garbage collection.", + ) + flag.StringVar( + &test.VeleroCfg.DefaultClusterContext, + "default-cluster-context", + "", + "default cluster's kube config context, it's for migration test.", + ) + flag.StringVar( + &test.VeleroCfg.StandbyClusterContext, + "standby-cluster-context", + "", + "standby cluster's kube config context, it's for migration test.", + ) + flag.StringVar( + &test.VeleroCfg.UploaderType, + "uploader-type", + "", + "type of uploader for persistent volume backup.", + ) + flag.BoolVar( + &test.VeleroCfg.VeleroServerDebugMode, + "velero-server-debug-mode", + false, + "a switch for enable or disable having debug log of Velero server.", + ) + flag.BoolVar( + &test.VeleroCfg.SnapshotMoveData, + "snapshot-move-data", + false, + "a Switch for taking backup with Velero's data mover, if data-mover-plugin is not provided, using built-in plugin", + ) + flag.StringVar( + &test.VeleroCfg.DataMoverPlugin, + "data-mover-plugin", + "", + "customized plugin for data mover.", + ) + flag.StringVar( + &test.VeleroCfg.StandbyClusterCloudProvider, + "standby-cluster-cloud-provider", + "", + "cloud provider for standby cluster.", + ) + flag.StringVar( + &test.VeleroCfg.StandbyClusterPlugins, + "standby-cluster-plugins", + "", + "plugins provider for standby cluster.", + ) + flag.StringVar( + &test.VeleroCfg.StandbyClusterObjectStoreProvider, + "standby-cluster-object-store-provider", + "", + "object store provider for standby cluster.", + ) + flag.BoolVar( + &test.VeleroCfg.DebugVeleroPodRestart, + "debug-velero-pod-restart", + false, + "a switch for debugging velero pod restart.", + ) + flag.BoolVar( + &test.VeleroCfg.DisableInformerCache, + "disable-informer-cache", + false, + "a switch for disable informer cache.", + ) + flag.StringVar( + &test.VeleroCfg.DefaultClusterName, + "default-cluster-name", + "", + "default cluster's name in kube config file, it's for EKS IRSA test.", + ) + flag.StringVar( + &test.VeleroCfg.StandbyClusterName, + "standby-cluster-name", + "", + "standby cluster's name in kube config file, it's for EKS IRSA test.", + ) + flag.StringVar( + &test.VeleroCfg.EKSPolicyARN, + "eks-policy-arn", + "", + "EKS plicy ARN for creating AWS IAM service account.", + ) + flag.StringVar( + &test.VeleroCfg.DefaultCLSServiceAccountName, + "default-cls-service-account-name", + "", + "default cluster service account name.", + ) + flag.StringVar( + &test.VeleroCfg.StandbyCLSServiceAccountName, + "standby-cls-service-account-name", + "", + "standby cluster service account name.", + ) + flag.BoolVar( + &test.VeleroCfg.FailFast, + "fail-fast", + true, + "a switch for failing fast on meeting error.", + ) + flag.BoolVar( + &test.VeleroCfg.HasVspherePlugin, + "has-vsphere-plugin", + false, + "a switch for installing vSphere plugin.", + ) } // Add label [SkipVanillaZfs]: @@ -113,147 +352,302 @@ func init() { 
// caused by no expected snapshot found. If we use retain as reclaim policy, then this label can be ignored, all test // cases can be executed as expected successful result. -var _ = Describe("Velero tests with various CRD API group versions", - Label("APIGroup", "APIVersion", "SKIP_KIND", "LongTime"), APIGroupVersionsTest) -var _ = Describe("CRD of apiextentions v1beta1 should be B/R successfully from cluster(k8s version < 1.22) to cluster(k8s version >= 1.22)", - Label("APIGroup", "APIExtensions", "SKIP_KIND"), APIExtensionsVersionsTest) +var _ = Describe( + "Velero tests with various CRD API group versions", + Label("APIGroup", "APIVersion", "SKIP_KIND", "LongTime"), + APIGroupVersionsTest, +) +var _ = Describe( + "CRD of apiextentions v1beta1 should be B/R successfully from cluster(k8s version < 1.22) to cluster(k8s version >= 1.22)", + Label("APIGroup", "APIExtensions", "SKIP_KIND"), + APIExtensionsVersionsTest, +) // Test backup and restore of Kibishii using restic -var _ = Describe("Velero tests on cluster using the plugin provider for object storage and Restic for volume backups", - Label("Basic", "Restic"), BackupRestoreWithRestic) +var _ = Describe( + "Velero tests on cluster using the plugin provider for object storage and Restic for volume backups", + Label("Basic", "Restic"), + BackupRestoreWithRestic, +) -var _ = Describe("Velero tests on cluster using the plugin provider for object storage and snapshots for volume backups", - Label("Basic", "Snapshot", "SkipVanillaZfs"), BackupRestoreWithSnapshots) +var _ = Describe( + "Velero tests on cluster using the plugin provider for object storage and snapshots for volume backups", + Label("Basic", "Snapshot", "SkipVanillaZfs"), + BackupRestoreWithSnapshots, +) -var _ = Describe("Velero tests on cluster using the plugin provider for object storage and snapshots for volume backups", - Label("Basic", "Snapshot", "RetainPV"), BackupRestoreRetainedPVWithSnapshots) +var _ = Describe( + "Velero tests on cluster using the plugin provider for object storage and snapshots for volume backups", + Label("Basic", "Snapshot", "RetainPV"), + BackupRestoreRetainedPVWithSnapshots, +) -var _ = Describe("Velero tests on cluster using the plugin provider for object storage and snapshots for volume backups", - Label("Basic", "Restic", "RetainPV"), BackupRestoreRetainedPVWithRestic) +var _ = Describe( + "Velero tests on cluster using the plugin provider for object storage and snapshots for volume backups", + Label("Basic", "Restic", "RetainPV"), + BackupRestoreRetainedPVWithRestic, +) -var _ = Describe("Backup/restore of cluster resources", - Label("Basic", "ClusterResource"), ResourcesCheckTest) +var _ = Describe( + "Backup/restore of cluster resources", + Label("Basic", "ClusterResource"), + ResourcesCheckTest, +) -var _ = Describe("Service NodePort reservation during restore is configurable", - Label("Basic", "NodePort"), NodePortTest) +var _ = Describe( + "Service NodePort reservation during restore is configurable", + Label("Basic", "NodePort"), + NodePortTest, +) -var _ = Describe("Storage class of persistent volumes and persistent volume claims can be changed during restores", - Label("Basic", "StorageClass"), StorageClasssChangingTest) +var _ = Describe( + "Storage class of persistent volumes and persistent volume claims can be changed during restores", + Label("Basic", "StorageClass"), + StorageClasssChangingTest, +) -var _ = Describe("Node selectors of persistent volume claims can be changed during restores", - Label("Basic", "SelectedNode", 
"SKIP_KIND"), PVCSelectedNodeChangingTest) +var _ = Describe( + "Node selectors of persistent volume claims can be changed during restores", + Label("Basic", "SelectedNode", "SKIP_KIND"), + PVCSelectedNodeChangingTest, +) -var _ = Describe("Backup/restore of 2500 namespaces", - Label("Scale", "LongTime"), MultiNSBackupRestore) +var _ = Describe( + "Backup/restore of 2500 namespaces", + Label("Scale", "LongTime"), + MultiNSBackupRestore, +) // Upgrade test by Kibishii using Restic -var _ = Describe("Velero upgrade tests on cluster using the plugin provider for object storage and Restic for volume backups", - Label("Upgrade", "Restic"), BackupUpgradeRestoreWithRestic) -var _ = Describe("Velero upgrade tests on cluster using the plugin provider for object storage and snapshots for volume backups", - Label("Upgrade", "Snapshot", "SkipVanillaZfs"), BackupUpgradeRestoreWithSnapshots) +var _ = Describe( + "Velero upgrade tests on cluster using the plugin provider for object storage and Restic for volume backups", + Label("Upgrade", "Restic"), + BackupUpgradeRestoreWithRestic, +) +var _ = Describe( + "Velero upgrade tests on cluster using the plugin provider for object storage and snapshots for volume backups", + Label("Upgrade", "Snapshot", "SkipVanillaZfs"), + BackupUpgradeRestoreWithSnapshots, +) // test filter objects by namespace, type, or labels when backup or restore. -var _ = Describe("Resources with the label velero.io/exclude-from-backup=true are not included in backup", - Label("ResourceFiltering", "ExcludeFromBackup"), ExcludeFromBackupTest) -var _ = Describe("Velero test on exclude namespace from the cluster backup", - Label("ResourceFiltering", "ExcludeNamespaces", "Backup"), BackupWithExcludeNamespaces) -var _ = Describe("Velero test on exclude namespace from the cluster restore", - Label("ResourceFiltering", "ExcludeNamespaces", "Restore"), RestoreWithExcludeNamespaces) -var _ = Describe("Velero test on exclude resources from the cluster backup", - Label("ResourceFiltering", "ExcludeResources", "Backup"), BackupWithExcludeResources) -var _ = Describe("Velero test on exclude resources from the cluster restore", - Label("ResourceFiltering", "ExcludeResources", "Restore"), RestoreWithExcludeResources) -var _ = Describe("Velero test on include namespace from the cluster backup", - Label("ResourceFiltering", "IncludeNamespaces", "Backup"), BackupWithIncludeNamespaces) -var _ = Describe("Velero test on include namespace from the cluster restore", - Label("ResourceFiltering", "IncludeNamespaces", "Restore"), RestoreWithIncludeNamespaces) -var _ = Describe("Velero test on include resources from the cluster backup", - Label("ResourceFiltering", "IncludeResources", "Backup"), BackupWithIncludeResources) -var _ = Describe("Velero test on include resources from the cluster restore", - Label("ResourceFiltering", "IncludeResources", "Restore"), RestoreWithIncludeResources) -var _ = Describe("Velero test on backup include resources matching the label selector", - Label("ResourceFiltering", "LabelSelector"), BackupWithLabelSelector) -var _ = Describe("Velero test on skip backup of volume by resource policies", - Label("ResourceFiltering", "ResourcePolicies", "Restic"), ResourcePoliciesTest) +var _ = Describe( + "Resources with the label velero.io/exclude-from-backup=true are not included in backup", + Label("ResourceFiltering", "ExcludeFromBackup"), + ExcludeFromBackupTest, +) +var _ = Describe( + "Velero test on exclude namespace from the cluster backup", + Label("ResourceFiltering", 
"ExcludeNamespaces", "Backup"), + BackupWithExcludeNamespaces, +) +var _ = Describe( + "Velero test on exclude namespace from the cluster restore", + Label("ResourceFiltering", "ExcludeNamespaces", "Restore"), + RestoreWithExcludeNamespaces, +) +var _ = Describe( + "Velero test on exclude resources from the cluster backup", + Label("ResourceFiltering", "ExcludeResources", "Backup"), + BackupWithExcludeResources, +) +var _ = Describe( + "Velero test on exclude resources from the cluster restore", + Label("ResourceFiltering", "ExcludeResources", "Restore"), + RestoreWithExcludeResources, +) +var _ = Describe( + "Velero test on include namespace from the cluster backup", + Label("ResourceFiltering", "IncludeNamespaces", "Backup"), + BackupWithIncludeNamespaces, +) +var _ = Describe( + "Velero test on include namespace from the cluster restore", + Label("ResourceFiltering", "IncludeNamespaces", "Restore"), + RestoreWithIncludeNamespaces, +) +var _ = Describe( + "Velero test on include resources from the cluster backup", + Label("ResourceFiltering", "IncludeResources", "Backup"), + BackupWithIncludeResources, +) +var _ = Describe( + "Velero test on include resources from the cluster restore", + Label("ResourceFiltering", "IncludeResources", "Restore"), + RestoreWithIncludeResources, +) +var _ = Describe( + "Velero test on backup include resources matching the label selector", + Label("ResourceFiltering", "LabelSelector"), + BackupWithLabelSelector, +) +var _ = Describe( + "Velero test on skip backup of volume by resource policies", + Label("ResourceFiltering", "ResourcePolicies", "Restic"), + ResourcePoliciesTest, +) // backup VolumeInfo test -var _ = Describe("", Label("BackupVolumeInfo", "SkippedVolume"), SkippedVolumeInfoTest) -var _ = Describe("", Label("BackupVolumeInfo", "FilesystemUpload"), FilesystemUploadVolumeInfoTest) -var _ = Describe("", Label("BackupVolumeInfo", "CSIDataMover"), CSIDataMoverVolumeInfoTest) -var _ = Describe("", Label("BackupVolumeInfo", "CSISnapshot"), CSISnapshotVolumeInfoTest) -var _ = Describe("", Label("BackupVolumeInfo", "NativeSnapshot"), NativeSnapshotVolumeInfoTest) - -var _ = Describe("Velero test on resource modifiers from the cluster restore", - Label("ResourceModifier", "Restore"), ResourceModifiersTest) - -var _ = Describe("Velero tests of Restic backup deletion", - Label("Backups", "Deletion", "Restic"), BackupDeletionWithRestic) -var _ = Describe("Velero tests of snapshot backup deletion", - Label("Backups", "Deletion", "Snapshot", "SkipVanillaZfs"), BackupDeletionWithSnapshots) -var _ = Describe("Local backups and Restic repos will be deleted once the corresponding backup storage location is deleted", - Label("Backups", "TTL", "LongTime", "Snapshot", "SkipVanillaZfs"), TTLTest) -var _ = Describe("Backups in object storage are synced to a new Velero and deleted backups in object storage are synced to be deleted in Velero", - Label("Backups", "BackupsSync"), BackupsSyncTest) - -var _ = Describe("Backup will be created periodically by schedule defined by a Cron expression", - Label("Schedule", "BR", "Pause", "LongTime"), ScheduleBackupTest) -var _ = Describe("Backup resources should follow the specific order in schedule", - Label("Schedule", "OrderedResources", "LongTime"), ScheduleOrderedResources) -var _ = Describe("Schedule controller wouldn't create a new backup when it still has pending or InProgress backup", - Label("Schedule", "BackupCreation", "SKIP_KIND", "LongTime"), ScheduleBackupCreationTest) - -var _ = Describe("Velero test on ssr object 
when controller namespace mix-ups", - Label("PrivilegesMgmt", "SSR"), SSRTest) - -var _ = Describe("Local backups will be deleted once the corresponding backup storage location is deleted", - Label("BSL", "Deletion", "Snapshot", "SkipVanillaZfs"), BslDeletionWithSnapshots) -var _ = Describe("Local backups and Restic repos will be deleted once the corresponding backup storage location is deleted", - Label("BSL", "Deletion", "Restic"), BslDeletionWithRestic) - -var _ = Describe("Migrate resources between clusters by Restic", - Label("Migration", "Restic"), MigrationWithRestic) -var _ = Describe("Migrate resources between clusters by snapshot", - Label("Migration", "Snapshot", "SkipVanillaZfs"), MigrationWithSnapshots) - -var _ = Describe("Backup resources should follow the specific order in schedule", - Label("NamespaceMapping", "Single", "Restic"), OneNamespaceMappingResticTest) -var _ = Describe("Backup resources should follow the specific order in schedule", - Label("NamespaceMapping", "Multiple", "Restic"), MultiNamespacesMappingResticTest) -var _ = Describe("Backup resources should follow the specific order in schedule", - Label("NamespaceMapping", "Single", "Snapshot", "SkipVanillaZfs"), OneNamespaceMappingSnapshotTest) -var _ = Describe("Backup resources should follow the specific order in schedule", - Label("NamespaceMapping", "Multiple", "Snapshot", "SkipVanillaZfs"), MultiNamespacesMappingSnapshotTest) - -var _ = Describe("Backup resources should follow the specific order in schedule", - Label("PVBackup", "OptIn"), OptInPVBackupTest) -var _ = Describe("Backup resources should follow the specific order in schedule", - Label("PVBackup", "OptOut"), OptOutPVBackupTest) - -var _ = Describe("Velero test on parallel files upload", - Label("UploaderConfig", "ParallelFilesUpload"), ParallelFilesUploadTest) -var _ = Describe("Velero test on parallel files download", - Label("UploaderConfig", "ParallelFilesDownload"), ParallelFilesDownloadTest) +var _ = Describe( + "", + Label("BackupVolumeInfo", "SkippedVolume"), + SkippedVolumeInfoTest, +) +var _ = Describe( + "", + Label("BackupVolumeInfo", "FilesystemUpload"), + FilesystemUploadVolumeInfoTest, +) +var _ = Describe( + "", + Label("BackupVolumeInfo", "CSIDataMover"), + CSIDataMoverVolumeInfoTest, +) +var _ = Describe( + "", + Label("BackupVolumeInfo", "CSISnapshot"), + CSISnapshotVolumeInfoTest, +) +var _ = Describe( + "", + Label("BackupVolumeInfo", "NativeSnapshot"), + NativeSnapshotVolumeInfoTest, +) + +var _ = Describe( + "Velero test on resource modifiers from the cluster restore", + Label("ResourceModifier", "Restore"), + ResourceModifiersTest, +) + +var _ = Describe( + "Velero tests of Restic backup deletion", + Label("Backups", "Deletion", "Restic"), + BackupDeletionWithRestic, +) +var _ = Describe( + "Velero tests of snapshot backup deletion", + Label("Backups", "Deletion", "Snapshot", "SkipVanillaZfs"), + BackupDeletionWithSnapshots, +) +var _ = Describe( + "Local backups and Restic repos will be deleted once the corresponding backup storage location is deleted", + Label("Backups", "TTL", "LongTime", "Snapshot", "SkipVanillaZfs"), + TTLTest, +) +var _ = Describe( + "Backups in object storage are synced to a new Velero and deleted backups in object storage are synced to be deleted in Velero", + Label("Backups", "BackupsSync"), + BackupsSyncTest, +) + +var _ = Describe( + "Backup will be created periodically by schedule defined by a Cron expression", + Label("Schedule", "Periodical", "Pause", "LongTime"), + SchedulePeriodicalTest, 
+) +var _ = Describe( + "Backup resources should follow the specific order in schedule", + Label("Schedule", "OrderedResources"), + ScheduleOrderedResources, +) +var _ = Describe( + "Schedule controller wouldn't create a new backup when it still has pending or InProgress backup", + Label("Schedule", "InProgress", "SKIP_KIND", "LongTime"), + ScheduleInProgressTest, +) + +var _ = Describe( + "Velero test on ssr object when controller namespace mix-ups", + Label("PrivilegesMgmt", "SSR"), + SSRTest, +) + +var _ = Describe( + "Local backups will be deleted once the corresponding backup storage location is deleted", + Label("BSL", "Deletion", "Snapshot", "SkipVanillaZfs"), + BslDeletionWithSnapshots, +) +var _ = Describe( + "Local backups and Restic repos will be deleted once the corresponding backup storage location is deleted", + Label("BSL", "Deletion", "Restic"), + BslDeletionWithRestic, +) + +var _ = Describe( + "Migrate resources between clusters by Restic", + Label("Migration", "Restic"), + MigrationWithRestic, +) +var _ = Describe( + "Migrate resources between clusters by snapshot", + Label("Migration", "Snapshot", "SkipVanillaZfs"), + MigrationWithSnapshots, +) + +var _ = Describe( + "Backup resources should follow the specific order in schedule", + Label("NamespaceMapping", "Single", "Restic"), + OneNamespaceMappingResticTest, +) +var _ = Describe( + "Backup resources should follow the specific order in schedule", + Label("NamespaceMapping", "Multiple", "Restic"), + MultiNamespacesMappingResticTest, +) +var _ = Describe( + "Backup resources should follow the specific order in schedule", + Label("NamespaceMapping", "Single", "Snapshot", "SkipVanillaZfs"), + OneNamespaceMappingSnapshotTest, +) +var _ = Describe( + "Backup resources should follow the specific order in schedule", + Label("NamespaceMapping", "Multiple", "Snapshot", "SkipVanillaZfs"), + MultiNamespacesMappingSnapshotTest, +) + +var _ = Describe( + "Backup resources should follow the specific order in schedule", + Label("PVBackup", "OptIn"), + OptInPVBackupTest, +) +var _ = Describe( + "Backup resources should follow the specific order in schedule", + Label("PVBackup", "OptOut"), + OptOutPVBackupTest, +) + +var _ = Describe( + "Velero test on parallel files upload", + Label("UploaderConfig", "ParallelFilesUpload"), + ParallelFilesUploadTest, +) +var _ = Describe( + "Velero test on parallel files download", + Label("UploaderConfig", "ParallelFilesDownload"), + ParallelFilesDownloadTest, +) func GetKubeConfigContext() error { var err error - var tcDefault, tcStandby TestClient - tcDefault, err = NewTestClient(VeleroCfg.DefaultClusterContext) - VeleroCfg.DefaultClient = &tcDefault - VeleroCfg.ClientToInstallVelero = VeleroCfg.DefaultClient - VeleroCfg.ClusterToInstallVelero = VeleroCfg.DefaultClusterName - VeleroCfg.ServiceAccountNameToInstall = VeleroCfg.DefaultCLSServiceAccountName + var tcDefault, tcStandby k8s.TestClient + tcDefault, err = k8s.NewTestClient(test.VeleroCfg.DefaultClusterContext) + test.VeleroCfg.DefaultClient = &tcDefault + test.VeleroCfg.ClientToInstallVelero = test.VeleroCfg.DefaultClient + test.VeleroCfg.ClusterToInstallVelero = test.VeleroCfg.DefaultClusterName + test.VeleroCfg.ServiceAccountNameToInstall = test.VeleroCfg.DefaultCLSServiceAccountName if err != nil { return err } - if VeleroCfg.DefaultClusterContext != "" { - err = KubectlConfigUseContext(context.Background(), VeleroCfg.DefaultClusterContext) + if test.VeleroCfg.DefaultClusterContext != "" { + err = 
k8s.KubectlConfigUseContext(context.Background(), test.VeleroCfg.DefaultClusterContext) if err != nil { return err } - if VeleroCfg.StandbyClusterContext != "" { - tcStandby, err = NewTestClient(VeleroCfg.StandbyClusterContext) - VeleroCfg.StandbyClient = &tcStandby + if test.VeleroCfg.StandbyClusterContext != "" { + tcStandby, err = k8s.NewTestClient(test.VeleroCfg.StandbyClusterContext) + test.VeleroCfg.StandbyClient = &tcStandby if err != nil { return err } @@ -275,14 +669,14 @@ func TestE2e(t *testing.T) { t.Skip("Skipping E2E tests") } - if !slices.Contains(LocalCloudProviders, VeleroCfg.CloudProvider) { + if !slices.Contains(test.LocalCloudProviders, test.VeleroCfg.CloudProvider) { fmt.Println("For cloud platforms, object store plugin provider will be set as cloud provider") // If ObjectStoreProvider is not provided, then using the value same as CloudProvider - if VeleroCfg.ObjectStoreProvider == "" { - VeleroCfg.ObjectStoreProvider = VeleroCfg.CloudProvider + if test.VeleroCfg.ObjectStoreProvider == "" { + test.VeleroCfg.ObjectStoreProvider = test.VeleroCfg.CloudProvider } } else { - if VeleroCfg.ObjectStoreProvider == "" { + if test.VeleroCfg.ObjectStoreProvider == "" { t.Error(errors.New("No object store provider specified - must be specified when using kind as the cloud provider")) // Must have an object store provider } } @@ -298,19 +692,67 @@ func TestE2e(t *testing.T) { } var _ = BeforeSuite(func() { - if InstallVelero { + By("Install StorageClass for E2E.") + Expect(veleroutil.InstallStorageClasses(test.VeleroCfg.CloudProvider)).To(Succeed()) + + if strings.EqualFold(test.VeleroCfg.Features, test.FeatureCSI) && + test.VeleroCfg.UseVolumeSnapshots { + By("Install VolumeSnapshotClass for E2E.") + Expect( + k8s.KubectlApplyByFile( + context.Background(), + fmt.Sprintf("../testdata/volume-snapshot-class/%s.yaml", test.VeleroCfg.CloudProvider), + ), + ).To(Succeed()) + } + + if test.InstallVelero { By("Install test resources before testing") - Expect(PrepareVelero(context.Background(), "install resource before testing", VeleroCfg)).To(Succeed()) + Expect( + veleroutil.PrepareVelero( + context.Background(), + "install resource before testing", + test.VeleroCfg, + ), + ).To(Succeed()) } }) var _ = AfterSuite(func() { + ctx, ctxCancel := context.WithTimeout(context.Background(), time.Minute*5) + defer ctxCancel() + + By("Delete StorageClasses created by E2E") + Expect( + k8s.DeleteStorageClass( + ctx, + *test.VeleroCfg.ClientToInstallVelero, + test.StorageClassName, + ), + ).To(Succeed()) + Expect( + k8s.DeleteStorageClass( + ctx, + *test.VeleroCfg.ClientToInstallVelero, + test.StorageClassName2, + ), + ).To(Succeed()) + + if strings.EqualFold(test.VeleroCfg.Features, test.FeatureCSI) && + test.VeleroCfg.UseVolumeSnapshots { + By("Delete VolumeSnapshotClass created by E2E") + Expect( + k8s.KubectlDeleteByFile( + ctx, + fmt.Sprintf("../testdata/volume-snapshot-class/%s.yaml", test.VeleroCfg.CloudProvider), + ), + ).To(Succeed()) + } + // If the Velero is installed during test, and the FailFast is not enabled, // uninstall Velero. If not, either Velero is not installed, or kept it for debug on failure. 
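// Illustrative note (assumed workflow, not introduced by this diff): when a failed
// run keeps Velero installed, the cluster can be inspected before manual cleanup,
// e.g. `velero backup get -n <velero-namespace>` and
// `velero backup logs <backup-name> -n <velero-namespace>`,
// and Velero removed afterwards with `velero uninstall -n <velero-namespace>`.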
- if InstallVelero && (testSuitePassed || !VeleroCfg.FailFast) { + if test.InstallVelero && (testSuitePassed || !test.VeleroCfg.FailFast) { By("release test resources after testing") - ctx, ctxCancel := context.WithTimeout(context.Background(), time.Minute*5) - defer ctxCancel() - Expect(VeleroUninstall(ctx, VeleroCfg.VeleroCLI, VeleroCfg.VeleroNamespace)).To(Succeed()) + Expect(veleroutil.VeleroUninstall(ctx, test.VeleroCfg)).To(Succeed()) } }) diff --git a/test/e2e/migration/migration.go b/test/e2e/migration/migration.go index ade423b76b..1604806208 100644 --- a/test/e2e/migration/migration.go +++ b/test/e2e/migration/migration.go @@ -75,8 +75,7 @@ func MigrationTest(useVolumeSnapshots bool, veleroCLI2Version VeleroCLI2Version) By("Uninstall Velero", func() { ctx, ctxCancel := context.WithTimeout(context.Background(), time.Minute*5) defer ctxCancel() - Expect(VeleroUninstall(ctx, veleroCfg.VeleroCLI, - veleroCfg.VeleroNamespace)).To(Succeed()) + Expect(VeleroUninstall(ctx, veleroCfg)).To(Succeed()) }) } }) @@ -87,28 +86,72 @@ func MigrationTest(useVolumeSnapshots bool, veleroCLI2Version VeleroCLI2Version) By(fmt.Sprintf("Uninstall Velero on cluster %s", veleroCfg.DefaultClusterContext), func() { ctx, ctxCancel := context.WithTimeout(context.Background(), time.Minute*5) defer ctxCancel() + Expect(KubectlConfigUseContext(context.Background(), veleroCfg.DefaultClusterContext)).To(Succeed()) - Expect(VeleroUninstall(ctx, veleroCfg.VeleroCLI, - veleroCfg.VeleroNamespace)).To(Succeed()) - DeleteNamespace(context.Background(), *veleroCfg.DefaultClient, migrationNamespace, true) + veleroCfg.ClientToInstallVelero = veleroCfg.DefaultClient + veleroCfg.ClusterToInstallVelero = veleroCfg.DefaultClusterName + Expect(VeleroUninstall(ctx, veleroCfg)).To(Succeed()) + + By(fmt.Sprintf("Delete sample workload namespace %s", migrationNamespace), func() { + Expect( + DeleteNamespace( + context.Background(), + *veleroCfg.DefaultClient, + migrationNamespace, + true), + ).To(Succeed()) + }) }) By(fmt.Sprintf("Uninstall Velero on cluster %s", veleroCfg.StandbyClusterContext), func() { ctx, ctxCancel := context.WithTimeout(context.Background(), time.Minute*5) defer ctxCancel() Expect(KubectlConfigUseContext(context.Background(), veleroCfg.StandbyClusterContext)).To(Succeed()) - Expect(VeleroUninstall(ctx, veleroCfg.VeleroCLI, - veleroCfg.VeleroNamespace)).To(Succeed()) - DeleteNamespace(context.Background(), *veleroCfg.StandbyClient, migrationNamespace, true) - }) + veleroCfg.ClientToInstallVelero = veleroCfg.StandbyClient + veleroCfg.ClusterToInstallVelero = veleroCfg.StandbyClusterName + + By("Delete StorageClasses created by E2E") + Expect( + DeleteStorageClass( + ctx, + *veleroCfg.ClientToInstallVelero, + StorageClassName, + ), + ).To(Succeed()) + Expect( + DeleteStorageClass( + ctx, + *veleroCfg.ClientToInstallVelero, + StorageClassName2, + ), + ).To(Succeed()) + + if strings.EqualFold(veleroCfg.Features, FeatureCSI) && + veleroCfg.UseVolumeSnapshots { + By("Delete VolumeSnapshotClass created by E2E") + Expect( + KubectlDeleteByFile( + ctx, + fmt.Sprintf("../testdata/volume-snapshot-class/%s.yaml", veleroCfg.CloudProvider), + ), + ).To(Succeed()) + } + + Expect(VeleroUninstall(ctx, veleroCfg)).To(Succeed()) - if InstallVelero { By(fmt.Sprintf("Delete sample workload namespace %s", migrationNamespace), func() { - DeleteNamespace(context.Background(), *veleroCfg.StandbyClient, migrationNamespace, true) + Expect( + DeleteNamespace( + context.Background(), + *veleroCfg.StandbyClient, + migrationNamespace, + 
true, + ), + ).To(Succeed()) }) - } + }) - By(fmt.Sprintf("Switch to default kubeconfig context %s", veleroCfg.DefaultClusterContext), func() { + By(fmt.Sprintf("Switch to default KubeConfig context %s", veleroCfg.DefaultClusterContext), func() { Expect(KubectlConfigUseContext(context.Background(), veleroCfg.DefaultClusterContext)).To(Succeed()) veleroCfg.ClientToInstallVelero = veleroCfg.DefaultClient veleroCfg.ClusterToInstallVelero = veleroCfg.DefaultClusterName @@ -297,6 +340,20 @@ func MigrationTest(useVolumeSnapshots bool, veleroCLI2Version VeleroCLI2Version) veleroCfg.ObjectStoreProvider = veleroCfg.StandbyClusterObjectStoreProvider } + By("Install StorageClass for E2E.") + Expect(InstallStorageClasses(veleroCfg.StandbyClusterCloudProvider)).To(Succeed()) + + if strings.EqualFold(veleroCfg.Features, FeatureCSI) && + veleroCfg.UseVolumeSnapshots { + By("Install VolumeSnapshotClass for E2E.") + Expect( + KubectlApplyByFile( + context.Background(), + fmt.Sprintf("../testdata/volume-snapshot-class/%s.yaml", veleroCfg.StandbyClusterCloudProvider), + ), + ).To(Succeed()) + } + Expect(VeleroInstall(context.Background(), &veleroCfg, true)).To(Succeed()) }) @@ -307,16 +364,13 @@ func MigrationTest(useVolumeSnapshots bool, veleroCLI2Version VeleroCLI2Version) By(fmt.Sprintf("Restore %s", migrationNamespace), func() { if OriginVeleroCfg.SnapshotMoveData { - By(fmt.Sprintf("Create a storage class %s for restore PV provisioned by storage class %s on different cloud provider", StorageClassName, KibishiiStorageClassName), func() { - Expect(InstallStorageClass(context.Background(), fmt.Sprintf("../testdata/storage-class/%s.yaml", veleroCfg.StandbyClusterCloudProvider))).To(Succeed()) - }) - configmaptName := "datamover-storage-class-config" + cmName := "datamover-storage-class-config" labels := map[string]string{"velero.io/change-storage-class": "RestoreItemAction", "velero.io/plugin-config": ""} data := map[string]string{KibishiiStorageClassName: StorageClassName} - By(fmt.Sprintf("Create ConfigMap %s in namespace %s", configmaptName, veleroCfg.VeleroNamespace), func() { - _, err := CreateConfigMap(veleroCfg.StandbyClient.ClientGo, veleroCfg.VeleroNamespace, configmaptName, labels, data) + By(fmt.Sprintf("Create ConfigMap %s in namespace %s", cmName, veleroCfg.VeleroNamespace), func() { + _, err := CreateConfigMap(veleroCfg.StandbyClient.ClientGo, veleroCfg.VeleroNamespace, cmName, labels, data) Expect(err).To(Succeed(), fmt.Sprintf("failed to create configmap in the namespace %q", veleroCfg.VeleroNamespace)) }) } else { @@ -343,7 +397,7 @@ func MigrationTest(useVolumeSnapshots bool, veleroCLI2Version VeleroCLI2Version) // TODO: delete backup created by case self, not all By("Clean backups after test", func() { veleroCfg.ClientToInstallVelero = veleroCfg.DefaultClient - DeleteBackups(context.Background(), backupNames, &veleroCfg) + Expect(DeleteBackups(context.Background(), backupNames, &veleroCfg)).To(Succeed()) }) }) }) diff --git a/test/e2e/parallelfilesdownload/parallel_files_download.go b/test/e2e/parallelfilesdownload/parallel_files_download.go index bc77dffc8c..a678c6c80e 100644 --- a/test/e2e/parallelfilesdownload/parallel_files_download.go +++ b/test/e2e/parallelfilesdownload/parallel_files_download.go @@ -21,7 +21,6 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - "github.com/pkg/errors" . "github.com/vmware-tanzu/velero/test" . 
"github.com/vmware-tanzu/velero/test/e2e/test" @@ -90,11 +89,6 @@ func (p *ParallelFilesDownload) Init() error { } func (p *ParallelFilesDownload) CreateResources() error { - err := InstallStorageClass(p.Ctx, fmt.Sprintf("../testdata/storage-class/%s.yaml", p.VeleroCfg.CloudProvider)) - if err != nil { - return errors.Wrapf(err, "failed to install storage class for pv backup filtering test") - } - By(fmt.Sprintf("Create namespace %s", p.namespace), func() { Expect(CreateNamespace(p.Ctx, p.Client, p.namespace)).To(Succeed(), fmt.Sprintf("Failed to create namespace %s", p.namespace)) diff --git a/test/e2e/parallelfilesupload/parallel_files_upload.go b/test/e2e/parallelfilesupload/parallel_files_upload.go index 6af813f985..5478e35186 100644 --- a/test/e2e/parallelfilesupload/parallel_files_upload.go +++ b/test/e2e/parallelfilesupload/parallel_files_upload.go @@ -21,7 +21,6 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - "github.com/pkg/errors" . "github.com/vmware-tanzu/velero/test" . "github.com/vmware-tanzu/velero/test/e2e/test" @@ -81,11 +80,6 @@ func (p *ParallelFilesUpload) Init() error { } func (p *ParallelFilesUpload) CreateResources() error { - err := InstallStorageClass(p.Ctx, fmt.Sprintf("../testdata/storage-class/%s.yaml", p.VeleroCfg.CloudProvider)) - if err != nil { - return errors.Wrapf(err, "failed to install storage class for pv backup filtering test") - } - By(fmt.Sprintf("Create namespace %s", p.namespace), func() { Expect(CreateNamespace(p.Ctx, p.Client, p.namespace)).To(Succeed(), fmt.Sprintf("Failed to create namespace %s", p.namespace)) diff --git a/test/e2e/pv-backup/pv-backup-filter.go b/test/e2e/pv-backup/pv-backup-filter.go index d89688c4f5..b58cb8a55b 100644 --- a/test/e2e/pv-backup/pv-backup-filter.go +++ b/test/e2e/pv-backup/pv-backup-filter.go @@ -63,11 +63,6 @@ func (p *PVBackupFiltering) Init() error { } func (p *PVBackupFiltering) CreateResources() error { - err := InstallStorageClass(p.Ctx, fmt.Sprintf("../testdata/storage-class/%s.yaml", p.VeleroCfg.CloudProvider)) - if err != nil { - return errors.Wrapf(err, "failed to install storage class for pv backup filtering test") - } - for _, ns := range *p.NSIncluded { By(fmt.Sprintf("Create namespaces %s for workload\n", ns), func() { Expect(CreateNamespace(p.Ctx, p.Client, ns)).To(Succeed(), fmt.Sprintf("Failed to create namespace %s", ns)) diff --git a/test/e2e/resource-filtering/exclude_label.go b/test/e2e/resource-filtering/exclude_label.go index 0dd1753694..b90d63dd87 100644 --- a/test/e2e/resource-filtering/exclude_label.go +++ b/test/e2e/resource-filtering/exclude_label.go @@ -144,7 +144,7 @@ func (e *ExcludeFromBackup) Verify() error { Expect(apierrors.IsNotFound(err)).To(BeTrue()) //Check configmap: should be included - _, err = GetConfigmap(e.Client.ClientGo, namespace, e.CaseBaseName) + _, err = GetConfigMap(e.Client.ClientGo, namespace, e.CaseBaseName) Expect(err).ShouldNot(HaveOccurred(), fmt.Sprintf("failed to list configmap in namespace: %q", namespace)) }) return nil diff --git a/test/e2e/resourcemodifiers/resource_modifiers.go b/test/e2e/resourcemodifiers/resource_modifiers.go index 1ab2c63945..e3bd9ea11d 100644 --- a/test/e2e/resourcemodifiers/resource_modifiers.go +++ b/test/e2e/resourcemodifiers/resource_modifiers.go @@ -134,7 +134,7 @@ func (r *ResourceModifiersCase) Clean() error { if CurrentSpecReport().Failed() && r.VeleroCfg.FailFast { fmt.Println("Test case failed and fail fast is enabled. 
Skip resource clean up.") } else { - if err := DeleteConfigmap(r.Client.ClientGo, r.VeleroCfg.VeleroNamespace, r.cmName); err != nil { + if err := DeleteConfigMap(r.Client.ClientGo, r.VeleroCfg.VeleroNamespace, r.cmName); err != nil { return err } diff --git a/test/e2e/resourcepolicies/resource_policies.go b/test/e2e/resourcepolicies/resource_policies.go index 7239c49fd5..94e238eda3 100644 --- a/test/e2e/resourcepolicies/resource_policies.go +++ b/test/e2e/resourcepolicies/resource_policies.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package filtering +package resourcepolicies import ( "fmt" @@ -29,7 +29,6 @@ import ( . "github.com/vmware-tanzu/velero/test" . "github.com/vmware-tanzu/velero/test/e2e/test" . "github.com/vmware-tanzu/velero/test/util/k8s" - . "github.com/vmware-tanzu/velero/test/util/velero" ) const FileName = "test-data.txt" @@ -101,10 +100,6 @@ func (r *ResourcePoliciesCase) Init() error { } func (r *ResourcePoliciesCase) CreateResources() error { - By(("Installing storage class..."), func() { - Expect(InstallTestStorageClasses(fmt.Sprintf("../testdata/storage-class/%s.yaml", r.VeleroCfg.CloudProvider))).To(Succeed(), "Failed to install storage class") - }) - By(fmt.Sprintf("Create configmap %s in namespaces %s for workload\n", r.cmName, r.VeleroCfg.VeleroNamespace), func() { Expect(CreateConfigMapFromYAMLData(r.Client.ClientGo, r.yamlConfig, r.cmName, r.VeleroCfg.VeleroNamespace)).To(Succeed(), fmt.Sprintf("Failed to create configmap %s in namespaces %s for workload\n", r.cmName, r.VeleroCfg.VeleroNamespace)) }) @@ -181,11 +176,7 @@ func (r *ResourcePoliciesCase) Clean() error { if CurrentSpecReport().Failed() && r.VeleroCfg.FailFast { fmt.Println("Test case failed and fail fast is enabled. Skip resource clean up.") } else { - if err := r.deleteTestStorageClassList([]string{StorageClassName, StorageClassName2}); err != nil { - return err - } - - if err := DeleteConfigmap(r.Client.ClientGo, r.VeleroCfg.VeleroNamespace, r.cmName); err != nil { + if err := DeleteConfigMap(r.Client.ClientGo, r.VeleroCfg.VeleroNamespace, r.cmName); err != nil { return err } @@ -248,12 +239,3 @@ func (r *ResourcePoliciesCase) writeDataIntoPods(namespace, volName string) erro } return nil } - -func (r *ResourcePoliciesCase) deleteTestStorageClassList(scList []string) error { - for _, v := range scList { - if err := DeleteStorageClass(r.Ctx, r.Client, v); err != nil { - return err - } - } - return nil -} diff --git a/test/e2e/schedule/in_progress.go b/test/e2e/schedule/in_progress.go new file mode 100644 index 0000000000..3a148a8c1f --- /dev/null +++ b/test/e2e/schedule/in_progress.go @@ -0,0 +1,196 @@ +package schedule + +import ( + "context" + "fmt" + "time" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/util/wait" + kbclient "sigs.k8s.io/controller-runtime/pkg/client" + + velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + "github.com/vmware-tanzu/velero/test" + framework "github.com/vmware-tanzu/velero/test/e2e/test" + k8sutil "github.com/vmware-tanzu/velero/test/util/k8s" + veleroutil "github.com/vmware-tanzu/velero/test/util/velero" +) + +var ScheduleInProgressTest func() = framework.TestFunc(&InProgressCase{}) + +type InProgressCase struct { + framework.TestCase + namespace string + ScheduleName string + ScheduleArgs []string + volume string + podName string + pvcName string + podAnn map[string]string + podSleepDuration time.Duration +} + +func (s *InProgressCase) Init() error { + Expect(s.TestCase.Init()).To(Succeed()) + + s.CaseBaseName = "schedule-backup-creation-test" + s.UUIDgen + s.ScheduleName = "schedule-" + s.CaseBaseName + s.namespace = s.CaseBaseName + podSleepDurationStr := "60s" + s.podSleepDuration, _ = time.ParseDuration(podSleepDurationStr) + + s.TestMsg = &framework.TestMSG{ + Desc: "Schedule controller wouldn't create a new backup when it still has pending or InProgress backup", + FailedMSG: "Failed to verify schedule back creation behavior", + Text: "Schedule controller wouldn't create a new backup when it still has pending or InProgress backup", + } + + s.podAnn = map[string]string{ + "pre.hook.backup.velero.io/container": s.podName, + "pre.hook.backup.velero.io/command": "[\"sleep\", \"" + podSleepDurationStr + "\"]", + "pre.hook.backup.velero.io/timeout": "120s", + } + s.volume = "volume-1" + s.podName = "pod-1" + s.pvcName = "pvc-1" + s.ScheduleArgs = []string{ + "--include-namespaces", s.namespace, + "--schedule=@every 1m", + } + return nil +} + +func (s *InProgressCase) CreateResources() error { + By(fmt.Sprintf("Create namespace %s", s.namespace), func() { + Expect( + k8sutil.CreateNamespace( + s.Ctx, + s.Client, + s.namespace, + ), + ).To(Succeed(), + fmt.Sprintf("Failed to create namespace %s", s.namespace)) + }) + + By(fmt.Sprintf("Create pod %s in namespace %s", s.podName, s.namespace), func() { + _, err := k8sutil.CreatePod( + s.Client, + s.namespace, + s.podName, + test.StorageClassName, + s.pvcName, + []string{s.volume}, + nil, + s.podAnn, + ) + Expect(err).To(Succeed()) + + err = k8sutil.WaitForPods( + s.Ctx, + s.Client, + s.namespace, + []string{s.podName}, + ) + Expect(err).To(Succeed()) + }) + return nil +} + +func (s *InProgressCase) Backup() error { + By(fmt.Sprintf("Creating schedule %s\n", s.ScheduleName), func() { + Expect( + veleroutil.VeleroScheduleCreate( + s.Ctx, + s.VeleroCfg.VeleroCLI, + s.VeleroCfg.VeleroNamespace, + s.ScheduleName, + s.ScheduleArgs, + ), + ).To( + Succeed(), + func() string { + veleroutil.RunDebug( + context.Background(), + s.VeleroCfg.VeleroCLI, + s.VeleroCfg.VeleroNamespace, + "", + "", + ) + + return "Fail to create schedule" + }) + }) + + By("Get backup every half minute.", func() { + err := wait.PollUntilContextTimeout( + s.Ctx, + 30*time.Second, + 5*time.Minute, + true, + func(ctx context.Context) (bool, error) { + backupList := new(velerov1api.BackupList) + + if err := s.Client.Kubebuilder.List( + s.Ctx, + backupList, + &kbclient.ListOptions{ + Namespace: s.VeleroCfg.VeleroNamespace, + LabelSelector: labels.SelectorFromSet(map[string]string{ + velerov1api.ScheduleNameLabel: s.ScheduleName, + }), + }, + ); err != nil { + return false, fmt.Errorf("failed to list backup in %s namespace for schedule %s: %s", + 
s.VeleroCfg.VeleroNamespace, s.ScheduleName, err.Error()) + } + + if len(backupList.Items) == 0 { + fmt.Println("No backup is found yet. Continue query on the next turn.") + return false, nil + } + + inProgressBackupCount := 0 + for _, backup := range backupList.Items { + if backup.Status.Phase == velerov1api.BackupPhaseInProgress { + inProgressBackupCount++ + } + } + + // There should be at most one in-progress backup per schedule. + Expect(inProgressBackupCount).Should(BeNumerically("<=", 1)) + + // Already ensured at most one in-progress backup when schedule triggered 2 backups. + // Succeed. + if len(backupList.Items) >= 2 { + return true, nil + } + + fmt.Println("Wait until the schedule triggers two backups.") + return false, nil + }, + ) + + Expect(err).To(Succeed()) + }) + return nil +} + +func (s *InProgressCase) Clean() error { + if CurrentSpecReport().Failed() && s.VeleroCfg.FailFast { + fmt.Println("Test case failed and fail fast is enabled. Skip resource clean up.") + } else { + Expect( + veleroutil.VeleroScheduleDelete( + s.Ctx, + s.VeleroCfg.VeleroCLI, + s.VeleroCfg.VeleroNamespace, + s.ScheduleName, + ), + ).To(Succeed()) + Expect(s.TestCase.Clean()).To(Succeed()) + } + + return nil +} diff --git a/test/e2e/schedule/ordered_resources.go b/test/e2e/schedule/ordered_resources.go index 6df2ab17d1..238a014c75 100644 --- a/test/e2e/schedule/ordered_resources.go +++ b/test/e2e/schedule/ordered_resources.go @@ -18,6 +18,7 @@ limitations under the License. //the ordered resources test related to https://github.com/vmware-tanzu/velero/issues/4561 import ( + "context" "fmt" "strings" "time" @@ -25,129 +26,189 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" "github.com/pkg/errors" + "k8s.io/apimachinery/pkg/labels" waitutil "k8s.io/apimachinery/pkg/util/wait" kbclient "sigs.k8s.io/controller-runtime/pkg/client" velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" - . "github.com/vmware-tanzu/velero/test/e2e/test" - . "github.com/vmware-tanzu/velero/test/util/k8s" - . 
"github.com/vmware-tanzu/velero/test/util/velero" + framework "github.com/vmware-tanzu/velero/test/e2e/test" + k8sutil "github.com/vmware-tanzu/velero/test/util/k8s" + veleroutil "github.com/vmware-tanzu/velero/test/util/velero" ) -var ScheduleOrderedResources func() = TestFunc(&OrderedResources{}) +var ScheduleOrderedResources func() = framework.TestFunc(&OrderedResources{}) type OrderedResources struct { - Namespace string - ScheduleName string - OrderMap map[string]string - ScheduleArgs []string - TestCase + Namespace string + ScheduleName string + OrderResource map[string]string + ScheduleArgs []string + framework.TestCase } func (o *OrderedResources) Init() error { - o.TestCase.Init() + Expect(o.TestCase.Init()).To(Succeed()) + o.CaseBaseName = "ordered-resources-" + o.UUIDgen o.ScheduleName = "schedule-" + o.CaseBaseName o.Namespace = o.CaseBaseName + "-" + o.UUIDgen - o.OrderMap = map[string]string{ + + o.OrderResource = map[string]string{ "deployments": fmt.Sprintf("deploy-%s", o.CaseBaseName), "secrets": fmt.Sprintf("secret-%s", o.CaseBaseName), "configmaps": fmt.Sprintf("configmap-%s", o.CaseBaseName), } - o.TestMsg = &TestMSG{ + + orderResourceArray := make([]string, 0) + for k, v := range o.OrderResource { + orderResourceArray = append( + orderResourceArray, + fmt.Sprintf("%s=%s", k, v), + ) + } + orderResourceStr := strings.Join(orderResourceArray, ";") + + o.TestMsg = &framework.TestMSG{ Desc: "Create a schedule to backup resources in a specific order should be successful", FailedMSG: "Failed to verify schedule backup resources in a specific order", Text: "Create a schedule to backup resources in a specific order should be successful", } - o.ScheduleArgs = []string{"--schedule", "@every 1m", - "--include-namespaces", o.Namespace, "--default-volumes-to-fs-backup", "--ordered-resources"} - var orderStr string - for kind, resource := range o.OrderMap { - orderStr += fmt.Sprintf("%s=%s;", kind, resource) + + o.ScheduleArgs = []string{ + "--schedule", + "@every 1m", + "--include-namespaces", + o.Namespace, + "--default-volumes-to-fs-backup", + "--ordered-resources", + orderResourceStr, } - o.ScheduleArgs = append(o.ScheduleArgs, strings.TrimRight(orderStr, ";")) return nil } + func (o *OrderedResources) CreateResources() error { label := map[string]string{ "orderedresources": "true", } fmt.Printf("Creating resources in %s namespace ...\n", o.Namespace) - if err := CreateNamespace(o.Ctx, o.Client, o.Namespace); err != nil { + if err := k8sutil.CreateNamespace(o.Ctx, o.Client, o.Namespace); err != nil { return errors.Wrapf(err, "failed to create namespace %s", o.Namespace) } + //Create deployment deploymentName := fmt.Sprintf("deploy-%s", o.CaseBaseName) fmt.Printf("Creating deployment %s in %s namespaces ...\n", deploymentName, o.Namespace) - deployment := NewDeployment(deploymentName, o.Namespace, 1, label, nil).Result() - deployment, err := CreateDeployment(o.Client.ClientGo, o.Namespace, deployment) + deployment := k8sutil.NewDeployment(deploymentName, o.Namespace, 1, label, nil).Result() + _, err := k8sutil.CreateDeployment(o.Client.ClientGo, o.Namespace, deployment) if err != nil { return errors.Wrap(err, fmt.Sprintf("failed to create namespace %q with err %v", o.Namespace, err)) } - err = WaitForReadyDeployment(o.Client.ClientGo, o.Namespace, deployment.Name) + err = k8sutil.WaitForReadyDeployment(o.Client.ClientGo, o.Namespace, deployment.Name) if err != nil { return errors.Wrap(err, fmt.Sprintf("failed to ensure job completion in namespace: %q", o.Namespace)) } + //Create 
Secret secretName := fmt.Sprintf("secret-%s", o.CaseBaseName) fmt.Printf("Creating secret %s in %s namespaces ...\n", secretName, o.Namespace) - _, err = CreateSecret(o.Client.ClientGo, o.Namespace, secretName, label) + _, err = k8sutil.CreateSecret(o.Client.ClientGo, o.Namespace, secretName, label) if err != nil { return errors.Wrap(err, fmt.Sprintf("failed to create secret in the namespace %q", o.Namespace)) } - err = WaitForSecretsComplete(o.Client.ClientGo, o.Namespace, secretName) - if err != nil { - return errors.Wrap(err, fmt.Sprintf("failed to ensure secret completion in namespace: %q", o.Namespace)) - } - //Create Configmap - configmapName := fmt.Sprintf("configmap-%s", o.CaseBaseName) - fmt.Printf("Creating configmap %s in %s namespaces ...\n", configmapName, o.Namespace) - _, err = CreateConfigMap(o.Client.ClientGo, o.Namespace, configmapName, label, nil) - if err != nil { - return errors.Wrap(err, fmt.Sprintf("failed to create configmap in the namespace %q", o.Namespace)) - } - err = WaitForConfigMapComplete(o.Client.ClientGo, o.Namespace, configmapName) - if err != nil { - return errors.Wrap(err, fmt.Sprintf("failed to ensure secret completion in namespace: %q", o.Namespace)) + + //Create ConfigMap + cmName := fmt.Sprintf("configmap-%s", o.CaseBaseName) + fmt.Printf("Creating ConfigMap %s in %s namespaces ...\n", cmName, o.Namespace) + if _, err := k8sutil.CreateConfigMap( + o.Client.ClientGo, + o.Namespace, + cmName, + label, + nil, + ); err != nil { + return errors.Wrap( + err, + fmt.Sprintf("failed to create ConfigMap in the namespace %q", o.Namespace), + ) } + return nil } func (o *OrderedResources) Backup() error { By(fmt.Sprintf("Create schedule the workload in %s namespace", o.Namespace), func() { - err := VeleroScheduleCreate(o.Ctx, o.VeleroCfg.VeleroCLI, o.VeleroCfg.VeleroNamespace, o.ScheduleName, o.ScheduleArgs) + err := veleroutil.VeleroScheduleCreate( + o.Ctx, + o.VeleroCfg.VeleroCLI, + o.VeleroCfg.VeleroNamespace, + o.ScheduleName, + o.ScheduleArgs, + ) Expect(err).To(Succeed(), fmt.Sprintf("Failed to create schedule %s with err %v", o.ScheduleName, err)) }) - return nil -} -func (o *OrderedResources) Destroy() error { - return nil -} - -func (o *OrderedResources) Verify() error { - By(fmt.Sprintf("Checking resource order in %s schedule cr", o.ScheduleName), func() { - err := CheckScheduleWithResourceOrder(o.Ctx, o.VeleroCfg.VeleroCLI, o.VeleroCfg.VeleroNamespace, o.ScheduleName, o.OrderMap) - Expect(err).To(Succeed(), fmt.Sprintf("Failed to check schedule %s with err %v", o.ScheduleName, err)) + By(fmt.Sprintf("Checking resource order in %s schedule CR", o.ScheduleName), func() { + err := veleroutil.CheckScheduleWithResourceOrder( + o.Ctx, + o.VeleroCfg.VeleroCLI, + o.VeleroCfg.VeleroNamespace, + o.ScheduleName, + o.OrderResource, + ) + Expect(err).To( + Succeed(), + fmt.Sprintf("Failed to check schedule %s with err %v", o.ScheduleName, err), + ) }) By("Checking resource order in backup cr", func() { - backupList := new(velerov1api.BackupList) - err := waitutil.PollImmediate(10*time.Second, time.Minute*5, func() (bool, error) { - if err := o.Client.Kubebuilder.List(o.Ctx, backupList, &kbclient.ListOptions{Namespace: o.VeleroCfg.VeleroNamespace}); err != nil { - return false, fmt.Errorf("failed to list backup object in %s namespace with err %v", o.VeleroCfg.VeleroNamespace, err) - } - - for _, backup := range backupList.Items { - if err := CheckBackupWithResourceOrder(o.Ctx, o.VeleroCfg.VeleroCLI, o.VeleroCfg.VeleroNamespace, backup.Name, o.OrderMap); err == nil 
{ - return true, nil + err := waitutil.PollUntilContextTimeout( + o.Ctx, + 30*time.Second, + time.Minute*5, + true, + func(ctx context.Context) (bool, error) { + backupList := new(velerov1api.BackupList) + + if err := o.Client.Kubebuilder.List( + o.Ctx, + backupList, + &kbclient.ListOptions{ + Namespace: o.VeleroCfg.VeleroNamespace, + LabelSelector: labels.SelectorFromSet(map[string]string{ + velerov1api.ScheduleNameLabel: o.ScheduleName, + }), + }, + ); err != nil { + return false, fmt.Errorf("failed to list backup in %s namespace for schedule %s: %s", + o.VeleroCfg.VeleroNamespace, o.ScheduleName, err.Error()) + } + + for _, backup := range backupList.Items { + if err := veleroutil.CheckBackupWithResourceOrder( + o.Ctx, + o.VeleroCfg.VeleroCLI, + o.VeleroCfg.VeleroNamespace, + backup.Name, + o.OrderResource, + ); err == nil { + // After schedule successfully triggers a backup, + // the workload namespace is deleted. + // It's possible the following backup may fail. + // As a result, as long as there is one backup in Completed state, + // the case assumes test pass. + return true, nil + } } - } - fmt.Printf("still finding backup created by schedule %s ...\n", o.ScheduleName) - return false, nil - }) - Expect(err).To(Succeed(), fmt.Sprintf("Failed to check schedule %s created backup with err %v", o.ScheduleName, err)) + fmt.Printf("still finding backup created by schedule %s ...\n", o.ScheduleName) + return false, nil + }) + Expect(err).To( + Succeed(), + fmt.Sprintf("Failed to check schedule %s created backup with err %v", + o.ScheduleName, err), + ) }) return nil } @@ -156,22 +217,15 @@ func (o *OrderedResources) Clean() error { if CurrentSpecReport().Failed() && o.VeleroCfg.FailFast { fmt.Println("Test case failed and fail fast is enabled. Skip resource clean up.") } else { - Expect(VeleroScheduleDelete(o.Ctx, o.VeleroCfg.VeleroCLI, o.VeleroCfg.VeleroNamespace, o.ScheduleName)).To(Succeed()) + Expect(veleroutil.VeleroScheduleDelete( + o.Ctx, + o.VeleroCfg.VeleroCLI, + o.VeleroCfg.VeleroNamespace, + o.ScheduleName, + )).To(Succeed()) + Expect(o.TestCase.Clean()).To(Succeed()) } return nil } - -func (o *OrderedResources) DeleteAllBackups() error { - backupList := new(velerov1api.BackupList) - if err := o.Client.Kubebuilder.List(o.Ctx, backupList, &kbclient.ListOptions{Namespace: o.VeleroCfg.VeleroNamespace}); err != nil { - return fmt.Errorf("failed to list backup object in %s namespace with err %v", o.VeleroCfg.VeleroNamespace, err) - } - for _, backup := range backupList.Items { - if err := VeleroBackupDelete(o.Ctx, o.VeleroCfg.VeleroCLI, o.VeleroCfg.VeleroNamespace, backup.Name); err != nil { - return err - } - } - return nil -} diff --git a/test/e2e/schedule/periodical.go b/test/e2e/schedule/periodical.go new file mode 100644 index 0000000000..330356e998 --- /dev/null +++ b/test/e2e/schedule/periodical.go @@ -0,0 +1,210 @@ +package schedule + +import ( + "context" + "fmt" + "strings" + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "k8s.io/apimachinery/pkg/util/wait" + + velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + framework "github.com/vmware-tanzu/velero/test/e2e/test" + k8sutil "github.com/vmware-tanzu/velero/test/util/k8s" + veleroutil "github.com/vmware-tanzu/velero/test/util/velero" +) + +type PeriodicalCase struct { + framework.TestCase + ScheduleName string + ScheduleArgs []string + Period int // The minimum unit is minute. 
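+	// Illustrative note (not part of this change): with the "--schedule=@every 1m"
+	// argument set in Init below, the effective period is one minute; a cron
+	// expression such as "*/1 * * * *" would fire at the same rate (assumed
+	// equivalence, shown for clarity only).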
+}
+
+var SchedulePeriodicalTest func() = framework.TestFunc(&PeriodicalCase{})
+
+func (n *PeriodicalCase) Init() error {
+ Expect(n.TestCase.Init()).To(Succeed())
+
+ n.CaseBaseName = "schedule-backup-" + n.UUIDgen
+ n.NSIncluded = &[]string{n.CaseBaseName}
+ n.ScheduleName = "schedule-" + n.CaseBaseName
+ n.RestoreName = "restore-" + n.CaseBaseName
+ n.TestMsg = &framework.TestMSG{
+ Desc: "Set up a scheduled backup defined by a Cron expression",
+ FailedMSG: "Failed to schedule a backup",
+ Text: "Should backup periodically according to the schedule",
+ }
+ n.ScheduleArgs = []string{
+ "--include-namespaces", strings.Join(*n.NSIncluded, ","),
+ "--schedule=@every 1m",
+ }
+
+ return nil
+}
+
+func (n *PeriodicalCase) CreateResources() error {
+ for _, ns := range *n.NSIncluded {
+ By(fmt.Sprintf("Creating namespace %s ......\n", ns), func() {
+ Expect(
+ k8sutil.CreateNamespace(
+ n.Ctx,
+ n.Client,
+ ns,
+ ),
+ ).To(
+ Succeed(),
+ fmt.Sprintf("Failed to create namespace %s", ns),
+ )
+ })
+
+ cmName := n.CaseBaseName
+ fmt.Printf("Creating ConfigMap %s in namespace %s ...\n", cmName, ns)
+ _, err := k8sutil.CreateConfigMap(
+ n.Client.ClientGo,
+ ns,
+ cmName,
+ nil,
+ nil,
+ )
+ Expect(err).To(Succeed(), fmt.Sprintf("failed to create ConfigMap in the namespace %q", ns))
+ }
+ return nil
+}
+
+func (n *PeriodicalCase) Backup() error {
+ By(fmt.Sprintf("Creating schedule %s ......\n", n.ScheduleName), func() {
+ Expect(
+ veleroutil.VeleroScheduleCreate(
+ n.Ctx,
+ n.VeleroCfg.VeleroCLI,
+ n.VeleroCfg.VeleroNamespace,
+ n.ScheduleName,
+ n.ScheduleArgs,
+ ),
+ ).To(Succeed())
+ })
+
+ By(fmt.Sprintf("No immediate backup is created by schedule %s\n", n.ScheduleName), func() {
+ backups, err := veleroutil.GetBackupsForSchedule(
+ n.Ctx,
+ n.Client.Kubebuilder,
+ n.ScheduleName,
+ n.VeleroCfg.Namespace,
+ )
+ Expect(err).To(Succeed())
+ Expect(backups).To(BeEmpty())
+ })
+
+ By("Wait until the schedule triggers a backup.", func() {
+ err := wait.PollUntilContextTimeout(
+ n.Ctx,
+ 30*time.Second,
+ 5*time.Minute,
+ true,
+ func(ctx context.Context) (bool, error) {
+ backups, err := veleroutil.GetBackupsForSchedule(
+ n.Ctx,
+ n.Client.Kubebuilder,
+ n.ScheduleName,
+ n.VeleroCfg.Namespace,
+ )
+ if err != nil {
+ fmt.Println("Failed to get backups for schedule.")
+ return false, err
+ }
+
+ // The triggered backup completed.
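+ // The schedule runs "@every 1m" and the poll interval is 30s,
+ // so the first check that finds the single triggered backup in
+ // the Completed phase succeeds, and the backup name is recorded
+ // for the later restore step.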
+ if len(backups) == 1 && + backups[0].Status.Phase == velerov1api.BackupPhaseCompleted { + n.BackupName = backups[0].Name + return true, nil + } + + return false, nil + }, + ) + + Expect(err).To(Succeed()) + }) + + n.RestoreArgs = []string{ + "create", "--namespace", n.VeleroCfg.VeleroNamespace, "restore", n.RestoreName, + "--from-backup", n.BackupName, + "--wait", + } + + By(fmt.Sprintf("Pause schedule %s ......\n", n.ScheduleName), func() { + Expect( + veleroutil.VeleroSchedulePause( + n.Ctx, + n.VeleroCfg.VeleroCLI, + n.VeleroCfg.VeleroNamespace, + n.ScheduleName, + ), + ).To(Succeed()) + }) + + By(("Sleep 2 minutes"), func() { + time.Sleep(2 * time.Minute) + }) + + backups, err := veleroutil.GetBackupsForSchedule( + n.Ctx, + n.Client.Kubebuilder, + n.ScheduleName, + n.VeleroCfg.Namespace, + ) + Expect(err).To(Succeed(), fmt.Sprintf("Fail to get backups from schedule %s", n.ScheduleName)) + + backupCountPostPause := len(backups) + fmt.Printf("After pause, backups count is %d\n", backupCountPostPause) + + By(fmt.Sprintf("Verify no new backups from %s ......\n", n.ScheduleName), func() { + Expect(backupCountPostPause).To(Equal(1)) + }) + + By(fmt.Sprintf("Unpause schedule %s ......\n", n.ScheduleName), func() { + Expect( + veleroutil.VeleroScheduleUnpause( + n.Ctx, + n.VeleroCfg.VeleroCLI, + n.VeleroCfg.VeleroNamespace, + n.ScheduleName, + ), + ).To(Succeed()) + }) + + return nil +} + +func (n *PeriodicalCase) Verify() error { + By("Namespaces were restored", func() { + for _, ns := range *n.NSIncluded { + _, err := k8sutil.GetConfigMap(n.Client.ClientGo, ns, n.CaseBaseName) + Expect(err).ShouldNot(HaveOccurred(), fmt.Sprintf("failed to list CM in namespace: %s\n", ns)) + } + }) + return nil +} + +func (n *PeriodicalCase) Clean() error { + if CurrentSpecReport().Failed() && n.VeleroCfg.FailFast { + fmt.Println("Test case failed and fail fast is enabled. Skip resource clean up.") + } else { + Expect( + veleroutil.VeleroScheduleDelete( + n.Ctx, + n.VeleroCfg.VeleroCLI, + n.VeleroCfg.VeleroNamespace, + n.ScheduleName, + ), + ).To(Succeed()) + + Expect(n.TestCase.Clean()).To(Succeed()) + } + + return nil +} diff --git a/test/e2e/schedule/schedule-backup-creation.go b/test/e2e/schedule/schedule-backup-creation.go deleted file mode 100644 index 9376f22904..0000000000 --- a/test/e2e/schedule/schedule-backup-creation.go +++ /dev/null @@ -1,137 +0,0 @@ -package schedule - -import ( - "context" - "fmt" - "math/rand" - "strings" - "time" - - . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" - - . "github.com/vmware-tanzu/velero/test/e2e/test" - . "github.com/vmware-tanzu/velero/test/util/k8s" - . 
"github.com/vmware-tanzu/velero/test/util/velero" -) - -type ScheduleBackupCreation struct { - TestCase - namespace string - ScheduleName string - ScheduleArgs []string - Period int //Limitation: The unit is minitue only and 60 is divisible by it - randBackupName string - verifyTimes int - volume string - podName string - pvcName string - podAnn map[string]string - podSleepDuration time.Duration -} - -var ScheduleBackupCreationTest func() = TestFunc(&ScheduleBackupCreation{}) - -func (s *ScheduleBackupCreation) Init() error { - s.TestCase.Init() - s.CaseBaseName = "schedule-backup-creation-test" + s.UUIDgen - s.ScheduleName = "schedule-" + s.CaseBaseName - s.namespace = s.GetTestCase().CaseBaseName - s.Period = 3 // Unit is minute - s.verifyTimes = 5 // More larger verify times more confidence we have - podSleepDurationStr := "300s" - s.podSleepDuration, _ = time.ParseDuration(podSleepDurationStr) - s.TestMsg = &TestMSG{ - Desc: "Schedule controller wouldn't create a new backup when it still has pending or InProgress backup", - FailedMSG: "Failed to verify schedule back creation behavior", - Text: "Schedule controller wouldn't create a new backup when it still has pending or InProgress backup", - } - s.podAnn = map[string]string{ - "pre.hook.backup.velero.io/container": s.podName, - "pre.hook.backup.velero.io/command": "[\"sleep\", \"" + podSleepDurationStr + "\"]", - "pre.hook.backup.velero.io/timeout": "600s", - } - s.volume = "volume-1" - s.podName = "pod-1" - s.pvcName = "pvc-1" - s.ScheduleArgs = []string{ - "--include-namespaces", s.namespace, - "--schedule=*/" + fmt.Sprintf("%v", s.Period) + " * * * *", - } - Expect(s.Period).To(BeNumerically("<", 30)) - return nil -} - -func (s *ScheduleBackupCreation) CreateResources() error { - By(fmt.Sprintf("Create namespace %s", s.namespace), func() { - Expect(CreateNamespace(s.Ctx, s.Client, s.namespace)).To(Succeed(), - fmt.Sprintf("Failed to create namespace %s", s.namespace)) - }) - - By(fmt.Sprintf("Create pod %s in namespace %s", s.podName, s.namespace), func() { - _, err := CreatePod(s.Client, s.namespace, s.podName, "default", s.pvcName, []string{s.volume}, nil, s.podAnn) - Expect(err).To(Succeed()) - err = WaitForPods(s.Ctx, s.Client, s.namespace, []string{s.podName}) - Expect(err).To(Succeed()) - }) - return nil -} - -func (s *ScheduleBackupCreation) Backup() error { - // Wait until the beginning of the given period to create schedule, it will give us - // a predictable period to wait for the first scheduled backup, and verify no immediate - // scheduled backup was created between schedule creation and first scheduled backup. 
- By(fmt.Sprintf("Creating schedule %s ......\n", s.ScheduleName), func() { - for i := 0; i < s.Period*60/30; i++ { - time.Sleep(30 * time.Second) - now := time.Now().Minute() - triggerNow := now % s.Period - if triggerNow == 0 { - Expect(VeleroScheduleCreate(s.Ctx, s.VeleroCfg.VeleroCLI, s.VeleroCfg.VeleroNamespace, s.ScheduleName, s.ScheduleArgs)).To(Succeed(), func() string { - RunDebug(context.Background(), s.VeleroCfg.VeleroCLI, s.VeleroCfg.VeleroNamespace, "", "") - return "Fail to create schedule" - }) - break - } - } - }) - - By("Delay one more minute to make sure the new backup was created in the given period", func() { - time.Sleep(1 * time.Minute) - }) - - By(fmt.Sprintf("Get backups every %d minute, and backups count should increase 1 more step in the same pace\n", s.Period), func() { - for i := 1; i <= s.verifyTimes; i++ { - fmt.Printf("Start to sleep %d minute #%d time...\n", s.podSleepDuration, i) - mi, _ := time.ParseDuration("60s") - time.Sleep(s.podSleepDuration + mi) - bMap := make(map[string]string) - backupsInfo, err := GetScheduledBackupsCreationTime(s.Ctx, s.VeleroCfg.VeleroCLI, "default", s.ScheduleName) - Expect(err).To(Succeed()) - Expect(backupsInfo).To(HaveLen(i)) - for index, bi := range backupsInfo { - bList := strings.Split(bi, ",") - fmt.Printf("Backup %d: %v\n", index, bList) - bMap[bList[0]] = bList[1] - _, err := time.Parse("2006-01-02 15:04:05 -0700 MST", bList[1]) - Expect(err).To(Succeed()) - } - if i == s.verifyTimes-1 { - backupInfo := backupsInfo[rand.Intn(len(backupsInfo))] - s.randBackupName = strings.Split(backupInfo, ",")[0] - } - } - }) - return nil -} - -func (s *ScheduleBackupCreation) Clean() error { - if CurrentSpecReport().Failed() && s.VeleroCfg.FailFast { - fmt.Println("Test case failed and fail fast is enabled. Skip resource clean up.") - } else { - Expect(VeleroScheduleDelete(s.Ctx, s.VeleroCfg.VeleroCLI, s.VeleroCfg.VeleroNamespace, s.ScheduleName)).To(Succeed()) - Expect(s.TestCase.Clean()).To(Succeed()) - } - - return nil -} diff --git a/test/e2e/schedule/schedule.go b/test/e2e/schedule/schedule.go deleted file mode 100644 index f1a4bfe213..0000000000 --- a/test/e2e/schedule/schedule.go +++ /dev/null @@ -1,214 +0,0 @@ -package schedule - -import ( - "context" - "fmt" - "math/rand" - "strings" - "time" - - . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" - - . "github.com/vmware-tanzu/velero/test/e2e/test" - . "github.com/vmware-tanzu/velero/test/util/k8s" - . 
"github.com/vmware-tanzu/velero/test/util/velero" -) - -type ScheduleBackup struct { - TestCase - ScheduleName string - ScheduleArgs []string - Period int //Limitation: The unit is minitue only and 60 is divisible by it - randBackupName string - verifyTimes int -} - -var ScheduleBackupTest func() = TestFunc(&ScheduleBackup{}) - -func (n *ScheduleBackup) Init() error { - n.TestCase.Init() - n.CaseBaseName = "schedule-backup-" + n.UUIDgen - n.NSIncluded = &[]string{n.CaseBaseName} - n.ScheduleName = "schedule-" + n.CaseBaseName - n.RestoreName = "restore-" + n.CaseBaseName - n.Period = 3 // Unit is minute - n.verifyTimes = 5 // More larger verify times more confidence we have - n.TestMsg = &TestMSG{ - Desc: "Set up a scheduled backup defined by a Cron expression", - FailedMSG: "Failed to schedule a backup", - Text: "should backup periodly according to the schedule", - } - n.ScheduleArgs = []string{ - "--include-namespaces", strings.Join(*n.NSIncluded, ","), - "--schedule=*/" + fmt.Sprintf("%v", n.Period) + " * * * *", - } - - Expect(n.Period).To(BeNumerically("<", 30)) - return nil -} -func (n *ScheduleBackup) CreateResources() error { - for _, ns := range *n.NSIncluded { - By(fmt.Sprintf("Creating namespaces %s ......\n", ns), func() { - Expect(CreateNamespace(n.Ctx, n.Client, ns)).To(Succeed(), fmt.Sprintf("Failed to create namespace %s", ns)) - }) - configmaptName := n.CaseBaseName - fmt.Printf("Creating configmap %s in namespaces ...%s\n", configmaptName, ns) - _, err := CreateConfigMap(n.Client.ClientGo, ns, configmaptName, nil, nil) - Expect(err).To(Succeed(), fmt.Sprintf("failed to create configmap in the namespace %q", ns)) - Expect(WaitForConfigMapComplete(n.Client.ClientGo, ns, configmaptName)).To(Succeed(), - fmt.Sprintf("failed to ensure secret completion in namespace: %q", ns)) - } - return nil -} - -func (n *ScheduleBackup) Backup() error { - // Wait until the beginning of the given period to create schedule, it will give us - // a predictable period to wait for the first scheduled backup, and verify no immediate - // scheduled backup was created between schedule creation and first scheduled backup. 
- By(fmt.Sprintf("Creating schedule %s ......\n", n.ScheduleName), func() { - for i := 0; i < n.Period*60/30; i++ { - time.Sleep(30 * time.Second) - now := time.Now().Minute() - triggerNow := now % n.Period - if triggerNow == 0 { - Expect(VeleroScheduleCreate(n.Ctx, n.VeleroCfg.VeleroCLI, n.VeleroCfg.VeleroNamespace, n.ScheduleName, n.ScheduleArgs)).To(Succeed(), func() string { - RunDebug(context.Background(), n.VeleroCfg.VeleroCLI, n.VeleroCfg.VeleroNamespace, "", "") - return "Fail to create schedule" - }) - break - } - } - }) - - By(fmt.Sprintf("Schedule %s is created without any delay\n", n.ScheduleName), func() { - creationTimestamp, err := GetSchedule(n.Ctx, n.VeleroCfg.VeleroNamespace, n.ScheduleName) - Expect(err).To(Succeed()) - - creationTime, err := time.Parse(time.RFC3339, strings.Replace(creationTimestamp, "'", "", -1)) - Expect(err).To(Succeed()) - fmt.Printf("Schedule %s created at %s\n", n.ScheduleName, creationTime) - now := time.Now() - diff := creationTime.Sub(now) - Expect(diff.Minutes()).To(BeNumerically("<", 1)) - }) - - By(fmt.Sprintf("No immediate backup is created by schedule %s\n", n.ScheduleName), func() { - for i := 0; i < n.Period; i++ { - time.Sleep(1 * time.Minute) - now := time.Now() - fmt.Printf("Get backup for #%d time at %v\n", i, now) - //Ignore the last minute in the period avoiding met the 1st backup by schedule - if i != n.Period-1 { - backupsInfo, err := GetScheduledBackupsCreationTime(n.Ctx, n.VeleroCfg.VeleroCLI, "default", n.ScheduleName) - Expect(err).To(Succeed()) - Expect(backupsInfo).To(BeEmpty()) - } - } - }) - - By("Delay one more minute to make sure the new backup was created in the given period", func() { - time.Sleep(time.Minute) - }) - - By(fmt.Sprintf("Get backups every %d minute, and backups count should increase 1 more step in the same pace\n", n.Period), func() { - for i := 0; i < n.verifyTimes; i++ { - fmt.Printf("Start to sleep %d minute #%d time...\n", n.Period, i+1) - time.Sleep(time.Duration(n.Period) * time.Minute) - bMap := make(map[string]string) - backupsInfo, err := GetScheduledBackupsCreationTime(n.Ctx, n.VeleroCfg.VeleroCLI, "default", n.ScheduleName) - Expect(err).To(Succeed()) - Expect(backupsInfo).To(HaveLen(i + 2)) - for index, bi := range backupsInfo { - bList := strings.Split(bi, ",") - fmt.Printf("Backup %d: %v\n", index, bList) - bMap[bList[0]] = bList[1] - _, err := time.Parse("2006-01-02 15:04:05 -0700 MST", bList[1]) - Expect(err).To(Succeed()) - } - if i == n.verifyTimes-1 { - backupInfo := backupsInfo[rand.Intn(len(backupsInfo))] - n.randBackupName = strings.Split(backupInfo, ",")[0] - } - } - }) - - n.BackupName = strings.Replace(n.randBackupName, " ", "", -1) - - n.RestoreArgs = []string{ - "create", "--namespace", n.VeleroCfg.VeleroNamespace, "restore", n.RestoreName, - "--from-backup", n.BackupName, - "--wait", - } - - backupsInfo, err := GetScheduledBackupsCreationTime(n.Ctx, n.VeleroCfg.VeleroCLI, "default", n.ScheduleName) - Expect(err).To(Succeed(), fmt.Sprintf("Fail to get backups from schedule %s", n.ScheduleName)) - fmt.Println(backupsInfo) - backupCount := len(backupsInfo) - - By(fmt.Sprintf("Pause schedule %s ......\n", n.ScheduleName), func() { - Expect(VeleroSchedulePause(n.Ctx, n.VeleroCfg.VeleroCLI, n.VeleroCfg.VeleroNamespace, n.ScheduleName)).To(Succeed(), func() string { - RunDebug(context.Background(), n.VeleroCfg.VeleroCLI, n.VeleroCfg.VeleroNamespace, "", "") - return "Fail to pause schedule" - }) - }) - - periodCount := 3 - sleepDuration := time.Duration(n.Period*periodCount) * 
time.Minute - By(fmt.Sprintf("Sleep for %s ......\n", sleepDuration), func() { - time.Sleep(sleepDuration) - }) - - backupsInfo, err = GetScheduledBackupsCreationTime(n.Ctx, n.VeleroCfg.VeleroCLI, "default", n.ScheduleName) - Expect(err).To(Succeed(), fmt.Sprintf("Fail to get backups from schedule %s", n.ScheduleName)) - - backupCountPostPause := len(backupsInfo) - fmt.Printf("After pause, backkups count is %d\n", backupCountPostPause) - - By(fmt.Sprintf("Verify no new backups from %s ......\n", n.ScheduleName), func() { - Expect(backupCountPostPause).To(Equal(backupCount)) - }) - - By(fmt.Sprintf("Unpause schedule %s ......\n", n.ScheduleName), func() { - Expect(VeleroScheduleUnpause(n.Ctx, n.VeleroCfg.VeleroCLI, n.VeleroCfg.VeleroNamespace, n.ScheduleName)).To(Succeed(), func() string { - RunDebug(context.Background(), n.VeleroCfg.VeleroCLI, n.VeleroCfg.VeleroNamespace, "", "") - return "Fail to unpause schedule" - }) - }) - - By(fmt.Sprintf("Sleep for %s ......\n", sleepDuration), func() { - time.Sleep(sleepDuration) - }) - - backupsInfo, err = GetScheduledBackupsCreationTime(n.Ctx, n.VeleroCfg.VeleroCLI, "default", n.ScheduleName) - Expect(err).To(Succeed(), fmt.Sprintf("Fail to get backups from schedule %s", n.ScheduleName)) - fmt.Println(backupsInfo) - backupCountPostUnpause := len(backupsInfo) - fmt.Printf("After unpause, backkups count is %d\n", backupCountPostUnpause) - By(fmt.Sprintf("Verify no new backups by schedule %s ......\n", n.ScheduleName), func() { - Expect(backupCountPostUnpause - backupCount).To(BeNumerically(">=", periodCount-1)) - }) - return nil -} - -func (n *ScheduleBackup) Verify() error { - By("Namespaces were restored", func() { - for _, ns := range *n.NSIncluded { - configmap, err := GetConfigmap(n.Client.ClientGo, ns, n.CaseBaseName) - fmt.Printf("Restored configmap is %v\n", configmap) - Expect(err).ShouldNot(HaveOccurred(), fmt.Sprintf("failed to list configmap in namespace: %q\n", ns)) - } - }) - return nil -} - -func (n *ScheduleBackup) Clean() error { - if CurrentSpecReport().Failed() && n.VeleroCfg.FailFast { - fmt.Println("Test case failed and fail fast is enabled. 
Skip resource clean up.") - } else { - Expect(VeleroScheduleDelete(n.Ctx, n.VeleroCfg.VeleroCLI, n.VeleroCfg.VeleroNamespace, n.ScheduleName)).To(Succeed()) - Expect(n.TestCase.Clean()).To(Succeed()) - } - - return nil -} diff --git a/test/e2e/upgrade/upgrade.go b/test/e2e/upgrade/upgrade.go index 3c371064c3..38a8c8a0ca 100644 --- a/test/e2e/upgrade/upgrade.go +++ b/test/e2e/upgrade/upgrade.go @@ -78,8 +78,7 @@ func BackupUpgradeRestoreTest(useVolumeSnapshots bool, veleroCLI2Version VeleroC By("Uninstall Velero", func() { ctx, ctxCancel := context.WithTimeout(context.Background(), time.Minute*5) defer ctxCancel() - Expect(VeleroUninstall(ctx, veleroCfg.VeleroCLI, - veleroCfg.VeleroNamespace)).To(Succeed()) + Expect(VeleroUninstall(ctx, veleroCfg)).To(Succeed()) }) } }) @@ -97,8 +96,7 @@ func BackupUpgradeRestoreTest(useVolumeSnapshots bool, veleroCLI2Version VeleroC By("Uninstall Velero", func() { ctx, ctxCancel := context.WithTimeout(context.Background(), time.Minute*5) defer ctxCancel() - Expect(VeleroUninstall(ctx, veleroCfg.VeleroCLI, - veleroCfg.VeleroNamespace)).To(Succeed()) + Expect(VeleroUninstall(ctx, veleroCfg)).To(Succeed()) }) } } diff --git a/test/perf/e2e_suite_test.go b/test/perf/e2e_suite_test.go index 383e78a78f..cb4522d699 100644 --- a/test/perf/e2e_suite_test.go +++ b/test/perf/e2e_suite_test.go @@ -139,7 +139,7 @@ var _ = AfterSuite(func() { By("release test resources after testing") ctx, ctxCancel := context.WithTimeout(context.Background(), time.Minute*5) defer ctxCancel() - Expect(VeleroUninstall(ctx, VeleroCfg.VeleroCLI, VeleroCfg.VeleroNamespace)).To(Succeed()) + Expect(VeleroUninstall(ctx, VeleroCfg)).To(Succeed()) } } }) diff --git a/test/testdata/storage-class/README.md b/test/testdata/storage-class/README.md new file mode 100644 index 0000000000..52f27382fd --- /dev/null +++ b/test/testdata/storage-class/README.md @@ -0,0 +1,12 @@ +The `test/testdata/storage-class` directory contains the StorageClass YAMLs used for E2E. +The public cloud provider (including AWS, Azure and GCP) has two StorageClasses. +* The `provider-name`.yaml contains the default StorageClass for the provider. It uses the CSI provisioner. +* The `provider-name`-legacy.yaml contains the legacy StorageClass for the provider. It uses the in-tree volume plugin as the provisioner. By far, there is no E2E case using them. + +The vSphere environment also has two StorageClass files. +* The vsphere-legacy.yaml is used for the TKGm environment. +* The vsphere.yaml is used for the VKS environment. + +The ZFS StorageClasses only have the default one. There is no in-tree volume plugin used StorageClass used in E2E. + +The kind StorageClass uses the local-path provisioner. Will consider adding the CSI provisioner when there is a need. 
diff --git a/test/testdata/storage-class/aws-csi.yaml b/test/testdata/storage-class/aws-legecy.yaml similarity index 70% rename from test/testdata/storage-class/aws-csi.yaml rename to test/testdata/storage-class/aws-legecy.yaml index 48c95fdc9c..29e79c8eac 100644 --- a/test/testdata/storage-class/aws-csi.yaml +++ b/test/testdata/storage-class/aws-legecy.yaml @@ -1,8 +1,8 @@ apiVersion: storage.k8s.io/v1 kind: StorageClass metadata: - name: e2e-csi-storage-class -provisioner: ebs.csi.aws.com + name: e2e-storage-class +provisioner: kubernetes.io/aws-ebs parameters: type: gp2 reclaimPolicy: Delete diff --git a/test/testdata/storage-class/aws.yaml b/test/testdata/storage-class/aws.yaml index 29e79c8eac..848357aecd 100644 --- a/test/testdata/storage-class/aws.yaml +++ b/test/testdata/storage-class/aws.yaml @@ -2,7 +2,7 @@ apiVersion: storage.k8s.io/v1 kind: StorageClass metadata: name: e2e-storage-class -provisioner: kubernetes.io/aws-ebs +provisioner: ebs.csi.aws.com parameters: type: gp2 reclaimPolicy: Delete diff --git a/test/testdata/storage-class/azure-csi.yaml b/test/testdata/storage-class/azure-legacy.yaml similarity index 76% rename from test/testdata/storage-class/azure-csi.yaml rename to test/testdata/storage-class/azure-legacy.yaml index 5ef573b48b..aa9451bf67 100644 --- a/test/testdata/storage-class/azure-csi.yaml +++ b/test/testdata/storage-class/azure-legacy.yaml @@ -1,8 +1,8 @@ apiVersion: storage.k8s.io/v1 kind: StorageClass metadata: - name: e2e-csi-storage-class -provisioner: disk.csi.azure.com + name: e2e-storage-class +provisioner: kubernetes.io/azure-disk parameters: cachingmode: ReadOnly kind: Managed diff --git a/test/testdata/storage-class/azure.yaml b/test/testdata/storage-class/azure.yaml index aa9451bf67..4a153cab3a 100644 --- a/test/testdata/storage-class/azure.yaml +++ b/test/testdata/storage-class/azure.yaml @@ -2,7 +2,7 @@ apiVersion: storage.k8s.io/v1 kind: StorageClass metadata: name: e2e-storage-class -provisioner: kubernetes.io/azure-disk +provisioner: disk.csi.azure.com parameters: cachingmode: ReadOnly kind: Managed diff --git a/test/testdata/storage-class/gcp-csi.yaml b/test/testdata/storage-class/gcp-legacy.yaml similarity index 78% rename from test/testdata/storage-class/gcp-csi.yaml rename to test/testdata/storage-class/gcp-legacy.yaml index 399061fa84..397a4b19f3 100644 --- a/test/testdata/storage-class/gcp-csi.yaml +++ b/test/testdata/storage-class/gcp-legacy.yaml @@ -4,10 +4,10 @@ kind: StorageClass metadata: labels: addonmanager.kubernetes.io/mode: EnsureExists - name: e2e-csi-storage-class + name: e2e-storage-class parameters: type: pd-standard -provisioner: pd.csi.storage.gke.io +provisioner: kubernetes.io/gce-pd reclaimPolicy: Delete volumeBindingMode: WaitForFirstConsumer diff --git a/test/testdata/storage-class/gcp.yaml b/test/testdata/storage-class/gcp.yaml index 397a4b19f3..8a60ed4e5b 100644 --- a/test/testdata/storage-class/gcp.yaml +++ b/test/testdata/storage-class/gcp.yaml @@ -7,7 +7,7 @@ metadata: name: e2e-storage-class parameters: type: pd-standard -provisioner: kubernetes.io/gce-pd +provisioner: pd.csi.storage.gke.io reclaimPolicy: Delete volumeBindingMode: WaitForFirstConsumer diff --git a/test/testdata/storage-class/vanilla-zfs-csi.yaml b/test/testdata/storage-class/vanilla-zfs-csi.yaml deleted file mode 100644 index 697b80a536..0000000000 --- a/test/testdata/storage-class/vanilla-zfs-csi.yaml +++ /dev/null @@ -1,13 +0,0 @@ -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: e2e-csi-storage-class -parameters: - 
recordsize: "128k" - compression: "off" - dedup: "off" - fstype: "zfs" - poolname: "zfspv-pool" -provisioner: zfs.csi.openebs.io -volumeBindingMode: WaitForFirstConsumer -reclaimPolicy: Retain \ No newline at end of file diff --git a/test/testdata/storage-class/vanilla-zfs.yaml b/test/testdata/storage-class/vanilla-zfs.yaml index c7ada98b83..34c063b1a7 100644 --- a/test/testdata/storage-class/vanilla-zfs.yaml +++ b/test/testdata/storage-class/vanilla-zfs.yaml @@ -9,4 +9,4 @@ parameters: fstype: "zfs" poolname: "zfspv-pool" provisioner: zfs.csi.openebs.io -volumeBindingMode: WaitForFirstConsumer \ No newline at end of file +volumeBindingMode: WaitForFirstConsumer diff --git a/test/testdata/storage-class/vsphere-csi.yaml b/test/testdata/storage-class/vsphere-legacy.yaml similarity index 53% rename from test/testdata/storage-class/vsphere-csi.yaml rename to test/testdata/storage-class/vsphere-legacy.yaml index 911751cfba..9449fa0113 100644 --- a/test/testdata/storage-class/vsphere-csi.yaml +++ b/test/testdata/storage-class/vsphere-legacy.yaml @@ -1,12 +1,11 @@ apiVersion: storage.k8s.io/v1 kind: StorageClass metadata: - name: e2e-csi-storage-class + name: e2e-storage-class annotations: storageclass.kubernetes.io/is-default-class: "false" parameters: - # StoragePolicyName: "vSAN Default Storage Policy" # This is used for the TKGm environment. - svStorageClass: worker-storagepolicy # This is used for TKGs/uTKG environment. + StoragePolicyName: "vSAN Default Storage Policy" # This is used for TKGm environment. provisioner: csi.vsphere.vmware.com reclaimPolicy: Delete volumeBindingMode: WaitForFirstConsumer \ No newline at end of file diff --git a/test/testdata/storage-class/vsphere.yaml b/test/testdata/storage-class/vsphere.yaml index e09b09a603..f26bdf1f0b 100644 --- a/test/testdata/storage-class/vsphere.yaml +++ b/test/testdata/storage-class/vsphere.yaml @@ -5,8 +5,7 @@ metadata: annotations: storageclass.kubernetes.io/is-default-class: "false" parameters: - #StoragePolicyName: "vSAN Default Storage Policy" # This is used for TKGm environment. - svStorageClass: worker-storagepolicy # This is used for TKGs/uTKG environment. 
+ svStorageClass: worker-storagepolicy provisioner: csi.vsphere.vmware.com reclaimPolicy: Delete volumeBindingMode: WaitForFirstConsumer \ No newline at end of file diff --git a/test/testdata/volume-snapshot-class/vanilla-zfs.yaml b/test/testdata/volume-snapshot-class/vanilla-zfs.yaml index 3649215e84..c5ed37a449 100644 --- a/test/testdata/volume-snapshot-class/vanilla-zfs.yaml +++ b/test/testdata/volume-snapshot-class/vanilla-zfs.yaml @@ -1,7 +1,7 @@ kind: VolumeSnapshotClass apiVersion: snapshot.storage.k8s.io/v1 metadata: - name: zfspv-snapclass + name: e2e-volume-snapshot-class annotations: snapshot.storage.kubernetes.io/is-default-class: "true" labels: diff --git a/test/testdata/volume-snapshot-class/vsphere.yaml b/test/testdata/volume-snapshot-class/vsphere.yaml index 08ea9b2225..6bab9b5a28 100644 --- a/test/testdata/volume-snapshot-class/vsphere.yaml +++ b/test/testdata/volume-snapshot-class/vsphere.yaml @@ -8,6 +8,6 @@ metadata: snapshot.storage.kubernetes.io/is-default-class: "true" labels: velero.io/csi-volumesnapshot-class: "true" - name: volumesnapshotclass-delete + name: e2e-volume-snapshot-class parameters: svVolumeSnapshotClass: volumesnapshotclass-delete diff --git a/test/types.go b/test/types.go index 527068599c..35a1502c2d 100644 --- a/test/types.go +++ b/test/types.go @@ -25,9 +25,12 @@ import ( "github.com/vmware-tanzu/velero/test/util/k8s" ) +// e2e-storage-class is the default StorageClass for E2E. const StorageClassName = "e2e-storage-class" + +// e2e-storage-class-2 is used for the StorageClass mapping test case. const StorageClassName2 = "e2e-storage-class-2" -const CSIStorageClassName = "e2e-csi-storage-class" + const FeatureCSI = "EnableCSI" const VanillaZFS = "vanilla-zfs" const Kind = "kind" diff --git a/test/util/k8s/configmap.go b/test/util/k8s/configmap.go index e4edd0667b..41f1844329 100644 --- a/test/util/k8s/configmap.go +++ b/test/util/k8s/configmap.go @@ -57,9 +57,9 @@ func CreateConfigMapFromYAMLData(c clientset.Interface, yamlData, cmName, namesp } // WaitForConfigMapComplete uses c to wait for completions to complete for the Job jobName in namespace ns. 
-func WaitForConfigMapComplete(c clientset.Interface, ns, configmapName string) error { +func WaitForConfigMapComplete(c clientset.Interface, ns, cmName string) error { return wait.Poll(PollInterval, PollTimeout, func() (bool, error) { - _, err := c.CoreV1().ConfigMaps(ns).Get(context.TODO(), configmapName, metav1.GetOptions{}) + _, err := c.CoreV1().ConfigMaps(ns).Get(context.TODO(), cmName, metav1.GetOptions{}) if err != nil { return false, err } @@ -67,19 +67,19 @@ func WaitForConfigMapComplete(c clientset.Interface, ns, configmapName string) e }) } -func GetConfigmap(c clientset.Interface, ns, secretName string) (*v1.ConfigMap, error) { +func GetConfigMap(c clientset.Interface, ns, secretName string) (*v1.ConfigMap, error) { return c.CoreV1().ConfigMaps(ns).Get(context.TODO(), secretName, metav1.GetOptions{}) } -func DeleteConfigmap(c clientset.Interface, ns, name string) error { +func DeleteConfigMap(c clientset.Interface, ns, name string) error { if err := c.CoreV1().ConfigMaps(ns).Delete(context.TODO(), name, metav1.DeleteOptions{}); err != nil { - return errors.Wrap(err, fmt.Sprintf("failed to delete configmap in namespace %q", ns)) + return errors.Wrap(err, fmt.Sprintf("failed to delete ConfigMap in namespace %q", ns)) } return nil } func WaitForConfigmapDelete(c clientset.Interface, ns, name string) error { - if err := DeleteConfigmap(c, ns, name); err != nil { + if err := DeleteConfigMap(c, ns, name); err != nil { return err } diff --git a/test/util/velero/install.go b/test/util/velero/install.go index 9da1c4b83a..cc81813bc9 100644 --- a/test/util/velero/install.go +++ b/test/util/velero/install.go @@ -23,7 +23,6 @@ import ( "fmt" "os" "os/exec" - "strings" "time" "github.com/pkg/errors" @@ -129,7 +128,7 @@ func VeleroInstall(ctx context.Context, veleroCfg *test.VeleroConfig, isStandbyC _, err = k8s.GetNamespace(ctx, *veleroCfg.ClientToInstallVelero, veleroCfg.VeleroNamespace) // We should uninstall Velero for a new service account creation. 
if !apierrors.IsNotFound(err) { - if err := VeleroUninstall(context.Background(), veleroCfg.VeleroCLI, veleroCfg.VeleroNamespace); err != nil { + if err := VeleroUninstall(context.Background(), *veleroCfg); err != nil { return errors.Wrapf(err, "Failed to uninstall velero %s", veleroCfg.VeleroNamespace) } } @@ -150,15 +149,19 @@ func VeleroInstall(ctx context.Context, veleroCfg *test.VeleroConfig, isStandbyC return errors.Wrapf(err, "Failed to create service account %s to %s namespace", veleroInstallOptions.ServiceAccountName, veleroCfg.VeleroNamespace) } } - err = installVeleroServer(ctx, veleroCfg.VeleroCLI, veleroCfg.CloudProvider, &installOptions{ - Options: veleroInstallOptions, - RegistryCredentialFile: veleroCfg.RegistryCredentialFile, - RestoreHelperImage: veleroCfg.RestoreHelperImage, - VeleroServerDebugMode: veleroCfg.VeleroServerDebugMode, - WithoutDisableInformerCacheParam: veleroCfg.WithoutDisableInformerCacheParam, - }) - if err != nil { + if err := installVeleroServer( + ctx, + veleroCfg.VeleroCLI, + veleroCfg.CloudProvider, + &installOptions{ + Options: veleroInstallOptions, + RegistryCredentialFile: veleroCfg.RegistryCredentialFile, + RestoreHelperImage: veleroCfg.RestoreHelperImage, + VeleroServerDebugMode: veleroCfg.VeleroServerDebugMode, + WithoutDisableInformerCacheParam: veleroCfg.WithoutDisableInformerCacheParam, + }, + ); err != nil { time.Sleep(9 * time.Hour) RunDebug(context.Background(), veleroCfg.VeleroCLI, veleroCfg.VeleroNamespace, "", "") return errors.WithMessagef(err, "Failed to install Velero in the cluster") @@ -247,7 +250,7 @@ func cleanVSpherePluginConfig(c clientset.Interface, ns, secretName, configMapNa } //clear configmap - _, err = k8s.GetConfigmap(c, ns, configMapName) + _, err = k8s.GetConfigMap(c, ns, configMapName) if err == nil { if err := k8s.WaitForConfigmapDelete(c, ns, configMapName); err != nil { return errors.WithMessagef(err, "Failed to clear up vsphere plugin configmap in %s namespace", ns) @@ -320,14 +323,6 @@ func installVeleroServer(ctx context.Context, cli, cloudProvider string, options if len(options.Features) > 0 { args = append(args, "--features", options.Features) - if strings.EqualFold(options.Features, test.FeatureCSI) && options.UseVolumeSnapshots { - // https://github.com/openebs/zfs-localpv/blob/develop/docs/snapshot.md - fmt.Printf("Start to install %s VolumeSnapshotClass ... 
\n", cloudProvider) - if err := k8s.KubectlApplyByFile(ctx, fmt.Sprintf("../testdata/volume-snapshot-class/%s.yaml", cloudProvider)); err != nil { - fmt.Println("Fail to install VolumeSnapshotClass when CSI feature is enabled: ", err) - return err - } - } } if options.GarbageCollectionFrequency > 0 { @@ -374,14 +369,14 @@ func installVeleroServer(ctx context.Context, cli, cloudProvider string, options args = append(args, fmt.Sprintf("--uploader-type=%v", options.UploaderType)) } - if err := createVelereResources(ctx, cli, namespace, args, options); err != nil { + if err := createVeleroResources(ctx, cli, namespace, args, options); err != nil { return err } return waitVeleroReady(ctx, namespace, options.UseNodeAgent) } -func createVelereResources(ctx context.Context, cli, namespace string, args []string, options *installOptions) error { +func createVeleroResources(ctx context.Context, cli, namespace string, args []string, options *installOptions) error { args = append(args, "--dry-run", "--output", "json", "--crds-only") // get the CRD definitions @@ -670,7 +665,7 @@ func PrepareVelero(ctx context.Context, caseName string, veleroCfg test.VeleroCo fmt.Printf("error in checking velero status with %v", err) ctx, ctxCancel := context.WithTimeout(context.Background(), time.Minute*5) defer ctxCancel() - VeleroUninstall(ctx, veleroCfg.VeleroCLI, veleroCfg.VeleroNamespace) + VeleroUninstall(ctx, veleroCfg) ready = false } if ready { @@ -681,9 +676,15 @@ func PrepareVelero(ctx context.Context, caseName string, veleroCfg test.VeleroCo return VeleroInstall(context.Background(), &veleroCfg, false) } -func VeleroUninstall(ctx context.Context, cli, namespace string) error { - stdout, stderr, err := velerexec.RunCommand(exec.CommandContext(ctx, cli, "uninstall", "--force", "-n", namespace)) - if err != nil { +func VeleroUninstall(ctx context.Context, veleroCfg test.VeleroConfig) error { + if stdout, stderr, err := velerexec.RunCommand(exec.CommandContext( + ctx, + veleroCfg.VeleroCLI, + "uninstall", + "--force", + "-n", + veleroCfg.VeleroNamespace, + )); err != nil { return errors.Wrapf(err, "failed to uninstall velero, stdout=%s, stderr=%s", stdout, stderr) } fmt.Println("Velero uninstalled ⛵") diff --git a/test/util/velero/velero_utils.go b/test/util/velero/velero_utils.go index d998246636..1605398529 100644 --- a/test/util/velero/velero_utils.go +++ b/test/util/velero/velero_utils.go @@ -37,6 +37,7 @@ import ( "github.com/pkg/errors" "golang.org/x/exp/slices" "golang.org/x/mod/semver" + "k8s.io/apimachinery/pkg/labels" ver "k8s.io/apimachinery/pkg/util/version" "k8s.io/apimachinery/pkg/util/wait" kbclient "sigs.k8s.io/controller-runtime/pkg/client" @@ -334,7 +335,7 @@ func checkRestorePhase(ctx context.Context, veleroCLI string, veleroNamespace st func checkSchedulePhase(ctx context.Context, veleroCLI, veleroNamespace, scheduleName string) error { return wait.PollImmediate(time.Second*5, time.Minute*2, func() (bool, error) { - checkCMD := exec.CommandContext(ctx, veleroCLI, "--namespace", veleroNamespace, "schedule", "get", scheduleName, "-ojson") + checkCMD := exec.CommandContext(ctx, veleroCLI, "--namespace", veleroNamespace, "schedule", "get", scheduleName, "-o", "json") jsonBuf, err := common.CMDExecWithOutput(checkCMD) if err != nil { return false, err @@ -354,7 +355,7 @@ func checkSchedulePhase(ctx context.Context, veleroCLI, veleroNamespace, schedul } func checkSchedulePause(ctx context.Context, veleroCLI, veleroNamespace, scheduleName string, pause bool) error { - checkCMD := 
exec.CommandContext(ctx, veleroCLI, "--namespace", veleroNamespace, "schedule", "get", scheduleName, "-ojson") + checkCMD := exec.CommandContext(ctx, veleroCLI, "--namespace", veleroNamespace, "schedule", "get", scheduleName, "-o", "json") jsonBuf, err := common.CMDExecWithOutput(checkCMD) if err != nil { return err @@ -372,7 +373,7 @@ func checkSchedulePause(ctx context.Context, veleroCLI, veleroNamespace, schedul return nil } func CheckScheduleWithResourceOrder(ctx context.Context, veleroCLI, veleroNamespace, scheduleName string, order map[string]string) error { - checkCMD := exec.CommandContext(ctx, veleroCLI, "--namespace", veleroNamespace, "schedule", "get", scheduleName, "-ojson") + checkCMD := exec.CommandContext(ctx, veleroCLI, "--namespace", veleroNamespace, "schedule", "get", scheduleName, "-o", "json") jsonBuf, err := common.CMDExecWithOutput(checkCMD) if err != nil { return err @@ -393,8 +394,8 @@ func CheckScheduleWithResourceOrder(ctx context.Context, veleroCLI, veleroNamesp } } -func CheckBackupWithResourceOrder(ctx context.Context, veleroCLI, veleroNamespace, backupName string, order map[string]string) error { - checkCMD := exec.CommandContext(ctx, veleroCLI, "--namespace", veleroNamespace, "get", "backup", backupName, "-ojson") +func CheckBackupWithResourceOrder(ctx context.Context, veleroCLI, veleroNamespace, backupName string, orderResources map[string]string) error { + checkCMD := exec.CommandContext(ctx, veleroCLI, "--namespace", veleroNamespace, "get", "backup", backupName, "-o", "json") jsonBuf, err := common.CMDExecWithOutput(checkCMD) if err != nil { return err @@ -407,10 +408,10 @@ func CheckBackupWithResourceOrder(ctx context.Context, veleroCLI, veleroNamespac if backup.Status.Phase != velerov1api.BackupPhaseCompleted { return errors.Errorf("Unexpected restore phase got %s, expecting %s", backup.Status.Phase, velerov1api.BackupPhaseCompleted) } - if reflect.DeepEqual(backup.Spec.OrderedResources, order) { + if reflect.DeepEqual(backup.Spec.OrderedResources, orderResources) { return nil } else { - return fmt.Errorf("resource order %v set in backup command is not equal with order %v stored in backup cr", order, backup.Spec.OrderedResources) + return fmt.Errorf("resource order %v set in backup command is not equal with order %v stored in backup cr", orderResources, backup.Spec.OrderedResources) } } @@ -452,7 +453,7 @@ func VeleroBackupNamespace(ctx context.Context, veleroCLI, veleroNamespace strin args = append(args, "--snapshot-volumes=false") } // if "--snapshot-volumes" is not provide, snapshot should be taken as default behavior. 
} else { // DefaultVolumesToFsBackup is false - // Althrough DefaultVolumesToFsBackup is false, but probably DefaultVolumesToFsBackup + // Although DefaultVolumesToFsBackup is false, but probably DefaultVolumesToFsBackup // was set to true in installation CLI in snapshot volume test, so set DefaultVolumesToFsBackup // to false specifically to make sure volume snapshot was taken if backupCfg.UseVolumeSnapshots { @@ -462,7 +463,7 @@ func VeleroBackupNamespace(ctx context.Context, veleroCLI, veleroNamespace strin args = append(args, "--default-volumes-to-fs-backup=false") } } - // Also Althrough DefaultVolumesToFsBackup is false, but probably DefaultVolumesToFsBackup + // Although DefaultVolumesToFsBackup is false, but probably DefaultVolumesToFsBackup // was set to true in installation CLI in FS volume backup test, so do nothing here, no DefaultVolumesToFsBackup // appear in backup CLI } @@ -616,9 +617,7 @@ func RunDebug(ctx context.Context, veleroCLI, veleroNamespace, backup, restore s if len(backup) > 0 { args = append(args, "--backup", backup) } - if len(restore) > 0 { - //args = append(args, "--restore", restore) - } + fmt.Printf("Generating the debug tarball at %s\n", output) if err := VeleroCmdExec(ctx, veleroCLI, args); err != nil { fmt.Println(errors.Wrapf(err, "failed to run the debug command")) @@ -1183,51 +1182,35 @@ func GetLatestSuccessBackupsFromBSL(ctx context.Context, veleroCLI, bslName stri return backups[0], nil } -func GetScheduledBackupsCreationTime(ctx context.Context, veleroCLI, bslName, scheduleName string) ([]string, error) { - var creationTimes []string - backups, err := GetBackupsCreationTime(ctx, veleroCLI, bslName) - if err != nil { - return nil, err - } - for _, b := range backups { - if strings.Contains(b, scheduleName) { - creationTimes = append(creationTimes, b) - } - } - return creationTimes, nil -} -func GetBackupsCreationTime(ctx context.Context, veleroCLI, bslName string) ([]string, error) { - args1 := []string{"get", "backups"} - createdTime := "$1,\",\" $5,$6,$7,$8" - if strings.TrimSpace(bslName) != "" { - args1 = append(args1, "-l", "velero.io/storage-location="+bslName) - } - cmds := []*common.OsCommandLine{} - - cmd := &common.OsCommandLine{ - Cmd: veleroCLI, - Args: args1, - } - cmds = append(cmds, cmd) - - cmd = &common.OsCommandLine{ - Cmd: "awk", - Args: []string{"{print " + createdTime + "}"}, - } - cmds = append(cmds, cmd) +func GetBackupsForSchedule( + ctx context.Context, + client kbclient.Client, + scheduleName string, + namespace string, +) ([]velerov1api.Backup, error) { + backupList := new(velerov1api.BackupList) - cmd = &common.OsCommandLine{ - Cmd: "tail", - Args: []string{"-n", "+2"}, + if err := client.List( + ctx, + backupList, + &kbclient.ListOptions{ + Namespace: namespace, + LabelSelector: labels.SelectorFromSet(map[string]string{ + velerov1api.ScheduleNameLabel: scheduleName, + }), + }, + ); err != nil { + return nil, fmt.Errorf("failed to list backup in %s namespace for schedule %s: %s", + namespace, scheduleName, err.Error()) } - cmds = append(cmds, cmd) - return common.GetListByCmdPipes(ctx, cmds) + return backupList.Items, nil } func GetAllBackups(ctx context.Context, veleroCLI string) ([]string, error) { return GetBackupsFromBsl(ctx, veleroCLI, "") } + func DeleteBslResource(ctx context.Context, veleroCLI string, bslName string) error { args := []string{"backup-location", "delete", bslName, "--confirm"} @@ -1431,6 +1414,7 @@ func VeleroUpgrade(ctx context.Context, veleroCfg VeleroConfig) error { } return waitVeleroReady(ctx, 
veleroCfg.VeleroNamespace, veleroCfg.UseNodeAgent)
}
+
func ApplyCRDs(ctx context.Context, veleroCLI string) ([]string, error) {
 cmds := []*common.OsCommandLine{}
@@ -1629,20 +1613,32 @@ func GetVeleroPodName(ctx context.Context) ([]string, error) {
 return common.GetListByCmdPipes(ctx, cmds)
}
-func InstallTestStorageClasses(path string) error {
+// InstallStorageClasses creates the "e2e-storage-class" and "e2e-storage-class-2"
+// StorageClasses for E2E tests.
+//
+// e2e-storage-class is the default StorageClass for E2E.
+// e2e-storage-class-2 is used for the StorageClass mapping test case.
+// The Kibishii StorageClass is not covered here.
+func InstallStorageClasses(provider string) error {
 ctx, ctxCancel := context.WithTimeout(context.Background(), time.Minute*5)
 defer ctxCancel()
- err := InstallStorageClass(ctx, path)
- if err != nil {
+
+ storageClassFilePath := fmt.Sprintf("../testdata/storage-class/%s.yaml", provider)
+
+ if err := InstallStorageClass(ctx, storageClassFilePath); err != nil {
 return err
 }
- content, err := os.ReadFile(path)
+ content, err := os.ReadFile(storageClassFilePath)
 if err != nil {
- return errors.Wrapf(err, "failed to get %s when install storage class", path)
+ return errors.Wrapf(err, "failed to get %s when installing storage class", storageClassFilePath)
 }
- // replace sc to new value
- newContent := strings.ReplaceAll(string(content), fmt.Sprintf("name: %s", StorageClassName), fmt.Sprintf("name: %s", StorageClassName2))
+ // Replace the name with e2e-storage-class-2.
+ newContent := strings.ReplaceAll(
+ string(content),
+ fmt.Sprintf("name: %s", StorageClassName),
+ fmt.Sprintf("name: %s", StorageClassName2),
+ )
 tmpFile, err := os.CreateTemp("", "sc-file")
 if err != nil {