From 01fce67d6532b7736007ade2736f95b38fe85c85 Mon Sep 17 00:00:00 2001 From: Mateus Oliveira Date: Wed, 28 Aug 2024 10:49:46 -0300 Subject: [PATCH 01/17] fix: add integration tests for NAB Signed-off-by: Mateus Oliveira --- Makefile | 4 +- go.mod | 3 +- hack/extra-crds/velero.io_backups.yaml | 661 ++++++++++++++++++ internal/common/function/function.go | 5 + .../controller/nonadminbackup_controller.go | 24 +- .../nonadminbackup_controller_test.go | 231 ++++-- internal/controller/suite_test.go | 10 +- 7 files changed, 881 insertions(+), 57 deletions(-) create mode 100644 hack/extra-crds/velero.io_backups.yaml diff --git a/Makefile b/Makefile index 443fa00..49655ff 100644 --- a/Makefile +++ b/Makefile @@ -1,9 +1,9 @@ # Image URL to use all building/pushing image targets IMG ?= quay.io/konveyor/oadp-non-admin:latest -# Kubernetes version from OpenShift 4.15.x https://openshift-release.apps.ci.l2s4.p1.openshiftapps.com/#4-stable +# Kubernetes version from OpenShift 4.16.x https://openshift-release.apps.ci.l2s4.p1.openshiftapps.com/#4-stable # ENVTEST_K8S_VERSION refers to the version of kubebuilder assets to be downloaded by envtest binary. -ENVTEST_K8S_VERSION = 1.28 +ENVTEST_K8S_VERSION = 1.29 # Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set) ifeq (,$(shell go env GOBIN)) diff --git a/go.mod b/go.mod index e0f489d..ba2d575 100644 --- a/go.mod +++ b/go.mod @@ -8,6 +8,7 @@ require ( github.com/onsi/gomega v1.30.0 github.com/stretchr/testify v1.8.4 github.com/vmware-tanzu/velero v1.12.0 + k8s.io/api v0.29.0 k8s.io/apimachinery v0.29.0 k8s.io/client-go v0.29.0 sigs.k8s.io/controller-runtime v0.17.0 @@ -65,7 +66,6 @@ require ( gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/api v0.29.0 // indirect k8s.io/apiextensions-apiserver v0.29.0 // indirect k8s.io/component-base v0.29.0 // indirect k8s.io/klog/v2 v2.110.1 // indirect @@ -76,4 +76,5 @@ require ( sigs.k8s.io/yaml v1.4.0 // indirect ) +// need update? replace github.com/vmware-tanzu/velero => github.com/openshift/velero v0.10.2-0.20231024175012-d8101a298016 diff --git a/hack/extra-crds/velero.io_backups.yaml b/hack/extra-crds/velero.io_backups.yaml new file mode 100644 index 0000000..d14b13d --- /dev/null +++ b/hack/extra-crds/velero.io_backups.yaml @@ -0,0 +1,661 @@ +# from https://github.com/openshift/oadp-operator/blob/master/config/crd/bases/velero.io_backups.yaml +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: backups.velero.io +spec: + group: velero.io + names: + kind: Backup + listKind: BackupList + plural: backups + singular: backup + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: |- + Backup is a Velero resource that represents the capture of Kubernetes + cluster state at a point in time (API objects and associated volume state). + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. 
+ In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: BackupSpec defines the specification for a Velero backup. + properties: + csiSnapshotTimeout: + description: |- + CSISnapshotTimeout specifies the time used to wait for CSI VolumeSnapshot status turns to + ReadyToUse during creation, before returning error as timeout. + The default value is 10 minute. + type: string + datamover: + description: |- + DataMover specifies the data mover to be used by the backup. + If DataMover is "" or "velero", the built-in data mover will be used. + type: string + defaultVolumesToFsBackup: + description: |- + DefaultVolumesToFsBackup specifies whether pod volume file system backup should be used + for all volumes by default. + nullable: true + type: boolean + defaultVolumesToRestic: + description: |- + DefaultVolumesToRestic specifies whether restic should be used to take a + backup of all pod volumes by default. + + + Deprecated: this field is no longer used and will be removed entirely in future. Use DefaultVolumesToFsBackup instead. + nullable: true + type: boolean + excludedClusterScopedResources: + description: |- + ExcludedClusterScopedResources is a slice of cluster-scoped + resource type names to exclude from the backup. + If set to "*", all cluster-scoped resource types are excluded. + The default value is empty. + items: + type: string + nullable: true + type: array + excludedNamespaceScopedResources: + description: |- + ExcludedNamespaceScopedResources is a slice of namespace-scoped + resource type names to exclude from the backup. + If set to "*", all namespace-scoped resource types are excluded. + The default value is empty. + items: + type: string + nullable: true + type: array + excludedNamespaces: + description: |- + ExcludedNamespaces contains a list of namespaces that are not + included in the backup. + items: + type: string + nullable: true + type: array + excludedResources: + description: |- + ExcludedResources is a slice of resource names that are not + included in the backup. + items: + type: string + nullable: true + type: array + hooks: + description: Hooks represent custom behaviors that should be executed + at different phases of the backup. + properties: + resources: + description: Resources are hooks that should be executed when + backing up individual instances of a resource. + items: + description: |- + BackupResourceHookSpec defines one or more BackupResourceHooks that should be executed based on + the rules defined for namespaces, resources, and label selector. + properties: + excludedNamespaces: + description: ExcludedNamespaces specifies the namespaces + to which this hook spec does not apply. + items: + type: string + nullable: true + type: array + excludedResources: + description: ExcludedResources specifies the resources to + which this hook spec does not apply. + items: + type: string + nullable: true + type: array + includedNamespaces: + description: |- + IncludedNamespaces specifies the namespaces to which this hook spec applies. If empty, it applies + to all namespaces. + items: + type: string + nullable: true + type: array + includedResources: + description: |- + IncludedResources specifies the resources to which this hook spec applies. If empty, it applies + to all resources. 
+ items: + type: string + nullable: true + type: array + labelSelector: + description: LabelSelector, if specified, filters the resources + to which this hook spec applies. + nullable: true + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: Name is the name of this hook. + type: string + post: + description: |- + PostHooks is a list of BackupResourceHooks to execute after storing the item in the backup. + These are executed after all "additional items" from item actions are processed. + items: + description: BackupResourceHook defines a hook for a resource. + properties: + exec: + description: Exec defines an exec hook. + properties: + command: + description: Command is the command and arguments + to execute. + items: + type: string + minItems: 1 + type: array + container: + description: |- + Container is the container in the pod where the command should be executed. If not specified, + the pod's first container is used. + type: string + onError: + description: OnError specifies how Velero should + behave if it encounters an error executing this + hook. + enum: + - Continue + - Fail + type: string + timeout: + description: |- + Timeout defines the maximum amount of time Velero should wait for the hook to complete before + considering the execution a failure. + type: string + required: + - command + type: object + required: + - exec + type: object + type: array + pre: + description: |- + PreHooks is a list of BackupResourceHooks to execute prior to storing the item in the backup. + These are executed before any "additional items" from item actions are processed. + items: + description: BackupResourceHook defines a hook for a resource. + properties: + exec: + description: Exec defines an exec hook. + properties: + command: + description: Command is the command and arguments + to execute. + items: + type: string + minItems: 1 + type: array + container: + description: |- + Container is the container in the pod where the command should be executed. If not specified, + the pod's first container is used. + type: string + onError: + description: OnError specifies how Velero should + behave if it encounters an error executing this + hook. 
+ enum: + - Continue + - Fail + type: string + timeout: + description: |- + Timeout defines the maximum amount of time Velero should wait for the hook to complete before + considering the execution a failure. + type: string + required: + - command + type: object + required: + - exec + type: object + type: array + required: + - name + type: object + nullable: true + type: array + type: object + includeClusterResources: + description: |- + IncludeClusterResources specifies whether cluster-scoped resources + should be included for consideration in the backup. + nullable: true + type: boolean + includedClusterScopedResources: + description: |- + IncludedClusterScopedResources is a slice of cluster-scoped + resource type names to include in the backup. + If set to "*", all cluster-scoped resource types are included. + The default value is empty, which means only related + cluster-scoped resources are included. + items: + type: string + nullable: true + type: array + includedNamespaceScopedResources: + description: |- + IncludedNamespaceScopedResources is a slice of namespace-scoped + resource type names to include in the backup. + The default value is "*". + items: + type: string + nullable: true + type: array + includedNamespaces: + description: |- + IncludedNamespaces is a slice of namespace names to include objects + from. If empty, all namespaces are included. + items: + type: string + nullable: true + type: array + includedResources: + description: |- + IncludedResources is a slice of resource names to include + in the backup. If empty, all resources are included. + items: + type: string + nullable: true + type: array + itemOperationTimeout: + description: |- + ItemOperationTimeout specifies the time used to wait for asynchronous BackupItemAction operations + The default value is 4 hour. + type: string + labelSelector: + description: |- + LabelSelector is a metav1.LabelSelector to filter with + when adding individual objects to the backup. If empty + or nil, all objects are included. Optional. + nullable: true + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + type: object + x-kubernetes-map-type: atomic + metadata: + properties: + labels: + additionalProperties: + type: string + type: object + type: object + orLabelSelectors: + description: |- + OrLabelSelectors is list of metav1.LabelSelector to filter with + when adding individual objects to the backup. If multiple provided + they will be joined by the OR operator. LabelSelector as well as + OrLabelSelectors cannot co-exist in backup request, only one of them + can be used. + items: + description: |- + A label selector is a label query over a set of resources. The result of matchLabels and + matchExpressions are ANDed. An empty label selector matches all objects. A null + label selector matches no objects. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + nullable: true + type: array + orderedResources: + additionalProperties: + type: string + description: |- + OrderedResources specifies the backup order of resources of specific Kind. + The map key is the resource name and value is a list of object names separated by commas. + Each resource name has format "namespace/objectname". For cluster resources, simply use "objectname". + nullable: true + type: object + resourcePolicy: + description: ResourcePolicy specifies the referenced resource policies + that backup should follow + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + snapshotMoveData: + description: SnapshotMoveData specifies whether snapshot data should + be moved + nullable: true + type: boolean + snapshotVolumes: + description: |- + SnapshotVolumes specifies whether to take snapshots + of any PV's referenced in the set of objects included + in the Backup. 
+ nullable: true + type: boolean + storageLocation: + description: StorageLocation is a string containing the name of a + BackupStorageLocation where the backup should be stored. + type: string + ttl: + description: |- + TTL is a time.Duration-parseable string describing how long + the Backup should be retained for. + type: string + uploaderConfig: + description: UploaderConfig specifies the configuration for the uploader. + nullable: true + properties: + parallelFilesUpload: + description: ParallelFilesUpload is the number of files parallel + uploads to perform when using the uploader. + type: integer + type: object + volumeSnapshotLocations: + description: VolumeSnapshotLocations is a list containing names of + VolumeSnapshotLocations associated with this backup. + items: + type: string + type: array + type: object + status: + description: BackupStatus captures the current status of a Velero backup. + properties: + backupItemOperationsAttempted: + description: |- + BackupItemOperationsAttempted is the total number of attempted + async BackupItemAction operations for this backup. + type: integer + backupItemOperationsCompleted: + description: |- + BackupItemOperationsCompleted is the total number of successfully completed + async BackupItemAction operations for this backup. + type: integer + backupItemOperationsFailed: + description: |- + BackupItemOperationsFailed is the total number of async + BackupItemAction operations for this backup which ended with an error. + type: integer + completionTimestamp: + description: |- + CompletionTimestamp records the time a backup was completed. + Completion time is recorded even on failed backups. + Completion time is recorded before uploading the backup object. + The server's time is used for CompletionTimestamps + format: date-time + nullable: true + type: string + csiVolumeSnapshotsAttempted: + description: |- + CSIVolumeSnapshotsAttempted is the total number of attempted + CSI VolumeSnapshots for this backup. + type: integer + csiVolumeSnapshotsCompleted: + description: |- + CSIVolumeSnapshotsCompleted is the total number of successfully + completed CSI VolumeSnapshots for this backup. + type: integer + errors: + description: |- + Errors is a count of all error messages that were generated during + execution of the backup. The actual errors are in the backup's log + file in object storage. + type: integer + expiration: + description: Expiration is when this Backup is eligible for garbage-collection. + format: date-time + nullable: true + type: string + failureReason: + description: FailureReason is an error that caused the entire backup + to fail. + type: string + formatVersion: + description: FormatVersion is the backup format version, including + major, minor, and patch version. + type: string + hookStatus: + description: HookStatus contains information about the status of the + hooks. + nullable: true + properties: + hooksAttempted: + description: |- + HooksAttempted is the total number of attempted hooks + Specifically, HooksAttempted represents the number of hooks that failed to execute + and the number of hooks that executed successfully. + type: integer + hooksFailed: + description: HooksFailed is the total number of hooks which ended + with an error + type: integer + type: object + phase: + description: Phase is the current state of the Backup. 
+ enum: + - New + - FailedValidation + - InProgress + - WaitingForPluginOperations + - WaitingForPluginOperationsPartiallyFailed + - Finalizing + - FinalizingPartiallyFailed + - Completed + - PartiallyFailed + - Failed + - Deleting + type: string + progress: + description: |- + Progress contains information about the backup's execution progress. Note + that this information is best-effort only -- if Velero fails to update it + during a backup for any reason, it may be inaccurate/stale. + nullable: true + properties: + itemsBackedUp: + description: |- + ItemsBackedUp is the number of items that have actually been written to the + backup tarball so far. + type: integer + totalItems: + description: |- + TotalItems is the total number of items to be backed up. This number may change + throughout the execution of the backup due to plugins that return additional related + items to back up, the velero.io/exclude-from-backup label, and various other + filters that happen as items are processed. + type: integer + type: object + startTimestamp: + description: |- + StartTimestamp records the time a backup was started. + Separate from CreationTimestamp, since that value changes + on restores. + The server's time is used for StartTimestamps + format: date-time + nullable: true + type: string + validationErrors: + description: |- + ValidationErrors is a slice of all validation errors (if + applicable). + items: + type: string + nullable: true + type: array + version: + description: |- + Version is the backup format major version. + Deprecated: Please see FormatVersion + type: integer + volumeSnapshotsAttempted: + description: |- + VolumeSnapshotsAttempted is the total number of attempted + volume snapshots for this backup. + type: integer + volumeSnapshotsCompleted: + description: |- + VolumeSnapshotsCompleted is the total number of successfully + completed volume snapshots for this backup. + type: integer + warnings: + description: |- + Warnings is a count of all warning messages that were generated during + execution of the backup. The actual warnings are in the backup's log + file in object storage. + type: integer + type: object + type: object + served: true + storage: true diff --git a/internal/common/function/function.go b/internal/common/function/function.go index 250e58b..853eb61 100644 --- a/internal/common/function/function.go +++ b/internal/common/function/function.go @@ -89,6 +89,8 @@ func containsOnlyNamespace(namespaces []string, namespace string) bool { // GetBackupSpecFromNonAdminBackup return BackupSpec object from NonAdminBackup spec, if no error occurs func GetBackupSpecFromNonAdminBackup(nonAdminBackup *nacv1alpha1.NonAdminBackup) (*velerov1api.BackupSpec, error) { + // TODO https://github.com/migtools/oadp-non-admin/issues/60 + // unnecessary? if nonAdminBackup == nil { return nil, fmt.Errorf("nonAdminBackup is nil") } @@ -146,6 +148,7 @@ func GenerateVeleroBackupName(namespace, nabName string) string { // UpdateNonAdminPhase updates the phase of a NonAdminBackup object with the provided phase. func UpdateNonAdminPhase(ctx context.Context, r client.Client, logger logr.Logger, nab *nacv1alpha1.NonAdminBackup, phase nacv1alpha1.NonAdminBackupPhase) (bool, error) { + // unnecessary? if nab == nil { return false, errors.New("NonAdminBackup object is nil") } @@ -178,6 +181,7 @@ func UpdateNonAdminPhase(ctx context.Context, r client.Client, logger logr.Logge // that the condition is set to the desired status only if it differs from the current status. 
// If the condition is already set to the desired status, no update is performed. func UpdateNonAdminBackupCondition(ctx context.Context, r client.Client, logger logr.Logger, nab *nacv1alpha1.NonAdminBackup, condition nacv1alpha1.NonAdminCondition, conditionStatus metav1.ConditionStatus, reason string, message string) (bool, error) { + // unnecessary? if nab == nil { return false, errors.New("NonAdminBackup object is nil") } @@ -219,6 +223,7 @@ func UpdateNonAdminBackupCondition(ctx context.Context, r client.Client, logger }, ) + // TODO ... Condition *set* to... ? logger.V(1).Info(fmt.Sprintf("NonAdminBackup Condition to: %s", condition)) logger.V(1).Info(fmt.Sprintf("NonAdminBackup Condition Reason to: %s", reason)) logger.V(1).Info(fmt.Sprintf("NonAdminBackup Condition Message to: %s", message)) diff --git a/internal/controller/nonadminbackup_controller.go b/internal/controller/nonadminbackup_controller.go index 85456c2..4ab2b53 100644 --- a/internal/controller/nonadminbackup_controller.go +++ b/internal/controller/nonadminbackup_controller.go @@ -20,6 +20,7 @@ package controller import ( "context" "errors" + "os" "time" "github.com/go-logr/logr" @@ -59,17 +60,12 @@ const ( // +kubebuilder:rbac:groups=velero.io,resources=backups,verbs=get;list;watch;create;update;patch // Reconcile is part of the main kubernetes reconciliation loop which aims to -// move the current state of the cluster closer to the desired state. -// TODO(user): Modify the Reconcile function to compare the state specified by -// the NonAdminBackup object against the actual cluster state, and then -// perform operations to make the cluster state reflect the state specified by -// the user. -// -// For more details, check Reconcile and its Result here: -// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.17.0/pkg/reconcile +// move the current state of the NonAdminBackup to the desired state. func (r *NonAdminBackupReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { rLog := log.FromContext(ctx) logger := rLog.WithValues("NonAdminBackup", req.NamespacedName) + // TODO remove duplication in logs + // remove >>>? logger.V(1).Info(">>> Reconcile NonAdminBackup - loop start") // Get the NonAdminBackup object @@ -80,8 +76,10 @@ func (r *NonAdminBackupReconciler) Reconcile(ctx context.Context, req ctrl.Reque // Reconcile loop was triggered when Velero Backup object got updated and NAB isn't there if err != nil { if apierrors.IsNotFound(err) { + // k/v's are noise? logger.V(1).Info("Non existing NonAdminBackup CR", nameField, req.Name, constant.NameSpaceString, req.Namespace) return ctrl.Result{}, nil + // should not error? } logger.Error(err, "Unable to fetch NonAdminBackup CR", nameField, req.Name, constant.NameSpaceString, req.Namespace) return ctrl.Result{}, err @@ -96,6 +94,7 @@ func (r *NonAdminBackupReconciler) Reconcile(ctx context.Context, req ctrl.Reque return ctrl.Result{}, nil } + // would not be better to validate first? reconcileExit, reconcileRequeue, reconcileErr = r.ValidateVeleroBackupSpec(ctx, rLog, &nab) if reconcileRequeue { return ctrl.Result{Requeue: true, RequeueAfter: requeueTimeSeconds * time.Second}, reconcileErr @@ -131,6 +130,7 @@ func (r *NonAdminBackupReconciler) Reconcile(ctx context.Context, req ctrl.Reque // It then returns boolean values indicating whether the reconciliation loop should requeue // and whether the status was updated. 
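+// As a rough illustration (the local names below are placeholders), callers
+// are expected to consume the returned triple the way Reconcile above does:
+//
+//	exit, requeue, err := r.InitNonAdminBackup(ctx, rLog, &nab)
+//	if requeue {
+//		return ctrl.Result{Requeue: true, RequeueAfter: requeueTimeSeconds * time.Second}, err
+//	} else if exit {
+//		return ctrl.Result{}, err
+//	}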
func (r *NonAdminBackupReconciler) InitNonAdminBackup(ctx context.Context, logrLogger logr.Logger, nab *nacv1alpha1.NonAdminBackup) (exitReconcile bool, requeueReconcile bool, errorReconcile error) { + // TODO logger := logrLogger.WithValues("InitNonAdminBackup", nab.Namespace) // Set initial Phase if nab.Status.Phase == constant.EmptyString { @@ -165,6 +165,7 @@ func (r *NonAdminBackupReconciler) InitNonAdminBackup(ctx context.Context, logrL // If the BackupSpec is invalid, the function sets the NonAdminBackup condition to "InvalidBackupSpec". // If the BackupSpec is valid, the function sets the NonAdminBackup condition to "BackupAccepted". func (r *NonAdminBackupReconciler) ValidateVeleroBackupSpec(ctx context.Context, logrLogger logr.Logger, nab *nacv1alpha1.NonAdminBackup) (exitReconcile bool, requeueReconcile bool, errorReconcile error) { + // TODO logger := logrLogger.WithValues("ValidateVeleroBackupSpec", nab.Namespace) // Main Validation point for the VeleroBackup included in NonAdminBackup spec @@ -176,6 +177,8 @@ func (r *NonAdminBackupReconciler) ValidateVeleroBackupSpec(ctx context.Context, if errMsgFromErr := err.Error(); errMsgFromErr != "" { errMsg = errMsgFromErr } + // TODO logs noise to user + // every logger error logs a stacktrace logger.Error(err, errMsg) updatedStatus, errUpdateStatus := function.UpdateNonAdminPhase(ctx, r.Client, logger, nab, nacv1alpha1.NonAdminBackupPhaseBackingOff) @@ -235,7 +238,8 @@ func (r *NonAdminBackupReconciler) CreateVeleroBackupSpec(ctx context.Context, l } veleroBackup := velerov1api.Backup{} - err := r.Get(ctx, client.ObjectKey{Namespace: constant.OadpNamespace, Name: veleroBackupName}, &veleroBackup) + // TODO how to best refactor for be easy to test and maintain? + err := r.Get(ctx, client.ObjectKey{Namespace: os.Getenv(constant.NamespaceEnvVar), Name: veleroBackupName}, &veleroBackup) if err != nil && apierrors.IsNotFound(err) { // Create VeleroBackup @@ -255,7 +259,7 @@ func (r *NonAdminBackupReconciler) CreateVeleroBackupSpec(ctx context.Context, l veleroBackup = velerov1api.Backup{ ObjectMeta: metav1.ObjectMeta{ Name: veleroBackupName, - Namespace: constant.OadpNamespace, + Namespace: os.Getenv(constant.NamespaceEnvVar), }, Spec: *backupSpec, } diff --git a/internal/controller/nonadminbackup_controller_test.go b/internal/controller/nonadminbackup_controller_test.go index 551e3e4..2d92b0d 100644 --- a/internal/controller/nonadminbackup_controller_test.go +++ b/internal/controller/nonadminbackup_controller_test.go @@ -18,66 +18,213 @@ package controller import ( "context" + "os" - ginkgov2 "github.com/onsi/ginkgo/v2" + "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" - "k8s.io/apimachinery/pkg/api/errors" + "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/reconcile" nacv1alpha1 "github.com/migtools/oadp-non-admin/api/v1alpha1" + "github.com/migtools/oadp-non-admin/internal/common/constant" ) -var _ = ginkgov2.Describe("NonAdminBackup Controller", func() { - ginkgov2.Context("When reconciling a resource", func() { - const resourceName = "test-resource" +type nonAdminBackupReconcileScenario struct { + namespace string + nonAdminBackup string + oadpNamespace string + spec nacv1alpha1.NonAdminBackupSpec + status nacv1alpha1.NonAdminBackupStatus + doNotCreateNonAdminBackup bool +} - ctx := context.Background() +func createTestNonAdminBackup(name string, namespace string, spec 
nacv1alpha1.NonAdminBackupSpec) *nacv1alpha1.NonAdminBackup {
+	return &nacv1alpha1.NonAdminBackup{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      name,
+			Namespace: namespace,
+		},
+		Spec: spec,
+	}
+}

-		typeNamespacedName := types.NamespacedName{
-			Name:      resourceName,
-			Namespace: "default", // TODO(user):Modify as needed
+// runNonAdminBackupReconcilerUntilExit re-runs Reconcile while it keeps
+// requesting a requeue, returning the first result that exits the loop.
+func runNonAdminBackupReconcilerUntilExit(r *NonAdminBackupReconciler, scenario nonAdminBackupReconcileScenario) (reconcile.Result, error) {
+	result, err := r.Reconcile(
+		context.Background(),
+		reconcile.Request{NamespacedName: types.NamespacedName{
+			Namespace: scenario.namespace,
+			Name:      scenario.nonAdminBackup,
+		}},
+	)
+	if err == nil && result.Requeue {
+		return runNonAdminBackupReconcilerUntilExit(r, scenario)
+	}
+	return result, err
+}
+
+var _ = ginkgo.Describe("Test NonAdminBackup Reconcile function", func() {
+	var (
+		ctx                 = context.Background()
+		currentTestScenario nonAdminBackupReconcileScenario
+		updateTestScenario  = func(scenario nonAdminBackupReconcileScenario) {
+			currentTestScenario = scenario
+		}
+	)
+
+	ginkgo.AfterEach(func() {
+		gomega.Expect(os.Unsetenv(constant.NamespaceEnvVar)).To(gomega.Succeed())
+		if len(currentTestScenario.oadpNamespace) > 0 {
+			oadpNamespace := &corev1.Namespace{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: currentTestScenario.oadpNamespace,
+				},
+			}
+			gomega.Expect(k8sClient.Delete(ctx, oadpNamespace)).To(gomega.Succeed())
+		}
+
+		nonAdminBackup := &nacv1alpha1.NonAdminBackup{}
+		if k8sClient.Get(
+			ctx,
+			types.NamespacedName{
+				Name:      currentTestScenario.nonAdminBackup,
+				Namespace: currentTestScenario.namespace,
+			},
+			nonAdminBackup,
+		) == nil {
+			gomega.Expect(k8sClient.Delete(ctx, nonAdminBackup)).To(gomega.Succeed())
 		}
-		nonadminbackup := &nacv1alpha1.NonAdminBackup{}

-		ginkgov2.BeforeEach(func() {
-			ginkgov2.By("creating the custom resource for the Kind NonAdminBackup")
-			err := k8sClient.Get(ctx, typeNamespacedName, nonadminbackup)
-			if err != nil && errors.IsNotFound(err) {
-				resource := &nacv1alpha1.NonAdminBackup{
+		namespace := &corev1.Namespace{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: currentTestScenario.namespace,
+			},
+		}
+		gomega.Expect(k8sClient.Delete(ctx, namespace)).To(gomega.Succeed())
+	})
+
+	// TODO need to test more reconcile cases...
+	ginkgo.DescribeTable("Reconcile without error",
+		func(scenario nonAdminBackupReconcileScenario) {
+			updateTestScenario(scenario)
+
+			namespace := &corev1.Namespace{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: scenario.namespace,
+				},
+			}
+			gomega.Expect(k8sClient.Create(ctx, namespace)).To(gomega.Succeed())
+
+			if !scenario.doNotCreateNonAdminBackup {
+				nonAdminBackup := createTestNonAdminBackup(scenario.nonAdminBackup, scenario.namespace, scenario.spec)
+				gomega.Expect(k8sClient.Create(ctx, nonAdminBackup)).To(gomega.Succeed())
+			}
+
+			if len(scenario.oadpNamespace) > 0 {
+				gomega.Expect(os.Setenv(constant.NamespaceEnvVar, scenario.oadpNamespace)).To(gomega.Succeed())
+				oadpNamespace := &corev1.Namespace{
 					ObjectMeta: metav1.ObjectMeta{
-						Name:      resourceName,
-						Namespace: "default",
+						Name: scenario.oadpNamespace,
 					},
-					// TODO(user): Specify other spec details if needed.
 				}
-				gomega.Expect(k8sClient.Create(ctx, resource)).To(gomega.Succeed())
+				gomega.Expect(k8sClient.Create(ctx, oadpNamespace)).To(gomega.Succeed())
 			}
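+
+			// The controller resolves the OADP namespace from the WATCH_NAMESPACE
+			// environment variable (constant.NamespaceEnvVar), so it must be set
+			// before Reconcile runs; the AfterEach above unsets it again.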
-		})
-
-		ginkgov2.AfterEach(func() {
-			// TODO(user): Cleanup logic after each test, like removing the resource instance.
-			resource := &nacv1alpha1.NonAdminBackup{}
-			err := k8sClient.Get(ctx, typeNamespacedName, resource)
-			gomega.Expect(err).NotTo(gomega.HaveOccurred())
-
-			ginkgov2.By("Cleanup the specific resource instance NonAdminBackup")
-			gomega.Expect(k8sClient.Delete(ctx, resource)).To(gomega.Succeed())
-		})
-		ginkgov2.It("should successfully reconcile the resource", func() {
-			ginkgov2.By("Reconciling the created resource")
-			controllerReconciler := &NonAdminBackupReconciler{
+
+			r := &NonAdminBackupReconciler{
 				Client: k8sClient,
-				Scheme: k8sClient.Scheme(),
+				Scheme: testEnv.Scheme,
 			}
-			_, err := controllerReconciler.Reconcile(ctx, reconcile.Request{
-				NamespacedName: typeNamespacedName,
-			})
-			gomega.Expect(err).NotTo(gomega.HaveOccurred())
-			// TODO(user): Add more specific assertions depending on your controller's reconciliation logic.
-			// Example: If you expect a certain status condition after reconciliation, verify it here.
-		})
-	})
+			result, err := runNonAdminBackupReconcilerUntilExit(r, scenario)
+			// TODO need to collect logs, so they do not appear in test run
+			// also assert them
+
+			gomega.Expect(result).To(gomega.Equal(reconcile.Result{Requeue: false, RequeueAfter: 0}))
+			gomega.Expect(err).To(gomega.Not(gomega.HaveOccurred()))
+
+			if !scenario.doNotCreateNonAdminBackup {
+				nonAdminBackup := &nacv1alpha1.NonAdminBackup{}
+				gomega.Expect(k8sClient.Get(
+					ctx,
+					types.NamespacedName{
+						Name:      currentTestScenario.nonAdminBackup,
+						Namespace: currentTestScenario.namespace,
+					},
+					nonAdminBackup,
+				)).To(gomega.Succeed())
+				gomega.Expect(nonAdminBackup.Status.Phase).To(gomega.Equal(scenario.status.Phase))
+				for index := range nonAdminBackup.Status.Conditions {
+					gomega.Expect(nonAdminBackup.Status.Conditions[index].Type).To(gomega.Equal(scenario.status.Conditions[index].Type))
+					gomega.Expect(nonAdminBackup.Status.Conditions[index].Status).To(gomega.Equal(scenario.status.Conditions[index].Status))
+					gomega.Expect(nonAdminBackup.Status.Conditions[index].Reason).To(gomega.Equal(scenario.status.Conditions[index].Reason))
+					gomega.Expect(nonAdminBackup.Status.Conditions[index].Message).To(gomega.Equal(scenario.status.Conditions[index].Message))
+				}
+			}
+		},
+		ginkgo.Entry("Should NOT accept non-existing NonAdminBackup", nonAdminBackupReconcileScenario{
+			namespace:                 "test-nonadminbackup-reconcile-1",
+			nonAdminBackup:            "test-nonadminbackup-reconcile-1-cr",
+			doNotCreateNonAdminBackup: true,
+			// TODO should have loop end in logs
+			// TODO unnecessary duplication in logs
+			// {"NonAdminBackup": {"name":"test-nonadminbackup-reconcile-1-cr","namespace":"test-nonadminbackup-reconcile-1"},
+			// "Name": "test-nonadminbackup-reconcile-1-cr", "Namespace": "test-nonadminbackup-reconcile-1"}
+		}),
+		ginkgo.Entry("Should NOT accept NonAdminBackup with empty backupSpec", nonAdminBackupReconcileScenario{
+			namespace:      "test-nonadminbackup-reconcile-2",
+			nonAdminBackup: "test-nonadminbackup-reconcile-2-cr",
+			spec:           nacv1alpha1.NonAdminBackupSpec{},
+			status: nacv1alpha1.NonAdminBackupStatus{
+				Phase: nacv1alpha1.NonAdminBackupPhaseBackingOff,
+			},
+		}),
+		// TODO should not have loop start again in logs
+		// TODO error message duplication
+		// TODO should have loop end in logs
+		ginkgo.Entry("Should NOT accept NonAdminBackup with includedNamespaces pointing to different namespace", nonAdminBackupReconcileScenario{
+			namespace:      "test-nonadminbackup-reconcile-3",
+			nonAdminBackup: "test-nonadminbackup-reconcile-3-cr",
+			spec: nacv1alpha1.NonAdminBackupSpec{
+				BackupSpec: &v1.BackupSpec{
+					IncludedNamespaces:
[]string{"not-valid"}, + }, + }, + status: nacv1alpha1.NonAdminBackupStatus{ + Phase: nacv1alpha1.NonAdminBackupPhaseBackingOff, + }, + }), + // TODO should not have loop start again in logs + // TODO error message duplication + // TODO should have loop end in logs + ginkgo.Entry("Should accept NonAdminBackup and create Velero Backup", nonAdminBackupReconcileScenario{ + namespace: "test-nonadminbackup-reconcile-4", + nonAdminBackup: "test-nonadminbackup-reconcile-4-cr", + oadpNamespace: "test-nonadminbackup-reconcile-4-oadp", + spec: nacv1alpha1.NonAdminBackupSpec{ + BackupSpec: &v1.BackupSpec{}, + }, + status: nacv1alpha1.NonAdminBackupStatus{ + // TODO should not have VeleroBackupName and VeleroBackupNamespace? + Phase: nacv1alpha1.NonAdminBackupPhaseCreated, + Conditions: []metav1.Condition{ + { + Type: "Accepted", + Status: metav1.ConditionTrue, + Reason: "Validated", + Message: "Valid Backup config", + }, + { + Type: "Queued", + Status: metav1.ConditionTrue, + Reason: "BackupScheduled", + Message: "Created Velero Backup object", + }, + }, + }, + }), + // TODO should not have loop start again and again in logs + // TODO 3 condition logs, only 2 in CR status? + ) }) diff --git a/internal/controller/suite_test.go b/internal/controller/suite_test.go index e1510ba..63d9578 100644 --- a/internal/controller/suite_test.go +++ b/internal/controller/suite_test.go @@ -24,6 +24,7 @@ import ( ginkgov2 "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" + velerov1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" "sigs.k8s.io/controller-runtime/pkg/client" @@ -52,7 +53,10 @@ var _ = ginkgov2.BeforeSuite(func() { ginkgov2.By("bootstrapping test environment") testEnv = &envtest.Environment{ - CRDDirectoryPaths: []string{filepath.Join("..", "..", "config", "crd", "bases")}, + CRDDirectoryPaths: []string{ + filepath.Join("..", "..", "config", "crd", "bases"), + filepath.Join("..", "..", "hack", "extra-crds"), + }, ErrorIfCRDPathMissing: true, // The BinaryAssetsDirectory is only required if you want to run the tests directly @@ -61,7 +65,7 @@ var _ = ginkgov2.BeforeSuite(func() { // Note that you must have the required binaries setup under the bin directory to perform // the tests directly. When we run make test it will be setup and used automatically. BinaryAssetsDirectory: filepath.Join("..", "..", "bin", "k8s", - fmt.Sprintf("1.29.0-%s-%s", runtime.GOOS, runtime.GOARCH)), + fmt.Sprintf("1.29.3-%s-%s", runtime.GOOS, runtime.GOARCH)), } var err error @@ -72,6 +76,8 @@ var _ = ginkgov2.BeforeSuite(func() { err = nacv1alpha1.AddToScheme(scheme.Scheme) gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = velerov1.AddToScheme(scheme.Scheme) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) // +kubebuilder:scaffold:scheme From 3d34406f12f529e1c2b6ce44edee5d8308ef961a Mon Sep 17 00:00:00 2001 From: Mateus Oliveira Date: Wed, 28 Aug 2024 17:57:01 -0300 Subject: [PATCH 02/17] fixup! 
fix: add integration tests for NAB

Signed-off-by: Mateus Oliveira
---
 Makefile                                    |   4 +-
 cmd/main.go                                 |   3 +-
 docs/CONTRIBUTING.md                        |   5 +
 internal/common/constant/constant.go        |   5 -
 internal/common/function/function.go        |   7 +-
 .../controller/nonadminbackup_controller.go | 322 +++++++++++++-----
 .../nonadminbackup_controller_test.go       |   6 +-
 .../predicate/nonadminbackup_predicate.go   |   2 +-
 8 files changed, 253 insertions(+), 101 deletions(-)

diff --git a/Makefile b/Makefile
index 49655ff..74b59ae 100644
--- a/Makefile
+++ b/Makefile
@@ -224,8 +224,8 @@ editorconfig: $(LOCALBIN) ## Download editorconfig locally if necessary.
 	mv $(LOCALBIN)/$${ec_binary} $(EC) ;\
 	}
 
-# TODO increase!!!
-COVERAGE_THRESHOLD=10
+# TODO increase to 60?
+COVERAGE_THRESHOLD=30
 
 .PHONY: ci
 ci: simulation-test lint docker-build hadolint check-generate check-manifests ec check-images ## Run all project continuous integration (CI) checks locally.
diff --git a/cmd/main.go b/cmd/main.go
index 8d6be27..d865bbc 100644
--- a/cmd/main.go
+++ b/cmd/main.go
@@ -38,6 +38,7 @@ import (
 
 	nacv1alpha1 "github.com/migtools/oadp-non-admin/api/v1alpha1"
 	"github.com/migtools/oadp-non-admin/internal/common/constant"
+	"github.com/migtools/oadp-non-admin/internal/common/function"
 	"github.com/migtools/oadp-non-admin/internal/controller"
 )
 
@@ -98,7 +99,7 @@ func main() {
 		TLSOpts: tlsOpts,
 	})
 
-	if len(constant.OadpNamespace) == 0 {
+	if len(function.GetOADPNamespace()) == 0 {
 		setupLog.Error(fmt.Errorf("%v environment variable is empty", constant.NamespaceEnvVar), "environment variable must be set")
 		os.Exit(1)
 	}
diff --git a/docs/CONTRIBUTING.md b/docs/CONTRIBUTING.md
index f2bafcf..9aa8542 100644
--- a/docs/CONTRIBUTING.md
+++ b/docs/CONTRIBUTING.md
@@ -58,6 +58,11 @@ To see the html report, run
 go tool cover -html=cover.out
 ```
 
+To run just the controller integration tests (this gives more verbose output), run
+```sh
+ginkgo run -mod=mod internal/controller -- --ginkgo.vv
+```
+
 TODO end to end tests
 
 ### Linters and code formatters
diff --git a/internal/common/constant/constant.go b/internal/common/constant/constant.go
index c3748d7..6f2718c 100644
--- a/internal/common/constant/constant.go
+++ b/internal/common/constant/constant.go
@@ -17,8 +17,6 @@ limitations under the License.
 // Package constant contains all common constants used in the project
 package constant
 
-import "os"
-
 // Common labels for objects manipulated by the Non Admin Controller
 // Labels should be used to identify the NAC object
 // Annotations on the other hand should be used to define ownership
@@ -37,9 +35,6 @@ const (
 	NamespaceEnvVar = "WATCH_NAMESPACE"
 )
 
-// OadpNamespace is the namespace OADP operator is installed
-var OadpNamespace = os.Getenv(NamespaceEnvVar)
-
 // EmptyString defines a constant for the empty string
 const EmptyString = ""
 
diff --git a/internal/common/function/function.go b/internal/common/function/function.go
index 853eb61..192a97d 100644
--- a/internal/common/function/function.go
+++ b/internal/common/function/function.go
@@ -23,6 +23,7 @@ import (
 	"encoding/hex"
 	"errors"
 	"fmt"
+	"os"
 	"reflect"
 
 	"github.com/go-logr/logr"
@@ -39,6 +40,11 @@ import (
 
 const requiredAnnotationError = "backup does not have the required annotation '%s'"
 
+// GetOADPNamespace returns the namespace the OADP operator is installed in
+func GetOADPNamespace() string {
+	return os.Getenv(constant.NamespaceEnvVar)
+}
+
 // AddNonAdminLabels return a map with both the object labels and with the default Non Admin labels.
// If error occurs, a map with only the default Non Admin labels is returned func AddNonAdminLabels(labels map[string]string) map[string]string { @@ -172,7 +178,6 @@ func UpdateNonAdminPhase(ctx context.Context, r client.Client, logger logr.Logge } logger.V(1).Info(fmt.Sprintf("NonAdminBackup Phase set to: %s", phase)) - return true, nil } diff --git a/internal/controller/nonadminbackup_controller.go b/internal/controller/nonadminbackup_controller.go index 4ab2b53..6b3164e 100644 --- a/internal/controller/nonadminbackup_controller.go +++ b/internal/controller/nonadminbackup_controller.go @@ -20,7 +20,6 @@ package controller import ( "context" "errors" - "os" "time" "github.com/go-logr/logr" @@ -85,7 +84,8 @@ func (r *NonAdminBackupReconciler) Reconcile(ctx context.Context, req ctrl.Reque return ctrl.Result{}, err } - reconcileExit, reconcileRequeue, reconcileErr := r.InitNonAdminBackup(ctx, rLog, &nab) + // TODO why do we need Requeue? is not that anti performance? + reconcileExit, reconcileRequeue, reconcileErr := r.Init(ctx, rLog, &nab) if reconcileRequeue { return ctrl.Result{Requeue: true, RequeueAfter: requeueTimeSeconds * time.Second}, reconcileErr } else if reconcileExit && reconcileErr != nil { @@ -95,7 +95,7 @@ func (r *NonAdminBackupReconciler) Reconcile(ctx context.Context, req ctrl.Reque } // would not be better to validate first? - reconcileExit, reconcileRequeue, reconcileErr = r.ValidateVeleroBackupSpec(ctx, rLog, &nab) + reconcileExit, reconcileRequeue, reconcileErr = r.ValidateSpec(ctx, rLog, &nab) if reconcileRequeue { return ctrl.Result{Requeue: true, RequeueAfter: requeueTimeSeconds * time.Second}, reconcileErr } else if reconcileExit && reconcileErr != nil { @@ -104,7 +104,38 @@ func (r *NonAdminBackupReconciler) Reconcile(ctx context.Context, req ctrl.Reque return ctrl.Result{}, nil } - reconcileExit, reconcileRequeue, reconcileErr = r.CreateVeleroBackupSpec(ctx, rLog, &nab) + // TODO refactor idea + // veleroBackupName := function.GenerateVeleroBackupName(nab.Namespace, nab.Name) + // if veleroBackupName == constant.EmptyString { + // return ctrl.Result{}, reconcile.TerminalError(errors.New("unable to generate Velero Backup name")) + // } + // oadpNamespace := function.GetOADPNamespace() + // veleroBackup := velerov1api.Backup{} + // err = r.Get(ctx, client.ObjectKey{Namespace: oadpNamespace, Name: veleroBackupName}, &veleroBackup) + // if err != nil { + // if !apierrors.IsNotFound(err) { + // logger.Error(err, "Unable to fetch VeleroBackup") + // return ctrl.Result{}, reconcile.TerminalError(err) + // } + // reconcileExit, reconcileRequeue, reconcileErr = r.CreateVeleroBackup(ctx, rLog, &nab, veleroBackupName, oadpNamespace) + // if reconcileRequeue { + // return ctrl.Result{Requeue: true, RequeueAfter: requeueTimeSeconds * time.Second}, reconcileErr + // } else if reconcileExit && reconcileErr != nil { + // return ctrl.Result{}, reconcile.TerminalError(reconcileErr) + // } else if reconcileExit { + // return ctrl.Result{}, nil + // } + // reconcileExit, reconcileRequeue, reconcileErr = r.UpdateStatusAfterVeleroBackupCreation(ctx, rLog, &nab) + // if reconcileRequeue { + // return ctrl.Result{Requeue: true, RequeueAfter: requeueTimeSeconds * time.Second}, reconcileErr + // } else if reconcileExit && reconcileErr != nil { + // return ctrl.Result{}, reconcile.TerminalError(reconcileErr) + // } else if reconcileExit { + // return ctrl.Result{}, nil + // } + // } + + reconcileExit, reconcileRequeue, reconcileErr = r.UpdateSpecStatus(ctx, rLog, &nab) if reconcileRequeue 
{ return ctrl.Result{Requeue: true, RequeueAfter: requeueTimeSeconds * time.Second}, reconcileErr } else if reconcileExit && reconcileErr != nil { @@ -117,7 +148,7 @@ func (r *NonAdminBackupReconciler) Reconcile(ctx context.Context, req ctrl.Reque return ctrl.Result{}, nil } -// InitNonAdminBackup sets the New Phase on a NonAdminBackup object if it is not already set. +// Init initializes the Status.Phase from the NonAdminBackup. // // Parameters: // @@ -127,11 +158,11 @@ func (r *NonAdminBackupReconciler) Reconcile(ctx context.Context, req ctrl.Reque // // The function checks if the Phase of the NonAdminBackup object is empty. // If it is empty, it sets the Phase to "New". -// It then returns boolean values indicating whether the reconciliation loop should requeue -// and whether the status was updated. -func (r *NonAdminBackupReconciler) InitNonAdminBackup(ctx context.Context, logrLogger logr.Logger, nab *nacv1alpha1.NonAdminBackup) (exitReconcile bool, requeueReconcile bool, errorReconcile error) { +// It then returns boolean values indicating whether the reconciliation loop should requeue or exit +// and error value whether the status was updated successfully. +func (r *NonAdminBackupReconciler) Init(ctx context.Context, logrLogger logr.Logger, nab *nacv1alpha1.NonAdminBackup) (exitReconcile bool, requeueReconcile bool, errorReconcile error) { // TODO - logger := logrLogger.WithValues("InitNonAdminBackup", nab.Namespace) + logger := logrLogger.WithValues("Init", nab.Namespace) // Set initial Phase if nab.Status.Phase == constant.EmptyString { // Phase: New @@ -151,7 +182,7 @@ func (r *NonAdminBackupReconciler) InitNonAdminBackup(ctx context.Context, logrL return false, false, nil } -// ValidateVeleroBackupSpec validates the VeleroBackup Spec from the NonAdminBackup. +// ValidateSpec validates the Spec from the NonAdminBackup. // // Parameters: // @@ -162,11 +193,11 @@ func (r *NonAdminBackupReconciler) InitNonAdminBackup(ctx context.Context, logrL // The function attempts to get the BackupSpec from the NonAdminBackup object. // If an error occurs during this process, the function sets the NonAdminBackup status to "BackingOff" // and updates the corresponding condition accordingly. -// If the BackupSpec is invalid, the function sets the NonAdminBackup condition to "InvalidBackupSpec". -// If the BackupSpec is valid, the function sets the NonAdminBackup condition to "BackupAccepted". -func (r *NonAdminBackupReconciler) ValidateVeleroBackupSpec(ctx context.Context, logrLogger logr.Logger, nab *nacv1alpha1.NonAdminBackup) (exitReconcile bool, requeueReconcile bool, errorReconcile error) { +// If the BackupSpec is invalid, the function sets the NonAdminBackup condition to "InvalidBackupSpec". THIS DOES NOT HAPPEN +// If the BackupSpec is valid, the function sets the NonAdminBackup condition to "BackupAccepted". remove? +func (r *NonAdminBackupReconciler) ValidateSpec(ctx context.Context, logrLogger logr.Logger, nab *nacv1alpha1.NonAdminBackup) (exitReconcile bool, requeueReconcile bool, errorReconcile error) { // TODO - logger := logrLogger.WithValues("ValidateVeleroBackupSpec", nab.Namespace) + logger := logrLogger.WithValues("ValidateSpec", nab.Namespace) // Main Validation point for the VeleroBackup included in NonAdminBackup spec _, err := function.GetBackupSpecFromNonAdminBackup(nab) @@ -190,9 +221,8 @@ func (r *NonAdminBackupReconciler) ValidateVeleroBackupSpec(ctx context.Context, return true, false, nil } - // Continue. 
VeleroBackup looks fine, setting Accepted condition + // Continue. VeleroBackup looks fine, setting Accepted condition to false updatedCondition, errUpdateCondition := function.UpdateNonAdminBackupCondition(ctx, r.Client, logger, nab, nacv1alpha1.NonAdminConditionAccepted, metav1.ConditionFalse, "InvalidBackupSpec", errMsg) - if errUpdateCondition != nil { logger.Error(errUpdateCondition, "Unable to set BackupAccepted Condition: False", nameField, nab.Name, constant.NameSpaceString, nab.Namespace) return true, false, errUpdateCondition @@ -203,21 +233,26 @@ func (r *NonAdminBackupReconciler) ValidateVeleroBackupSpec(ctx context.Context, // We do not requeue - this was an error from getting Spec from NAB return true, false, err } - - updatedStatus, errUpdateStatus := function.UpdateNonAdminBackupCondition(ctx, r.Client, logger, nab, nacv1alpha1.NonAdminConditionAccepted, metav1.ConditionTrue, "BackupAccepted", "backup accepted") - if errUpdateStatus != nil { - logger.Error(errUpdateStatus, "Unable to set BackupAccepted Condition: True", nameField, nab.Name, constant.NameSpaceString, nab.Namespace) - return true, false, errUpdateStatus - } else if updatedStatus { - // We do requeue - The VeleroBackup got accepted and next reconcile loop will continue - // with further work on the VeleroBackup such as creating it - return false, true, nil - } - + logger.V(1).Info("NonAdminBackup CR Spec validated successfully") + + // TODO is this needed? from design, does not seem a valid condition + // updatedStatus, errUpdateStatus := function.UpdateNonAdminBackupCondition(ctx, r.Client, logger, nab, nacv1alpha1.NonAdminConditionAccepted, metav1.ConditionTrue, "Validated", "Valid Backup config") + // if errUpdateStatus != nil { + // logger.Error(errUpdateStatus, "Unable to set BackupAccepted Condition: True", nameField, nab.Name, constant.NameSpaceString, nab.Namespace) + // return true, false, errUpdateStatus + // } else if updatedStatus { + // // We do requeue - The VeleroBackup got validated and next reconcile loop will continue + // // with further work on the VeleroBackup such as creating it + // return false, true, nil + // } + + // TODO move VeleroBackup Spec update to here? + + // TODO change? return false, false, nil } -// CreateVeleroBackupSpec creates or updates a Velero Backup object based on the provided NonAdminBackup object. +// UpdateSpecStatus updates the Spec and Status from the NonAdminBackup. // // Parameters: // @@ -225,11 +260,12 @@ func (r *NonAdminBackupReconciler) ValidateVeleroBackupSpec(ctx context.Context, // log: Logger instance for logging messages. // nab: Pointer to the NonAdminBackup object. // -// The function generates a name for the Velero Backup object based on the provided namespace and name. -// It then checks if a Velero Backup object with that name already exists. If it does not exist, it creates a new one. +// The function generates the name for the Velero Backup object based on the provided namespace and name. +// It then checks if a Velero Backup object with that name already exists. If it does not exist, it creates a new one +// and updates NonAdminBackup Status. Otherwise, updates NonAdminBackup VeleroBackup Status based on Velero Backup object Status. 
// The function returns boolean values indicating whether the reconciliation loop should exit or requeue -func (r *NonAdminBackupReconciler) CreateVeleroBackupSpec(ctx context.Context, logrLogger logr.Logger, nab *nacv1alpha1.NonAdminBackup) (exitReconcile bool, requeueReconcile bool, errorReconcile error) { - logger := logrLogger.WithValues("CreateVeleroBackupSpec", nab.Namespace) +func (r *NonAdminBackupReconciler) UpdateSpecStatus(ctx context.Context, logrLogger logr.Logger, nab *nacv1alpha1.NonAdminBackup) (exitReconcile bool, requeueReconcile bool, errorReconcile error) { + logger := logrLogger.WithValues("UpdateSpecStatus", nab.Namespace) veleroBackupName := function.GenerateVeleroBackupName(nab.Namespace, nab.Name) @@ -237,11 +273,14 @@ func (r *NonAdminBackupReconciler) CreateVeleroBackupSpec(ctx context.Context, l return true, false, errors.New("unable to generate Velero Backup name") } + oadpNamespace := function.GetOADPNamespace() veleroBackup := velerov1api.Backup{} - // TODO how to best refactor for be easy to test and maintain? - err := r.Get(ctx, client.ObjectKey{Namespace: os.Getenv(constant.NamespaceEnvVar), Name: veleroBackupName}, &veleroBackup) - - if err != nil && apierrors.IsNotFound(err) { + err := r.Get(ctx, client.ObjectKey{Namespace: oadpNamespace, Name: veleroBackupName}, &veleroBackup) + if err != nil { + if !apierrors.IsNotFound(err) { + logger.Error(err, "Unable to fetch VeleroBackup") + return true, false, err + } // Create VeleroBackup // Don't update phase nor conditions yet. // Those will be updated when then Reconcile loop is triggered by the VeleroBackup object @@ -259,68 +298,173 @@ func (r *NonAdminBackupReconciler) CreateVeleroBackupSpec(ctx context.Context, l veleroBackup = velerov1api.Backup{ ObjectMeta: metav1.ObjectMeta{ Name: veleroBackupName, - Namespace: os.Getenv(constant.NamespaceEnvVar), + Namespace: oadpNamespace, }, Spec: *backupSpec, } - } else if err != nil { - logger.Error(err, "Unable to fetch VeleroBackup") - return true, false, err - } else { - // We should not update already created VeleroBackup object. - // The VeleroBackup within NonAdminBackup will - // be reverted back to the previous state - the state which created VeleroBackup - // in a first place, so they will be in sync. - logger.Info("Backup already exists, updating NonAdminBackup status", nameField, veleroBackupName) - updatedNab, errBackupUpdate := function.UpdateNonAdminBackupFromVeleroBackup(ctx, r.Client, logger, nab, &veleroBackup) - // Regardless if the status was updated or not, we should not - // requeue here as it was only status update. 
- if errBackupUpdate != nil { - return true, false, errBackupUpdate - } else if updatedNab { - logger.V(1).Info("NonAdminBackup CR - Rqueue after Status Update") - return false, true, nil - } - return true, false, nil - } - // Ensure labels are set for the Backup object - existingLabels := veleroBackup.Labels - naManagedLabels := function.AddNonAdminLabels(existingLabels) - veleroBackup.Labels = naManagedLabels - - // Ensure annotations are set for the Backup object - existingAnnotations := veleroBackup.Annotations - ownerUUID := string(nab.ObjectMeta.UID) - nabManagedAnnotations := function.AddNonAdminBackupAnnotations(nab.Namespace, nab.Name, ownerUUID, existingAnnotations) - veleroBackup.Annotations = nabManagedAnnotations + // Ensure labels are set for the Backup object + existingLabels := veleroBackup.Labels + naManagedLabels := function.AddNonAdminLabels(existingLabels) + veleroBackup.Labels = naManagedLabels + + // Ensure annotations are set for the Backup object + existingAnnotations := veleroBackup.Annotations + ownerUUID := string(nab.ObjectMeta.UID) + nabManagedAnnotations := function.AddNonAdminBackupAnnotations(nab.Namespace, nab.Name, ownerUUID, existingAnnotations) + veleroBackup.Annotations = nabManagedAnnotations + + _, err = controllerutil.CreateOrPatch(ctx, r.Client, &veleroBackup, nil) + if err != nil { + logger.Error(err, "Failed to create backup", nameField, veleroBackupName) + return true, false, err + } + logger.Info("VeleroBackup successfully created", nameField, veleroBackupName) - _, err = controllerutil.CreateOrPatch(ctx, r.Client, &veleroBackup, nil) - if err != nil { - logger.Error(err, "Failed to create backup", nameField, veleroBackupName) - return true, false, err - } - logger.Info("VeleroBackup successfully created", nameField, veleroBackupName) + _, errUpdate := function.UpdateNonAdminPhase(ctx, r.Client, logger, nab, nacv1alpha1.NonAdminBackupPhaseCreated) + if errUpdate != nil { + logger.Error(errUpdate, "Unable to set NonAdminBackup Phase: Created", nameField, nab.Name, constant.NameSpaceString, nab.Namespace) + return true, false, errUpdate + } + _, errUpdate = function.UpdateNonAdminBackupCondition(ctx, r.Client, logger, nab, nacv1alpha1.NonAdminConditionAccepted, metav1.ConditionTrue, "BackupAccepted", "Backup accepted") + if errUpdate != nil { + logger.Error(errUpdate, "Unable to set BackupAccepted Condition: True", nameField, nab.Name, constant.NameSpaceString, nab.Namespace) + return true, false, errUpdate + } + _, errUpdate = function.UpdateNonAdminBackupCondition(ctx, r.Client, logger, nab, nacv1alpha1.NonAdminConditionQueued, metav1.ConditionTrue, "BackupScheduled", "Created Velero Backup object") + if errUpdate != nil { + logger.Error(errUpdate, "Unable to set BackupQueued Condition: True", nameField, nab.Name, constant.NameSpaceString, nab.Namespace) + return true, false, errUpdate + } - _, errUpdate := function.UpdateNonAdminPhase(ctx, r.Client, logger, nab, nacv1alpha1.NonAdminBackupPhaseCreated) - if errUpdate != nil { - logger.Error(errUpdate, "Unable to set NonAdminBackup Phase: Created", nameField, nab.Name, constant.NameSpaceString, nab.Namespace) - return true, false, errUpdate + return false, false, nil } - _, errUpdate = function.UpdateNonAdminBackupCondition(ctx, r.Client, logger, nab, nacv1alpha1.NonAdminConditionAccepted, metav1.ConditionTrue, "Validated", "Valid Backup config") - if errUpdate != nil { - logger.Error(errUpdate, "Unable to set BackupAccepted Condition: True", nameField, nab.Name, constant.NameSpaceString, 
nab.Namespace) - return true, false, errUpdate - } - _, errUpdate = function.UpdateNonAdminBackupCondition(ctx, r.Client, logger, nab, nacv1alpha1.NonAdminConditionQueued, metav1.ConditionTrue, "BackupScheduled", "Created Velero Backup object") - if errUpdate != nil { - logger.Error(errUpdate, "Unable to set BackupQueued Condition: True", nameField, nab.Name, constant.NameSpaceString, nab.Namespace) - return true, false, errUpdate + // We should not update already created VeleroBackup object. + // The VeleroBackup within NonAdminBackup will + // be reverted back to the previous state - the state which created VeleroBackup + // in a first place, so they will be in sync. + logger.Info("Backup already exists, updating NonAdminBackup status", nameField, veleroBackupName) + updatedNab, errBackupUpdate := function.UpdateNonAdminBackupFromVeleroBackup(ctx, r.Client, logger, nab, &veleroBackup) + // Regardless if the status was updated or not, we should not + // requeue here as it was only status update. + if errBackupUpdate != nil { + return true, false, errBackupUpdate + } else if updatedNab { + logger.V(1).Info("NonAdminBackup CR - Rqueue after Status Update") + return false, true, nil } - - return false, false, nil + return true, false, nil } +// TODO refactor idea +// // CreateVeleroBackup +// // +// // TODO +// func (r *NonAdminBackupReconciler) CreateVeleroBackup(ctx context.Context, logrLogger logr.Logger, nab *nacv1alpha1.NonAdminBackup, veleroBackupName string, oadpNamespace string) (exitReconcile bool, requeueReconcile bool, errorReconcile error) { +// logger := logrLogger.WithValues("CreateVeleroBackup", nab.Namespace) + +// // Create VeleroBackup +// // Don't update phase nor conditions yet. +// // Those will be updated when then Reconcile loop is triggered by the VeleroBackup object +// logger.Info("No backup found", nameField, veleroBackupName) + +// // We don't validate error here. 
+// // This was already validated in the ValidateVeleroBackupSpec +// backupSpec, errBackup := function.GetBackupSpecFromNonAdminBackup(nab) + +// if errBackup != nil { +// // Should never happen as it was already checked +// return true, false, errBackup +// } + +// veleroBackup := velerov1api.Backup{ +// ObjectMeta: metav1.ObjectMeta{ +// Name: veleroBackupName, +// Namespace: oadpNamespace, +// }, +// Spec: *backupSpec, +// } + +// // Ensure labels are set for the Backup object +// existingLabels := veleroBackup.Labels +// naManagedLabels := function.AddNonAdminLabels(existingLabels) +// veleroBackup.Labels = naManagedLabels + +// // Ensure annotations are set for the Backup object +// existingAnnotations := veleroBackup.Annotations +// ownerUUID := string(nab.ObjectMeta.UID) +// nabManagedAnnotations := function.AddNonAdminBackupAnnotations(nab.Namespace, nab.Name, ownerUUID, existingAnnotations) +// veleroBackup.Annotations = nabManagedAnnotations + +// _, err := controllerutil.CreateOrPatch(ctx, r.Client, &veleroBackup, nil) +// if err != nil { +// logger.Error(err, "Failed to create Velero Backup", nameField, veleroBackupName) +// return true, false, err +// } +// logger.Info("Velero Backup successfully created", nameField, veleroBackupName) + +// // TODO +// return false, false, nil +// } + +// // UpdateStatusAfterVeleroBackupCreation +// // +// // TODO +// func (r *NonAdminBackupReconciler) UpdateStatusAfterVeleroBackupCreation(ctx context.Context, logrLogger logr.Logger, nab *nacv1alpha1.NonAdminBackup) (exitReconcile bool, requeueReconcile bool, errorReconcile error) { +// logger := logrLogger.WithValues("UpdateStatusAfterVeleroBackupCreation", nab.Namespace) + +// _, errUpdate := function.UpdateNonAdminPhase(ctx, r.Client, logger, nab, nacv1alpha1.NonAdminBackupPhaseCreated) +// if errUpdate != nil { +// logger.Error(errUpdate, "Unable to set NonAdminBackup Phase: Created", nameField, nab.Name, constant.NameSpaceString, nab.Namespace) +// return true, false, errUpdate +// } +// _, errUpdate = function.UpdateNonAdminBackupCondition(ctx, r.Client, logger, nab, nacv1alpha1.NonAdminConditionAccepted, metav1.ConditionTrue, "BackupAccepted", "Backup accepted") +// if errUpdate != nil { +// logger.Error(errUpdate, "Unable to set BackupAccepted Condition: True", nameField, nab.Name, constant.NameSpaceString, nab.Namespace) +// return true, false, errUpdate +// } +// _, errUpdate = function.UpdateNonAdminBackupCondition(ctx, r.Client, logger, nab, nacv1alpha1.NonAdminConditionQueued, metav1.ConditionTrue, "BackupScheduled", "Created Velero Backup object") +// if errUpdate != nil { +// logger.Error(errUpdate, "Unable to set BackupQueued Condition: True", nameField, nab.Name, constant.NameSpaceString, nab.Namespace) +// return true, false, errUpdate +// } + +// // TODO +// return false, false, nil +// } + +// // UpdateSpecStatus updates the Spec and Status from the NonAdminBackup. +// // +// // Parameters: +// // +// // ctx: Context for the request. +// // log: Logger instance for logging messages. +// // nab: Pointer to the NonAdminBackup object. +// // +// // The function generates a name for the Velero Backup object based on the provided namespace and name. +// // It then checks if a Velero Backup object with that name already exists. If it does not exist, it creates a new one +// // and updates NonAdminBackup Status. Otherwise, updates NonAdminBackup VeleroBackup Status based on Velero Backup object Status. 
+// // The function returns boolean values indicating whether the reconciliation loop should exit or requeue +// func (r *NonAdminBackupReconciler) UpdateSpecStatus(ctx context.Context, logrLogger logr.Logger, nab *nacv1alpha1.NonAdminBackup, veleroBackup velerov1api.Backup) (exitReconcile bool, requeueReconcile bool, errorReconcile error) { +// logger := logrLogger.WithValues("UpdateSpecStatus", nab.Namespace) + +// // We should not update already created VeleroBackup object. +// // The VeleroBackup within NonAdminBackup will +// // be reverted back to the previous state - the state which created VeleroBackup +// // in a first place, so they will be in sync. +// // logger.Info("Backup already exists, updating NonAdminBackup status", nameField, veleroBackup.Name) +// updatedNab, errBackupUpdate := function.UpdateNonAdminBackupFromVeleroBackup(ctx, r.Client, logger, nab, &veleroBackup) +// // Regardless if the status was updated or not, we should not +// // requeue here as it was only status update. +// if errBackupUpdate != nil { +// return true, false, errBackupUpdate +// } else if updatedNab { +// logger.V(1).Info("NonAdminBackup CR - Requeue after Status Update") +// return false, true, nil +// } +// return true, false, nil +// } + // SetupWithManager sets up the controller with the Manager. func (r *NonAdminBackupReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). @@ -329,7 +473,7 @@ func (r *NonAdminBackupReconciler) SetupWithManager(mgr ctrl.Manager) error { WithEventFilter(predicate.CompositePredicate{ NonAdminBackupPredicate: predicate.NonAdminBackupPredicate{}, VeleroBackupPredicate: predicate.VeleroBackupPredicate{ - OadpVeleroNamespace: constant.OadpNamespace, + OadpVeleroNamespace: function.GetOADPNamespace(), }, Context: r.Context, }). diff --git a/internal/controller/nonadminbackup_controller_test.go b/internal/controller/nonadminbackup_controller_test.go index 2d92b0d..3ca4d74 100644 --- a/internal/controller/nonadminbackup_controller_test.go +++ b/internal/controller/nonadminbackup_controller_test.go @@ -212,8 +212,8 @@ var _ = ginkgo.Describe("Test NonAdminBackup Reconcile function", func() { { Type: "Accepted", Status: metav1.ConditionTrue, - Reason: "Validated", - Message: "Valid Backup config", + Reason: "BackupAccepted", + Message: "Backup accepted", }, { Type: "Queued", @@ -226,5 +226,7 @@ var _ = ginkgo.Describe("Test NonAdminBackup Reconcile function", func() { }), // TODO should not have loop start again and again in logs // TODO 3 condition logs, only 2 in CR status? 
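The "3 condition logs, only 2 in CR status?" TODO has a likely answer in apimachinery itself: `meta.SetStatusCondition` keys conditions by `Type`, so writing `Accepted` twice (once with reason `Validated`, once with `BackupAccepted`) updates one entry in place instead of appending. A standalone sketch of that behavior (plain Go, not part of the patch):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	conditions := []metav1.Condition{}
	// First write creates the "Accepted" entry.
	meta.SetStatusCondition(&conditions, metav1.Condition{
		Type: "Accepted", Status: metav1.ConditionTrue,
		Reason: "Validated", Message: "Valid Backup config",
	})
	// Second write to the same Type overwrites that entry in place.
	meta.SetStatusCondition(&conditions, metav1.Condition{
		Type: "Accepted", Status: metav1.ConditionTrue,
		Reason: "BackupAccepted", Message: "Backup accepted",
	})
	// A different Type appends a second entry.
	meta.SetStatusCondition(&conditions, metav1.Condition{
		Type: "Queued", Status: metav1.ConditionTrue,
		Reason: "BackupScheduled", Message: "Created Velero Backup object",
	})
	fmt.Println(len(conditions)) // 2: three Set calls, two condition types
}
```

Three `Set` calls, two distinct `Type` values, two entries, which matches what the test asserts.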
+ + // TODO create tests for single reconciles, so we can test https://github.com/migtools/oadp-non-admin/blob/master/docs/design/nab_status_update.md ) }) diff --git a/internal/predicate/nonadminbackup_predicate.go b/internal/predicate/nonadminbackup_predicate.go index d3309a3..2163bb5 100644 --- a/internal/predicate/nonadminbackup_predicate.go +++ b/internal/predicate/nonadminbackup_predicate.go @@ -73,7 +73,7 @@ func (NonAdminBackupPredicate) Update(ctx context.Context, evt event.UpdateEvent // New phase set, reconcile if oldPhase == constant.EmptyString && newPhase != constant.EmptyString { - logger.V(1).Info("NonAdminBsackupPredicate: Accepted Update event - phase change") + logger.V(1).Info("NonAdminBackupPredicate: Accepted Update event - phase change") return true } else if oldPhase == nacv1alpha1.NonAdminBackupPhaseNew && newPhase == nacv1alpha1.NonAdminBackupPhaseCreated { logger.V(1).Info("NonAdminBackupPredicate: Accepted Update event - phase created") From 0544117984706c1c2227e276cb2eacd344f6344b Mon Sep 17 00:00:00 2001 From: Mateus Oliveira Date: Fri, 30 Aug 2024 18:26:07 -0300 Subject: [PATCH 03/17] fixup! fix: add integration tests for NAB Signed-off-by: Mateus Oliveira --- Makefile | 2 +- cmd/main.go | 2 + internal/common/function/function.go | 10 +- internal/common/function/function_test.go | 4 +- .../controller/nonadminbackup_controller.go | 269 ++++--------- .../nonadminbackup_controller_test.go | 380 ++++++++++++++---- internal/controller/suite_test.go | 16 +- .../predicate/nonadminbackup_predicate.go | 3 + 8 files changed, 387 insertions(+), 299 deletions(-) diff --git a/Makefile b/Makefile index 74b59ae..37fa5af 100644 --- a/Makefile +++ b/Makefile @@ -225,7 +225,7 @@ editorconfig: $(LOCALBIN) ## Download editorconfig locally if necessary. } # TODO increase to 60? -COVERAGE_THRESHOLD=30 +COVERAGE_THRESHOLD=40 .PHONY: ci ci: simulation-test lint docker-build hadolint check-generate check-manifests ec check-images ## Run all project continuous integration (CI) checks locally. diff --git a/cmd/main.go b/cmd/main.go index d865bbc..6c09ddb 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -135,6 +135,8 @@ func main() { if err = (&controller.NonAdminBackupReconciler{ Client: mgr.GetClient(), Scheme: mgr.GetScheme(), + // TODO context does not need to be set here??? + // add env var here?? so it is only called once still and is easy to test }).SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "NonAdminBackup") os.Exit(1) diff --git a/internal/common/function/function.go b/internal/common/function/function.go index 192a97d..135fd3c 100644 --- a/internal/common/function/function.go +++ b/internal/common/function/function.go @@ -113,7 +113,7 @@ func GetBackupSpecFromNonAdminBackup(nonAdminBackup *nacv1alpha1.NonAdminBackup) veleroBackupSpec.IncludedNamespaces = []string{nonAdminBackup.Namespace} } else { if !containsOnlyNamespace(veleroBackupSpec.IncludedNamespaces, nonAdminBackup.Namespace) { - return nil, fmt.Errorf("spec.backupSpec.IncludedNamespaces can not contain namespaces other then: %s", nonAdminBackup.Namespace) + return nil, fmt.Errorf("spec.backupSpec.IncludedNamespaces can not contain namespaces other than: %s", nonAdminBackup.Namespace) } } @@ -228,10 +228,10 @@ func UpdateNonAdminBackupCondition(ctx context.Context, r client.Client, logger }, ) - // TODO ... Condition *set* to... ? 
- logger.V(1).Info(fmt.Sprintf("NonAdminBackup Condition to: %s", condition)) - logger.V(1).Info(fmt.Sprintf("NonAdminBackup Condition Reason to: %s", reason)) - logger.V(1).Info(fmt.Sprintf("NonAdminBackup Condition Message to: %s", message)) + // TODO these logs should be after err check, no? + logger.V(1).Info(fmt.Sprintf("NonAdminBackup Condition set to: %s", condition)) + logger.V(1).Info(fmt.Sprintf("NonAdminBackup Condition Reason set to: %s", reason)) + logger.V(1).Info(fmt.Sprintf("NonAdminBackup Condition Message set to: %s", message)) // Update NAB status if err := r.Status().Update(ctx, nab); err != nil { diff --git a/internal/common/function/function_test.go b/internal/common/function/function_test.go index dcec107..9f1e645 100644 --- a/internal/common/function/function_test.go +++ b/internal/common/function/function_test.go @@ -219,7 +219,7 @@ func TestGetBackupSpecFromNonAdminBackup(t *testing.T) { assert.Error(t, err) assert.Nil(t, backupSpec) - assert.Equal(t, "spec.backupSpec.IncludedNamespaces can not contain namespaces other then: namespace2", err.Error()) + assert.Equal(t, "spec.backupSpec.IncludedNamespaces can not contain namespaces other than: namespace2", err.Error()) backupSpecInput = &velerov1api.BackupSpec{ IncludedNamespaces: []string{"namespace3"}, @@ -237,7 +237,7 @@ func TestGetBackupSpecFromNonAdminBackup(t *testing.T) { assert.Error(t, err) assert.Nil(t, backupSpec) - assert.Equal(t, "spec.backupSpec.IncludedNamespaces can not contain namespaces other then: namespace4", err.Error()) + assert.Equal(t, "spec.backupSpec.IncludedNamespaces can not contain namespaces other than: namespace4", err.Error()) } func TestGenerateVeleroBackupName(t *testing.T) { diff --git a/internal/controller/nonadminbackup_controller.go b/internal/controller/nonadminbackup_controller.go index 6b3164e..dc23a44 100644 --- a/internal/controller/nonadminbackup_controller.go +++ b/internal/controller/nonadminbackup_controller.go @@ -27,6 +27,7 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" @@ -43,14 +44,13 @@ import ( // NonAdminBackupReconciler reconciles a NonAdminBackup object type NonAdminBackupReconciler struct { client.Client - Scheme *runtime.Scheme + Scheme *runtime.Scheme + // needed??? Context context.Context } -const ( - nameField = "Name" - requeueTimeSeconds = 10 -) +// TODO TOO MUCH!!!!!!!!!!!!!!! +const requeueTimeSeconds = 10 // +kubebuilder:rbac:groups=nac.oadp.openshift.io,resources=nonadminbackups,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=nac.oadp.openshift.io,resources=nonadminbackups/status,verbs=get;update;patch @@ -63,30 +63,46 @@ const ( func (r *NonAdminBackupReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { rLog := log.FromContext(ctx) logger := rLog.WithValues("NonAdminBackup", req.NamespacedName) - // TODO remove duplication in logs - // remove >>>? 
- logger.V(1).Info(">>> Reconcile NonAdminBackup - loop start") + logger.V(1).Info("NonAdminBackup Reconcile start") // Get the NonAdminBackup object nab := nacv1alpha1.NonAdminBackup{} err := r.Get(ctx, req.NamespacedName, &nab) - - // Bail out when the Non Admin Backup reconcile was triggered, when the NAB got deleted - // Reconcile loop was triggered when Velero Backup object got updated and NAB isn't there if err != nil { if apierrors.IsNotFound(err) { - // k/v's are noise? - logger.V(1).Info("Non existing NonAdminBackup CR", nameField, req.Name, constant.NameSpaceString, req.Namespace) + // Delete event triggered this reconcile + logger.V(1).Info("Non existing NonAdminBackup") return ctrl.Result{}, nil - // should not error? } - logger.Error(err, "Unable to fetch NonAdminBackup CR", nameField, req.Name, constant.NameSpaceString, req.Namespace) + logger.Error(err, "Unable to fetch NonAdminBackup") return ctrl.Result{}, err } - // TODO why do we need Requeue? is not that anti performance? + // requeue on every change is the correct pattern! document this + // TODO refactor idea: do not enter on sub functions again + // TODO refactor idea: sub functions can not exit clean, that should be main func responsibility. Remove reconcileExit return param + // TODO refactor idea: + // requeue, err := r.Init(ctx, rLog, &nab) + // if err != nil { + // // handle err smart way to retry when wanted? + // return ctrl.Result{}, reconcile.TerminalError(err) + // } + // if requeue { + // return ctrl.Result{Requeue: true}, nil + // } + // SOURCE https://github.com/kubernetes-sigs/controller-runtime/blob/e6c3d139d2b6c286b1dbba6b6a95919159cfe655/pkg/internal/controller/controller.go#L286 + // Alright, after studies, I believe there are only 2 possibilities (DEV eyes): + // - re trigger reconcile + // AddRateLimited ([requeue and nill error] or [normal error]) + // will re trigger reconcile immediately, after 1 second, after 2 seconds, etc + // AddAfter ([RequeueAfter and nill error]) + // will re trigger reconcile after time + // - will not re trigger reconcile + // Forget (finish process) ([empty result and nill error] or [terminal error]) + reconcileExit, reconcileRequeue, reconcileErr := r.Init(ctx, rLog, &nab) if reconcileRequeue { + // TODO EITHER Requeue or RequeueAfter, both together do not make sense!!! 
return ctrl.Result{Requeue: true, RequeueAfter: requeueTimeSeconds * time.Second}, reconcileErr } else if reconcileExit && reconcileErr != nil { return ctrl.Result{}, reconcile.TerminalError(reconcileErr) @@ -104,37 +120,6 @@ func (r *NonAdminBackupReconciler) Reconcile(ctx context.Context, req ctrl.Reque return ctrl.Result{}, nil } - // TODO refactor idea - // veleroBackupName := function.GenerateVeleroBackupName(nab.Namespace, nab.Name) - // if veleroBackupName == constant.EmptyString { - // return ctrl.Result{}, reconcile.TerminalError(errors.New("unable to generate Velero Backup name")) - // } - // oadpNamespace := function.GetOADPNamespace() - // veleroBackup := velerov1api.Backup{} - // err = r.Get(ctx, client.ObjectKey{Namespace: oadpNamespace, Name: veleroBackupName}, &veleroBackup) - // if err != nil { - // if !apierrors.IsNotFound(err) { - // logger.Error(err, "Unable to fetch VeleroBackup") - // return ctrl.Result{}, reconcile.TerminalError(err) - // } - // reconcileExit, reconcileRequeue, reconcileErr = r.CreateVeleroBackup(ctx, rLog, &nab, veleroBackupName, oadpNamespace) - // if reconcileRequeue { - // return ctrl.Result{Requeue: true, RequeueAfter: requeueTimeSeconds * time.Second}, reconcileErr - // } else if reconcileExit && reconcileErr != nil { - // return ctrl.Result{}, reconcile.TerminalError(reconcileErr) - // } else if reconcileExit { - // return ctrl.Result{}, nil - // } - // reconcileExit, reconcileRequeue, reconcileErr = r.UpdateStatusAfterVeleroBackupCreation(ctx, rLog, &nab) - // if reconcileRequeue { - // return ctrl.Result{Requeue: true, RequeueAfter: requeueTimeSeconds * time.Second}, reconcileErr - // } else if reconcileExit && reconcileErr != nil { - // return ctrl.Result{}, reconcile.TerminalError(reconcileErr) - // } else if reconcileExit { - // return ctrl.Result{}, nil - // } - // } - reconcileExit, reconcileRequeue, reconcileErr = r.UpdateSpecStatus(ctx, rLog, &nab) if reconcileRequeue { return ctrl.Result{Requeue: true, RequeueAfter: requeueTimeSeconds * time.Second}, reconcileErr @@ -144,7 +129,6 @@ func (r *NonAdminBackupReconciler) Reconcile(ctx context.Context, req ctrl.Reque return ctrl.Result{}, nil } - logger.V(1).Info(">>> Reconcile NonAdminBackup - loop end") return ctrl.Result{}, nil } @@ -161,24 +145,22 @@ func (r *NonAdminBackupReconciler) Reconcile(ctx context.Context, req ctrl.Reque // It then returns boolean values indicating whether the reconciliation loop should requeue or exit // and error value whether the status was updated successfully. 
func (r *NonAdminBackupReconciler) Init(ctx context.Context, logrLogger logr.Logger, nab *nacv1alpha1.NonAdminBackup) (exitReconcile bool, requeueReconcile bool, errorReconcile error) { - // TODO - logger := logrLogger.WithValues("Init", nab.Namespace) - // Set initial Phase + logger := logrLogger.WithValues("Init NonAdminBackup", types.NamespacedName{Name: nab.Name, Namespace: nab.Namespace}) + if nab.Status.Phase == constant.EmptyString { - // Phase: New + // // Set initial Phase to New updatedStatus, errUpdate := function.UpdateNonAdminPhase(ctx, r.Client, logger, nab, nacv1alpha1.NonAdminBackupPhaseNew) - if errUpdate != nil { - logger.Error(errUpdate, "Unable to set NonAdminBackup Phase: New", nameField, nab.Name, constant.NameSpaceString, nab.Namespace) + logger.Error(errUpdate, "Unable to set NonAdminBackup Phase: New") return true, false, errUpdate } - if updatedStatus { - logger.V(1).Info("NonAdminBackup CR - Requeue after Phase Update") + logger.V(1).Info("NonAdminBackup - Requeue after Phase Update") return false, true, nil } } + logger.V(1).Info("NonAdminBackup Status.Phase already initialized") return false, false, nil } @@ -196,25 +178,19 @@ func (r *NonAdminBackupReconciler) Init(ctx context.Context, logrLogger logr.Log // If the BackupSpec is invalid, the function sets the NonAdminBackup condition to "InvalidBackupSpec". THIS DOES NOT HAPPEN // If the BackupSpec is valid, the function sets the NonAdminBackup condition to "BackupAccepted". remove? func (r *NonAdminBackupReconciler) ValidateSpec(ctx context.Context, logrLogger logr.Logger, nab *nacv1alpha1.NonAdminBackup) (exitReconcile bool, requeueReconcile bool, errorReconcile error) { - // TODO - logger := logrLogger.WithValues("ValidateSpec", nab.Namespace) + logger := logrLogger.WithValues("ValidateSpec NonAdminBackup", types.NamespacedName{Name: nab.Name, Namespace: nab.Namespace}) // Main Validation point for the VeleroBackup included in NonAdminBackup spec _, err := function.GetBackupSpecFromNonAdminBackup(nab) if err != nil { - // Use errMsg if errMsgFromErr is not available, otherwise use errMsgFromErr - errMsg := "NonAdminBackup CR does not contain valid BackupSpec" - if errMsgFromErr := err.Error(); errMsgFromErr != "" { - errMsg = errMsgFromErr - } - // TODO logs noise to user - // every logger error logs a stacktrace - logger.Error(err, errMsg) + logger.Error(err, "NonAdminBackup Spec is not valid") + // this should be one call: update both phase and condition at THE SAME TIME + // OR do requeue, CONDITION is never set to false updatedStatus, errUpdateStatus := function.UpdateNonAdminPhase(ctx, r.Client, logger, nab, nacv1alpha1.NonAdminBackupPhaseBackingOff) if errUpdateStatus != nil { - logger.Error(errUpdateStatus, "Unable to set NonAdminBackup Phase: BackingOff", nameField, nab.Name, constant.NameSpaceString, nab.Namespace) + logger.Error(errUpdateStatus, "Unable to set NonAdminBackup Phase: BackingOff") return true, false, errUpdateStatus } else if updatedStatus { // We do not requeue - the State was set to BackingOff @@ -222,9 +198,9 @@ func (r *NonAdminBackupReconciler) ValidateSpec(ctx context.Context, logrLogger } // Continue. 
VeleroBackup looks fine, setting Accepted condition to false - updatedCondition, errUpdateCondition := function.UpdateNonAdminBackupCondition(ctx, r.Client, logger, nab, nacv1alpha1.NonAdminConditionAccepted, metav1.ConditionFalse, "InvalidBackupSpec", errMsg) + updatedCondition, errUpdateCondition := function.UpdateNonAdminBackupCondition(ctx, r.Client, logger, nab, nacv1alpha1.NonAdminConditionAccepted, metav1.ConditionFalse, "InvalidBackupSpec", "NonAdminBackup does not contain valid BackupSpec") if errUpdateCondition != nil { - logger.Error(errUpdateCondition, "Unable to set BackupAccepted Condition: False", nameField, nab.Name, constant.NameSpaceString, nab.Namespace) + logger.Error(errUpdateCondition, "Unable to set BackupAccepted Condition: False") return true, false, errUpdateCondition } else if updatedCondition { return true, false, nil @@ -233,22 +209,21 @@ func (r *NonAdminBackupReconciler) ValidateSpec(ctx context.Context, logrLogger // We do not requeue - this was an error from getting Spec from NAB return true, false, err } - logger.V(1).Info("NonAdminBackup CR Spec validated successfully") // TODO is this needed? from design, does not seem a valid condition - // updatedStatus, errUpdateStatus := function.UpdateNonAdminBackupCondition(ctx, r.Client, logger, nab, nacv1alpha1.NonAdminConditionAccepted, metav1.ConditionTrue, "Validated", "Valid Backup config") - // if errUpdateStatus != nil { - // logger.Error(errUpdateStatus, "Unable to set BackupAccepted Condition: True", nameField, nab.Name, constant.NameSpaceString, nab.Namespace) - // return true, false, errUpdateStatus - // } else if updatedStatus { - // // We do requeue - The VeleroBackup got validated and next reconcile loop will continue - // // with further work on the VeleroBackup such as creating it - // return false, true, nil - // } - - // TODO move VeleroBackup Spec update to here? + // this keeps being called... + // this or UpdateNonAdminBackupCondition(..., "BackupAccepted", "Backup accepted") should be deleted + updatedStatus, errUpdateStatus := function.UpdateNonAdminBackupCondition(ctx, r.Client, logger, nab, nacv1alpha1.NonAdminConditionAccepted, metav1.ConditionTrue, "Validated", "Valid Backup config") + if errUpdateStatus != nil { + logger.Error(errUpdateStatus, "Unable to set BackupAccepted Condition: True") + return true, false, errUpdateStatus + } else if updatedStatus { + // We do requeue - The VeleroBackup got validated and next reconcile loop will continue + // with further work on the VeleroBackup such as creating it + return false, true, nil + } - // TODO change? + logger.V(1).Info("NonAdminBackup Spec already validated") return false, false, nil } @@ -265,7 +240,7 @@ func (r *NonAdminBackupReconciler) ValidateSpec(ctx context.Context, logrLogger // and updates NonAdminBackup Status. Otherwise, updates NonAdminBackup VeleroBackup Status based on Velero Backup object Status. 
// The function returns boolean values indicating whether the reconciliation loop should exit or requeue func (r *NonAdminBackupReconciler) UpdateSpecStatus(ctx context.Context, logrLogger logr.Logger, nab *nacv1alpha1.NonAdminBackup) (exitReconcile bool, requeueReconcile bool, errorReconcile error) { - logger := logrLogger.WithValues("UpdateSpecStatus", nab.Namespace) + logger := logrLogger.WithValues("UpdateSpecStatus NonAdminBackup", types.NamespacedName{Name: nab.Name, Namespace: nab.Namespace}) veleroBackupName := function.GenerateVeleroBackupName(nab.Namespace, nab.Name) @@ -275,16 +250,17 @@ func (r *NonAdminBackupReconciler) UpdateSpecStatus(ctx context.Context, logrLog oadpNamespace := function.GetOADPNamespace() veleroBackup := velerov1api.Backup{} + veleroBackupLogger := logger.WithValues("VeleroBackup", types.NamespacedName{Name: veleroBackupName, Namespace: oadpNamespace}) err := r.Get(ctx, client.ObjectKey{Namespace: oadpNamespace, Name: veleroBackupName}, &veleroBackup) if err != nil { if !apierrors.IsNotFound(err) { - logger.Error(err, "Unable to fetch VeleroBackup") + veleroBackupLogger.Error(err, "Unable to fetch VeleroBackup") return true, false, err } // Create VeleroBackup // Don't update phase nor conditions yet. // Those will be updated when then Reconcile loop is triggered by the VeleroBackup object - logger.Info("No backup found", nameField, veleroBackupName) + veleroBackupLogger.Info("VeleroBackup not found") // We don't validate error here. // This was already validated in the ValidateVeleroBackupSpec @@ -316,24 +292,24 @@ func (r *NonAdminBackupReconciler) UpdateSpecStatus(ctx context.Context, logrLog _, err = controllerutil.CreateOrPatch(ctx, r.Client, &veleroBackup, nil) if err != nil { - logger.Error(err, "Failed to create backup", nameField, veleroBackupName) + veleroBackupLogger.Error(err, "Failed to create VeleroBackup") return true, false, err } - logger.Info("VeleroBackup successfully created", nameField, veleroBackupName) + veleroBackupLogger.Info("VeleroBackup successfully created") _, errUpdate := function.UpdateNonAdminPhase(ctx, r.Client, logger, nab, nacv1alpha1.NonAdminBackupPhaseCreated) if errUpdate != nil { - logger.Error(errUpdate, "Unable to set NonAdminBackup Phase: Created", nameField, nab.Name, constant.NameSpaceString, nab.Namespace) + logger.Error(errUpdate, "Unable to set NonAdminBackup Phase: Created") return true, false, errUpdate } _, errUpdate = function.UpdateNonAdminBackupCondition(ctx, r.Client, logger, nab, nacv1alpha1.NonAdminConditionAccepted, metav1.ConditionTrue, "BackupAccepted", "Backup accepted") if errUpdate != nil { - logger.Error(errUpdate, "Unable to set BackupAccepted Condition: True", nameField, nab.Name, constant.NameSpaceString, nab.Namespace) + logger.Error(errUpdate, "Unable to set BackupAccepted Condition: True") return true, false, errUpdate } _, errUpdate = function.UpdateNonAdminBackupCondition(ctx, r.Client, logger, nab, nacv1alpha1.NonAdminConditionQueued, metav1.ConditionTrue, "BackupScheduled", "Created Velero Backup object") if errUpdate != nil { - logger.Error(errUpdate, "Unable to set BackupQueued Condition: True", nameField, nab.Name, constant.NameSpaceString, nab.Namespace) + logger.Error(errUpdate, "Unable to set BackupQueued Condition: True") return true, false, errUpdate } @@ -343,127 +319,20 @@ func (r *NonAdminBackupReconciler) UpdateSpecStatus(ctx context.Context, logrLog // The VeleroBackup within NonAdminBackup will // be reverted back to the previous state - the state which created 
VeleroBackup // in a first place, so they will be in sync. - logger.Info("Backup already exists, updating NonAdminBackup status", nameField, veleroBackupName) + veleroBackupLogger.Info("VeleroBackup already exists, updating NonAdminBackup status") updatedNab, errBackupUpdate := function.UpdateNonAdminBackupFromVeleroBackup(ctx, r.Client, logger, nab, &veleroBackup) // Regardless if the status was updated or not, we should not // requeue here as it was only status update. if errBackupUpdate != nil { return true, false, errBackupUpdate } else if updatedNab { - logger.V(1).Info("NonAdminBackup CR - Rqueue after Status Update") + logger.V(1).Info("NonAdminBackup - Requeue after Status Update") return false, true, nil } return true, false, nil } -// TODO refactor idea -// // CreateVeleroBackup -// // -// // TODO -// func (r *NonAdminBackupReconciler) CreateVeleroBackup(ctx context.Context, logrLogger logr.Logger, nab *nacv1alpha1.NonAdminBackup, veleroBackupName string, oadpNamespace string) (exitReconcile bool, requeueReconcile bool, errorReconcile error) { -// logger := logrLogger.WithValues("CreateVeleroBackup", nab.Namespace) - -// // Create VeleroBackup -// // Don't update phase nor conditions yet. -// // Those will be updated when then Reconcile loop is triggered by the VeleroBackup object -// logger.Info("No backup found", nameField, veleroBackupName) - -// // We don't validate error here. -// // This was already validated in the ValidateVeleroBackupSpec -// backupSpec, errBackup := function.GetBackupSpecFromNonAdminBackup(nab) - -// if errBackup != nil { -// // Should never happen as it was already checked -// return true, false, errBackup -// } - -// veleroBackup := velerov1api.Backup{ -// ObjectMeta: metav1.ObjectMeta{ -// Name: veleroBackupName, -// Namespace: oadpNamespace, -// }, -// Spec: *backupSpec, -// } - -// // Ensure labels are set for the Backup object -// existingLabels := veleroBackup.Labels -// naManagedLabels := function.AddNonAdminLabels(existingLabels) -// veleroBackup.Labels = naManagedLabels - -// // Ensure annotations are set for the Backup object -// existingAnnotations := veleroBackup.Annotations -// ownerUUID := string(nab.ObjectMeta.UID) -// nabManagedAnnotations := function.AddNonAdminBackupAnnotations(nab.Namespace, nab.Name, ownerUUID, existingAnnotations) -// veleroBackup.Annotations = nabManagedAnnotations - -// _, err := controllerutil.CreateOrPatch(ctx, r.Client, &veleroBackup, nil) -// if err != nil { -// logger.Error(err, "Failed to create Velero Backup", nameField, veleroBackupName) -// return true, false, err -// } -// logger.Info("Velero Backup successfully created", nameField, veleroBackupName) - -// // TODO -// return false, false, nil -// } - -// // UpdateStatusAfterVeleroBackupCreation -// // -// // TODO -// func (r *NonAdminBackupReconciler) UpdateStatusAfterVeleroBackupCreation(ctx context.Context, logrLogger logr.Logger, nab *nacv1alpha1.NonAdminBackup) (exitReconcile bool, requeueReconcile bool, errorReconcile error) { -// logger := logrLogger.WithValues("UpdateStatusAfterVeleroBackupCreation", nab.Namespace) - -// _, errUpdate := function.UpdateNonAdminPhase(ctx, r.Client, logger, nab, nacv1alpha1.NonAdminBackupPhaseCreated) -// if errUpdate != nil { -// logger.Error(errUpdate, "Unable to set NonAdminBackup Phase: Created", nameField, nab.Name, constant.NameSpaceString, nab.Namespace) -// return true, false, errUpdate -// } -// _, errUpdate = function.UpdateNonAdminBackupCondition(ctx, r.Client, logger, nab, 
nacv1alpha1.NonAdminConditionAccepted, metav1.ConditionTrue, "BackupAccepted", "Backup accepted") -// if errUpdate != nil { -// logger.Error(errUpdate, "Unable to set BackupAccepted Condition: True", nameField, nab.Name, constant.NameSpaceString, nab.Namespace) -// return true, false, errUpdate -// } -// _, errUpdate = function.UpdateNonAdminBackupCondition(ctx, r.Client, logger, nab, nacv1alpha1.NonAdminConditionQueued, metav1.ConditionTrue, "BackupScheduled", "Created Velero Backup object") -// if errUpdate != nil { -// logger.Error(errUpdate, "Unable to set BackupQueued Condition: True", nameField, nab.Name, constant.NameSpaceString, nab.Namespace) -// return true, false, errUpdate -// } - -// // TODO -// return false, false, nil -// } - -// // UpdateSpecStatus updates the Spec and Status from the NonAdminBackup. -// // -// // Parameters: -// // -// // ctx: Context for the request. -// // log: Logger instance for logging messages. -// // nab: Pointer to the NonAdminBackup object. -// // -// // The function generates a name for the Velero Backup object based on the provided namespace and name. -// // It then checks if a Velero Backup object with that name already exists. If it does not exist, it creates a new one -// // and updates NonAdminBackup Status. Otherwise, updates NonAdminBackup VeleroBackup Status based on Velero Backup object Status. -// // The function returns boolean values indicating whether the reconciliation loop should exit or requeue -// func (r *NonAdminBackupReconciler) UpdateSpecStatus(ctx context.Context, logrLogger logr.Logger, nab *nacv1alpha1.NonAdminBackup, veleroBackup velerov1api.Backup) (exitReconcile bool, requeueReconcile bool, errorReconcile error) { -// logger := logrLogger.WithValues("UpdateSpecStatus", nab.Namespace) - -// // We should not update already created VeleroBackup object. -// // The VeleroBackup within NonAdminBackup will -// // be reverted back to the previous state - the state which created VeleroBackup -// // in a first place, so they will be in sync. -// // logger.Info("Backup already exists, updating NonAdminBackup status", nameField, veleroBackup.Name) -// updatedNab, errBackupUpdate := function.UpdateNonAdminBackupFromVeleroBackup(ctx, r.Client, logger, nab, &veleroBackup) -// // Regardless if the status was updated or not, we should not -// // requeue here as it was only status update. -// if errBackupUpdate != nil { -// return true, false, errBackupUpdate -// } else if updatedNab { -// logger.V(1).Info("NonAdminBackup CR - Requeue after Status Update") -// return false, true, nil -// } -// return true, false, nil -// } +// TODO refactor idea: break in smaller functions: CreateVeleroBackup, UpdateStatusAfterVeleroBackupCreation and UpdateSpecStatus // SetupWithManager sets up the controller with the Manager. 
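The refactor TODO just above names CreateVeleroBackup, UpdateStatusAfterVeleroBackupCreation and UpdateSpecStatus as the smaller pieces. One hypothetical shape for that split, using the project's existing types (a sketch of the idea, not the implementation):

```go
// Hypothetical step contract for the split named in the TODO above: each
// step only reports whether a requeue is needed; Reconcile owns all exits.
type reconcileStep func(ctx context.Context, logger logr.Logger, nab *nacv1alpha1.NonAdminBackup) (requeue bool, err error)

// runSteps would let Reconcile stay a thin loop over the smaller functions.
func (r *NonAdminBackupReconciler) runSteps(ctx context.Context, logger logr.Logger, nab *nacv1alpha1.NonAdminBackup, steps ...reconcileStep) (ctrl.Result, error) {
	for _, step := range steps {
		requeue, err := step(ctx, logger, nab)
		if err != nil {
			// errors stop the loop; Reconcile can wrap them as terminal when
			// a retry would not help (see the requeue notes earlier)
			return ctrl.Result{}, err
		}
		if requeue {
			return ctrl.Result{Requeue: true}, nil
		}
	}
	return ctrl.Result{}, nil
}
```

Reconcile could then reduce to a single `runSteps` call once those functions adopt the two-value signature, which also removes the repeated exit/requeue/error triage blocks.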
func (r *NonAdminBackupReconciler) SetupWithManager(mgr ctrl.Manager) error { diff --git a/internal/controller/nonadminbackup_controller_test.go b/internal/controller/nonadminbackup_controller_test.go index 3ca4d74..5b8f2b5 100644 --- a/internal/controller/nonadminbackup_controller_test.go +++ b/internal/controller/nonadminbackup_controller_test.go @@ -19,6 +19,7 @@ package controller import ( "context" "os" + "time" "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" @@ -26,46 +27,35 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/reconcile" nacv1alpha1 "github.com/migtools/oadp-non-admin/api/v1alpha1" "github.com/migtools/oadp-non-admin/internal/common/constant" ) +const testNonAdminBackupName = "test-non-admin-backup" + type nonAdminBackupReconcileScenario struct { - namespace string - nonAdminBackup string - oadpNamespace string - spec nacv1alpha1.NonAdminBackupSpec - status nacv1alpha1.NonAdminBackupStatus - doNotCreateNonAdminBackup bool + namespace string + oadpNamespace string + spec nacv1alpha1.NonAdminBackupSpec + priorStatus *nacv1alpha1.NonAdminBackupStatus + status nacv1alpha1.NonAdminBackupStatus + result reconcile.Result } -func createTestNonAdminBackup(name string, namespace string, spec nacv1alpha1.NonAdminBackupSpec) *nacv1alpha1.NonAdminBackup { +func createTestNonAdminBackup(namespace string, spec nacv1alpha1.NonAdminBackupSpec) *nacv1alpha1.NonAdminBackup { return &nacv1alpha1.NonAdminBackup{ ObjectMeta: metav1.ObjectMeta{ - Name: name, + Name: testNonAdminBackupName, Namespace: namespace, }, Spec: spec, } } -func ruNonAdminBackupReconcilerUntilExit(r *NonAdminBackupReconciler, scenario nonAdminBackupReconcileScenario) (reconcile.Result, error) { - result, err := r.Reconcile( - context.Background(), - reconcile.Request{NamespacedName: types.NamespacedName{ - Namespace: scenario.namespace, - Name: scenario.nonAdminBackup, - }}, - ) - if err == nil && result.Requeue { - return ruNonAdminBackupReconcilerUntilExit(r, scenario) - } - return result, err -} - -var _ = ginkgo.Describe("Test NonAdminBackup Reconcile function", func() { +var _ = ginkgo.Describe("Test single reconciles of NonAdminBackup Reconcile function", func() { var ( ctx = context.Background() currentTestScenario nonAdminBackupReconcileScenario @@ -89,7 +79,7 @@ var _ = ginkgo.Describe("Test NonAdminBackup Reconcile function", func() { if k8sClient.Get( ctx, types.NamespacedName{ - Name: currentTestScenario.nonAdminBackup, + Name: testNonAdminBackupName, Namespace: currentTestScenario.namespace, }, nonAdminBackup, @@ -105,8 +95,38 @@ var _ = ginkgo.Describe("Test NonAdminBackup Reconcile function", func() { gomega.Expect(k8sClient.Delete(ctx, namespace)).To(gomega.Succeed()) }) - // TODO need to test more reconcile cases... 
- ginkgo.DescribeTable("Reconcile without error", + ginkgo.DescribeTable("Reconcile should NOT return an error on Delete event", + func(scenario nonAdminBackupReconcileScenario) { + updateTestScenario(scenario) + + namespace := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: scenario.namespace, + }, + } + gomega.Expect(k8sClient.Create(ctx, namespace)).To(gomega.Succeed()) + + result, err := (&NonAdminBackupReconciler{ + Client: k8sClient, + Scheme: testEnv.Scheme, + }).Reconcile( + context.Background(), + reconcile.Request{NamespacedName: types.NamespacedName{ + Namespace: scenario.namespace, + Name: testNonAdminBackupName, + }}, + ) + + gomega.Expect(result).To(gomega.Equal(scenario.result)) + gomega.Expect(err).To(gomega.Not(gomega.HaveOccurred())) + }, + ginkgo.Entry("Should accept deletion of NonAdminBackup", nonAdminBackupReconcileScenario{ + namespace: "test-nonadminbackup-reconcile-0", + result: reconcile.Result{}, + }), + ) + + ginkgo.DescribeTable("Reconcile should NOT return an error on Create and Update events", func(scenario nonAdminBackupReconcileScenario) { updateTestScenario(scenario) @@ -117,9 +137,12 @@ var _ = ginkgo.Describe("Test NonAdminBackup Reconcile function", func() { } gomega.Expect(k8sClient.Create(ctx, namespace)).To(gomega.Succeed()) - if !scenario.doNotCreateNonAdminBackup { - nonAdminBackup := createTestNonAdminBackup(scenario.nonAdminBackup, scenario.namespace, scenario.spec) - gomega.Expect(k8sClient.Create(ctx, nonAdminBackup)).To(gomega.Succeed()) + nonAdminBackup := createTestNonAdminBackup(scenario.namespace, scenario.spec) + gomega.Expect(k8sClient.Create(ctx, nonAdminBackup)).To(gomega.Succeed()) + + if scenario.priorStatus != nil { + nonAdminBackup.Status = *scenario.priorStatus + gomega.Expect(k8sClient.Status().Update(ctx, nonAdminBackup)).To(gomega.Succeed()) } if len(scenario.oadpNamespace) > 0 { @@ -132,76 +155,271 @@ var _ = ginkgo.Describe("Test NonAdminBackup Reconcile function", func() { gomega.Expect(k8sClient.Create(ctx, oadpNamespace)).To(gomega.Succeed()) } - r := &NonAdminBackupReconciler{ + result, err := (&NonAdminBackupReconciler{ Client: k8sClient, Scheme: testEnv.Scheme, - } - - result, err := ruNonAdminBackupReconcilerUntilExit(r, scenario) + }).Reconcile( + context.Background(), + reconcile.Request{NamespacedName: types.NamespacedName{ + Namespace: scenario.namespace, + Name: testNonAdminBackupName, + }}, + ) // TODO need to collect logs, so they do not appear in test run // also assert them - - gomega.Expect(result).To(gomega.Equal(reconcile.Result{Requeue: false, RequeueAfter: 0})) + gomega.Expect(result).To(gomega.Equal(scenario.result)) gomega.Expect(err).To(gomega.Not(gomega.HaveOccurred())) - if !scenario.doNotCreateNonAdminBackup { - nonAdminBackup := &nacv1alpha1.NonAdminBackup{} - gomega.Expect(k8sClient.Get( - ctx, - types.NamespacedName{ - Name: currentTestScenario.nonAdminBackup, - Namespace: currentTestScenario.namespace, - }, - nonAdminBackup, - )).To(gomega.Succeed()) - gomega.Expect(nonAdminBackup.Status.Phase).To(gomega.Equal(scenario.status.Phase)) - for index := range nonAdminBackup.Status.Conditions { - gomega.Expect(nonAdminBackup.Status.Conditions[index].Type).To(gomega.Equal(scenario.status.Conditions[index].Type)) - gomega.Expect(nonAdminBackup.Status.Conditions[index].Status).To(gomega.Equal(scenario.status.Conditions[index].Status)) - gomega.Expect(nonAdminBackup.Status.Conditions[index].Reason).To(gomega.Equal(scenario.status.Conditions[index].Reason)) - 
gomega.Expect(nonAdminBackup.Status.Conditions[index].Message).To(gomega.Equal(scenario.status.Conditions[index].Message)) - } + // if !scenario.doNotCreateNonAdminBackup { + // nonAdminBackup := &nacv1alpha1.NonAdminBackup{} + // gomega.Eventually(func() nacv1alpha1.NonAdminBackupPhase { + // k8sClient.Get( + // ctx, + // types.NamespacedName{ + // Name: testNonAdminBackupName, + // Namespace: currentTestScenario.namespace, + // }, + // nonAdminBackup, + // ) + // return nonAdminBackup.Status.Phase + // }, 30*time.Second, 1*time.Second).Should(gomega.Equal(scenario.status.Phase)) + + gomega.Expect(k8sClient.Get( + ctx, + types.NamespacedName{ + Name: testNonAdminBackupName, + Namespace: currentTestScenario.namespace, + }, + nonAdminBackup, + )).To(gomega.Succeed()) + gomega.Expect(nonAdminBackup.Status.Phase).To(gomega.Equal(scenario.status.Phase)) + gomega.Expect(nonAdminBackup.Status.VeleroBackupName).To(gomega.Equal(scenario.status.VeleroBackupName)) + gomega.Expect(nonAdminBackup.Status.VeleroBackupNamespace).To(gomega.Equal(scenario.status.VeleroBackupNamespace)) + gomega.Expect(nonAdminBackup.Status.VeleroBackupStatus).To(gomega.Equal(scenario.status.VeleroBackupStatus)) + + for index := range nonAdminBackup.Status.Conditions { + gomega.Expect(nonAdminBackup.Status.Conditions[index].Type).To(gomega.Equal(scenario.status.Conditions[index].Type)) + gomega.Expect(nonAdminBackup.Status.Conditions[index].Status).To(gomega.Equal(scenario.status.Conditions[index].Status)) + gomega.Expect(nonAdminBackup.Status.Conditions[index].Reason).To(gomega.Equal(scenario.status.Conditions[index].Reason)) + gomega.Expect(nonAdminBackup.Status.Conditions[index].Message).To(gomega.Equal(scenario.status.Conditions[index].Message)) } }, - ginkgo.Entry("Should NOT accept non existing nonAdminBackup", nonAdminBackupReconcileScenario{ - namespace: "test-nonadminbackup-reconcile-1", - nonAdminBackup: "test-nonadminbackup-reconcile-1-cr", - doNotCreateNonAdminBackup: true, - // TODO should have loop end in logs - // TODO unnecessary duplication in logs - // {"NonAdminBackup": {"name":"test-nonadminbackup-reconcile-1-cr","namespace":"test-nonadminbackup-reconcile-1"}, - // "Name": "test-nonadminbackup-reconcile-1-cr", "Namespace": "test-nonadminbackup-reconcile-1"} + ginkgo.Entry("Should accept creation of NonAdminBackup", nonAdminBackupReconcileScenario{ + namespace: "test-nonadminbackup-reconcile-1", + // even without providing spec, this does not fail... + result: reconcile.Result{Requeue: true, RequeueAfter: 10 * time.Second}, + status: nacv1alpha1.NonAdminBackupStatus{ + Phase: nacv1alpha1.NonAdminBackupPhaseNew, + }, + }), + ginkgo.Entry("Should accept update of NonAdminBackup phase", nonAdminBackupReconcileScenario{ + namespace: "test-nonadminbackup-reconcile-2", + spec: nacv1alpha1.NonAdminBackupSpec{ + BackupSpec: &v1.BackupSpec{}, + }, + priorStatus: &nacv1alpha1.NonAdminBackupStatus{ + Phase: nacv1alpha1.NonAdminBackupPhaseNew, + }, + result: reconcile.Result{Requeue: true, RequeueAfter: 10 * time.Second}, + status: nacv1alpha1.NonAdminBackupStatus{ + Phase: nacv1alpha1.NonAdminBackupPhaseNew, + Conditions: []metav1.Condition{ + // Is this a valid Condition??? 
+ { + Type: "Accepted", + Status: metav1.ConditionTrue, + Reason: "Validated", + Message: "Valid Backup config", + }, + }, + }, }), - ginkgo.Entry("Should NOT accept NonAdminBackup with empty backupSpec", nonAdminBackupReconcileScenario{ - namespace: "test-nonadminbackup-reconcile-2", - nonAdminBackup: "test-nonadminbackup-reconcile-2-cr", - spec: nacv1alpha1.NonAdminBackupSpec{}, + ginkgo.Entry("Should accept update of NonAdminBackup Condition", nonAdminBackupReconcileScenario{ + namespace: "test-nonadminbackup-reconcile-3", + oadpNamespace: "test-nonadminbackup-reconcile-3-oadp", + spec: nacv1alpha1.NonAdminBackupSpec{ + BackupSpec: &v1.BackupSpec{}, + }, + priorStatus: &nacv1alpha1.NonAdminBackupStatus{ + Phase: nacv1alpha1.NonAdminBackupPhaseNew, + Conditions: []metav1.Condition{ + // Is this a valid Condition??? + { + Type: "Accepted", + Status: metav1.ConditionTrue, + Reason: "Validated", + Message: "Valid Backup config", + LastTransitionTime: metav1.NewTime(time.Now()), + }, + }, + }, + status: nacv1alpha1.NonAdminBackupStatus{ + // TODO should not have VeleroBackupName and VeleroBackupNamespace? + Phase: nacv1alpha1.NonAdminBackupPhaseCreated, + Conditions: []metav1.Condition{ + { + Type: "Accepted", + Status: metav1.ConditionTrue, + Reason: "BackupAccepted", + Message: "Backup accepted", + }, + { + Type: "Queued", + Status: metav1.ConditionTrue, + Reason: "BackupScheduled", + Message: "Created Velero Backup object", + }, + }, + }, + }), + ginkgo.Entry("Should NOT accept update of NonAdminBackup phase because of empty backupSpec", nonAdminBackupReconcileScenario{ + namespace: "test-nonadminbackup-reconcile-4", + spec: nacv1alpha1.NonAdminBackupSpec{}, + priorStatus: &nacv1alpha1.NonAdminBackupStatus{ + Phase: nacv1alpha1.NonAdminBackupPhaseNew, + }, status: nacv1alpha1.NonAdminBackupStatus{ Phase: nacv1alpha1.NonAdminBackupPhaseBackingOff, }, }), - // TODO should not have loop start again in logs - // TODO error message duplication - // TODO should have loop end in logs - ginkgo.Entry("Should NOT accept NonAdminBackup with includedNamespaces pointing to different namespace", nonAdminBackupReconcileScenario{ - namespace: "test-nonadminbackup-reconcile-3", - nonAdminBackup: "test-nonadminbackup-reconcile-3-cr", + ginkgo.Entry("Should NOT accept update of NonAdminBackup phase because of includedNamespaces pointing to different namespace", nonAdminBackupReconcileScenario{ + namespace: "test-nonadminbackup-reconcile-5", spec: nacv1alpha1.NonAdminBackupSpec{ BackupSpec: &v1.BackupSpec{ IncludedNamespaces: []string{"not-valid"}, }, }, + priorStatus: &nacv1alpha1.NonAdminBackupStatus{ + Phase: nacv1alpha1.NonAdminBackupPhaseNew, + }, status: nacv1alpha1.NonAdminBackupStatus{ Phase: nacv1alpha1.NonAdminBackupPhaseBackingOff, }, }), - // TODO should not have loop start again in logs - // TODO error message duplication - // TODO should have loop end in logs - ginkgo.Entry("Should accept NonAdminBackup and create Velero Backup", nonAdminBackupReconcileScenario{ - namespace: "test-nonadminbackup-reconcile-4", - nonAdminBackup: "test-nonadminbackup-reconcile-4-cr", - oadpNamespace: "test-nonadminbackup-reconcile-4-oadp", + ) +}) + +var _ = ginkgo.Describe("Test full reconciles of NonAdminBackup Reconcile function", func() { + var ( + ctx, cancel = context.WithCancel(context.Background()) + currentTestScenario nonAdminBackupReconcileScenario + updateTestScenario = func(scenario nonAdminBackupReconcileScenario) { + currentTestScenario = scenario + } + ) + + ginkgo.AfterEach(func() { + 
gomega.Expect(os.Unsetenv(constant.NamespaceEnvVar)).To(gomega.Succeed()) + if len(currentTestScenario.oadpNamespace) > 0 { + oadpNamespace := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: currentTestScenario.oadpNamespace, + }, + } + gomega.Expect(k8sClient.Delete(ctx, oadpNamespace)).To(gomega.Succeed()) + } + + // nonAdminBackup := &nacv1alpha1.NonAdminBackup{} + // if k8sClient.Get( + // ctx, + // types.NamespacedName{ + // Name: testNonAdminBackupName, + // Namespace: currentTestScenario.namespace, + // }, + // nonAdminBackup, + // ) == nil { + // gomega.Expect(k8sClient.Delete(ctx, nonAdminBackup)).To(gomega.Succeed()) + // } + + namespace := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: currentTestScenario.namespace, + }, + } + gomega.Expect(k8sClient.Delete(ctx, namespace)).To(gomega.Succeed()) + cancel() + }) + + ginkgo.DescribeTable("Reconcile should NOT return an error", + func(scenario nonAdminBackupReconcileScenario) { + updateTestScenario(scenario) + + gomega.Expect(os.Setenv(constant.NamespaceEnvVar, scenario.oadpNamespace)).To(gomega.Succeed()) + oadpNamespace := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: scenario.oadpNamespace, + }, + } + gomega.Expect(k8sClient.Create(ctx, oadpNamespace)).To(gomega.Succeed()) + + k8sManager, err := ctrl.NewManager(cfg, ctrl.Options{ + Scheme: k8sClient.Scheme(), + }) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + err = (&NonAdminBackupReconciler{ + Client: k8sManager.GetClient(), + Scheme: k8sManager.GetScheme(), + }).SetupWithManager(k8sManager) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + // TODO Be CAREFUL about FLAKES with this approach? + // study ref https://book.kubebuilder.io/cronjob-tutorial/writing-tests + go func() { + defer ginkgo.GinkgoRecover() + err = k8sManager.Start(ctx) + gomega.Expect(err).ToNot(gomega.HaveOccurred(), "failed to run manager") + }() + + namespace := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: scenario.namespace, + }, + } + gomega.Expect(k8sClient.Create(ctx, namespace)).To(gomega.Succeed()) + + nonAdminBackup := createTestNonAdminBackup(scenario.namespace, scenario.spec) + gomega.Expect(k8sClient.Create(ctx, nonAdminBackup)).To(gomega.Succeed()) + + gomega.Eventually(func() (nacv1alpha1.NonAdminBackupPhase, error) { + err := k8sClient.Get( + ctx, + types.NamespacedName{ + Name: testNonAdminBackupName, + Namespace: currentTestScenario.namespace, + }, + nonAdminBackup, + ) + if err != nil { + return "", err + } + return nonAdminBackup.Status.Phase, nil + // TOO MUCH TIME!!!! 
+ }, 30*time.Second, 1*time.Second).Should(gomega.Equal(scenario.status.Phase)) + + gomega.Expect(k8sClient.Get( + ctx, + types.NamespacedName{ + Name: testNonAdminBackupName, + Namespace: currentTestScenario.namespace, + }, + nonAdminBackup, + )).To(gomega.Succeed()) + gomega.Expect(nonAdminBackup.Status.Phase).To(gomega.Equal(scenario.status.Phase)) + gomega.Expect(nonAdminBackup.Status.VeleroBackupName).To(gomega.Equal(scenario.status.VeleroBackupName)) + gomega.Expect(nonAdminBackup.Status.VeleroBackupNamespace).To(gomega.Equal(scenario.status.VeleroBackupNamespace)) + gomega.Expect(nonAdminBackup.Status.VeleroBackupStatus).To(gomega.Equal(scenario.status.VeleroBackupStatus)) + + for index := range nonAdminBackup.Status.Conditions { + gomega.Expect(nonAdminBackup.Status.Conditions[index].Type).To(gomega.Equal(scenario.status.Conditions[index].Type)) + gomega.Expect(nonAdminBackup.Status.Conditions[index].Status).To(gomega.Equal(scenario.status.Conditions[index].Status)) + gomega.Expect(nonAdminBackup.Status.Conditions[index].Reason).To(gomega.Equal(scenario.status.Conditions[index].Reason)) + gomega.Expect(nonAdminBackup.Status.Conditions[index].Message).To(gomega.Equal(scenario.status.Conditions[index].Message)) + } + }, + ginkgo.Entry("Should DO FULL happy path", nonAdminBackupReconcileScenario{ + namespace: "test-nonadminbackup-reconcile-full-1", + oadpNamespace: "test-nonadminbackup-reconcile-full-1-oadp", spec: nacv1alpha1.NonAdminBackupSpec{ BackupSpec: &v1.BackupSpec{}, }, @@ -212,8 +430,8 @@ var _ = ginkgo.Describe("Test NonAdminBackup Reconcile function", func() { { Type: "Accepted", Status: metav1.ConditionTrue, - Reason: "BackupAccepted", - Message: "Backup accepted", + Reason: "Validated", + Message: "Valid Backup config", }, { Type: "Queued", @@ -224,9 +442,5 @@ var _ = ginkgo.Describe("Test NonAdminBackup Reconcile function", func() { }, }, }), - // TODO should not have loop start again and again in logs - // TODO 3 condition logs, only 2 in CR status? 
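On the "TOO MUCH TIME!!!!" note a few hunks up: the positional `30*time.Second, 1*time.Second` pair can be rewritten with gomega's fluent API, which makes the budget explicit and easier to tighten once the controller is known to settle quickly. A sketch with assumed, shorter timings (not part of the patch):

```go
// Same polling assertion as in the test above, with assumed tighter timings.
gomega.Eventually(func() (nacv1alpha1.NonAdminBackupPhase, error) {
	if err := k8sClient.Get(ctx, types.NamespacedName{
		Name:      testNonAdminBackupName,
		Namespace: currentTestScenario.namespace,
	}, nonAdminBackup); err != nil {
		return "", err
	}
	return nonAdminBackup.Status.Phase, nil
}).WithTimeout(10 * time.Second).WithPolling(250 * time.Millisecond).
	Should(gomega.Equal(scenario.status.Phase))
```

The value/error form is the same one the test already uses; `Eventually` keeps polling while the error is non-nil, so a slow envtest start does not fail the assertion early.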
- - // TODO create tests for single reconciles, so we can test https://github.com/migtools/oadp-non-admin/blob/master/docs/design/nab_status_update.md ) }) diff --git a/internal/controller/suite_test.go b/internal/controller/suite_test.go index 63d9578..4f99475 100644 --- a/internal/controller/suite_test.go +++ b/internal/controller/suite_test.go @@ -22,7 +22,7 @@ import ( "runtime" "testing" - ginkgov2 "github.com/onsi/ginkgo/v2" + "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" velerov1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" "k8s.io/client-go/kubernetes/scheme" @@ -43,15 +43,15 @@ var k8sClient client.Client var testEnv *envtest.Environment func TestControllers(t *testing.T) { - gomega.RegisterFailHandler(ginkgov2.Fail) + gomega.RegisterFailHandler(ginkgo.Fail) - ginkgov2.RunSpecs(t, "Controller Suite") + ginkgo.RunSpecs(t, "Controller Suite") } -var _ = ginkgov2.BeforeSuite(func() { - logf.SetLogger(zap.New(zap.WriteTo(ginkgov2.GinkgoWriter), zap.UseDevMode(true))) +var _ = ginkgo.BeforeSuite(func() { + logf.SetLogger(zap.New(zap.WriteTo(ginkgo.GinkgoWriter), zap.UseDevMode(true))) - ginkgov2.By("bootstrapping test environment") + ginkgo.By("bootstrapping test environment") testEnv = &envtest.Environment{ CRDDirectoryPaths: []string{ filepath.Join("..", "..", "config", "crd", "bases"), @@ -86,8 +86,8 @@ var _ = ginkgov2.BeforeSuite(func() { gomega.Expect(k8sClient).NotTo(gomega.BeNil()) }) -var _ = ginkgov2.AfterSuite(func() { - ginkgov2.By("tearing down the test environment") +var _ = ginkgo.AfterSuite(func() { + ginkgo.By("tearing down the test environment") err := testEnv.Stop() gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) diff --git a/internal/predicate/nonadminbackup_predicate.go b/internal/predicate/nonadminbackup_predicate.go index 2163bb5..b15e086 100644 --- a/internal/predicate/nonadminbackup_predicate.go +++ b/internal/predicate/nonadminbackup_predicate.go @@ -76,6 +76,9 @@ func (NonAdminBackupPredicate) Update(ctx context.Context, evt event.UpdateEvent logger.V(1).Info("NonAdminBackupPredicate: Accepted Update event - phase change") return true } else if oldPhase == nacv1alpha1.NonAdminBackupPhaseNew && newPhase == nacv1alpha1.NonAdminBackupPhaseCreated { + // This is HARD to understand and TEST + // even though reconcile will reach Reconcile loop end + // this will trigger a new reconcile logger.V(1).Info("NonAdminBackupPredicate: Accepted Update event - phase created") return true } From a5427efac70882960c7f55e6f435e0b6183012ed Mon Sep 17 00:00:00 2001 From: Mateus Oliveira Date: Mon, 2 Sep 2024 20:58:29 -0300 Subject: [PATCH 04/17] fixup! fix: add integration tests for NAB Signed-off-by: Mateus Oliveira --- Makefile | 5 +- cmd/main.go | 9 +- internal/common/constant/constant.go | 3 - internal/common/function/function.go | 5 +- internal/common/function/function_test.go | 9 +- .../controller/nonadminbackup_controller.go | 22 +- .../nonadminbackup_controller_test.go | 290 +++++++++++++----- internal/handler/velerobackup_handler.go | 6 +- internal/predicate/composite_predicate.go | 6 +- .../predicate/nonadminbackup_predicate.go | 2 + internal/predicate/velerobackup_predicate.go | 25 +- 11 files changed, 261 insertions(+), 121 deletions(-) diff --git a/Makefile b/Makefile index 37fa5af..006cf0d 100644 --- a/Makefile +++ b/Makefile @@ -225,14 +225,15 @@ editorconfig: $(LOCALBIN) ## Download editorconfig locally if necessary. } # TODO increase to 60? 
-COVERAGE_THRESHOLD=40 +COVERAGE_THRESHOLD=50 .PHONY: ci ci: simulation-test lint docker-build hadolint check-generate check-manifests ec check-images ## Run all project continuous integration (CI) checks locally. .PHONY: simulation-test +# TODO coverage is not in sync in what is being actually done... simulation-test: envtest ## Run unit and integration tests. - KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(LOCALBIN) -p path)" go test $$(go list ./... | grep -v /e2e) -coverprofile cover.out + KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(LOCALBIN) -p path)" go test $(shell go list ./... | grep -v oadp-non-admin/test) -test.coverprofile cover.out -test.v -ginkgo.vv @make check-coverage .PHONY: check-coverage diff --git a/cmd/main.go b/cmd/main.go index 6c09ddb..8fa988c 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -99,7 +99,8 @@ func main() { TLSOpts: tlsOpts, }) - if len(function.GetOADPNamespace()) == 0 { + oadpNamespace := function.GetOADPNamespace() + if len(oadpNamespace) == 0 { setupLog.Error(fmt.Errorf("%v environment variable is empty", constant.NamespaceEnvVar), "environment variable must be set") os.Exit(1) } @@ -133,10 +134,10 @@ func main() { } if err = (&controller.NonAdminBackupReconciler{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + OADPNamespace: oadpNamespace, // TODO context does not need to be set here??? - // add env var here?? so it is only called once still and is easy to test }).SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "NonAdminBackup") os.Exit(1) diff --git a/internal/common/constant/constant.go b/internal/common/constant/constant.go index 6f2718c..b5a6dec 100644 --- a/internal/common/constant/constant.go +++ b/internal/common/constant/constant.go @@ -38,9 +38,6 @@ const ( // EmptyString defines a constant for the empty string const EmptyString = "" -// NameSpaceString k8s Namespace string -const NameSpaceString = "Namespace" - // MaxKubernetesNameLength represents maximum length of the name in k8s const MaxKubernetesNameLength = 253 diff --git a/internal/common/function/function.go b/internal/common/function/function.go index 135fd3c..2c0ff2d 100644 --- a/internal/common/function/function.go +++ b/internal/common/function/function.go @@ -102,6 +102,7 @@ func GetBackupSpecFromNonAdminBackup(nonAdminBackup *nacv1alpha1.NonAdminBackup) } if nonAdminBackup.Spec.BackupSpec == nil { + // this should be Kubernetes API validation return nil, fmt.Errorf("BackupSpec is not defined") } @@ -267,6 +268,7 @@ func UpdateNonAdminBackupFromVeleroBackup(ctx context.Context, r client.Client, } // Check if BackupSpec needs to be updated + // avoid spec change? if !reflect.DeepEqual(nab.Spec.BackupSpec, &veleroBackup.Spec) { nab.Spec.BackupSpec = veleroBackup.Spec.DeepCopy() if err := r.Update(ctx, nab); err != nil { @@ -283,9 +285,8 @@ func UpdateNonAdminBackupFromVeleroBackup(ctx context.Context, r client.Client, } // CheckVeleroBackupLabels return true if Velero Backup object has required Non Admin labels, false otherwise -func CheckVeleroBackupLabels(backup *velerov1api.Backup) bool { +func CheckVeleroBackupLabels(labels map[string]string) bool { // TODO also need to check for constant.OadpLabel label? 
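 // A hedged guess at what answering the TODO above could look like (not
 // part of this patch; assumes a constant.OadpLabel constant and an
 // expected value for it exist alongside ManagedByLabel):
 // return labels[constant.ManagedByLabel] == constant.ManagedByLabelValue &&
 // 	labels[constant.OadpLabel] == constant.OadpLabelValue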
- labels := backup.GetLabels() value, exists := labels[constant.ManagedByLabel] return exists && value == constant.ManagedByLabelValue } diff --git a/internal/common/function/function_test.go b/internal/common/function/function_test.go index 9f1e645..bd95f28 100644 --- a/internal/common/function/function_test.go +++ b/internal/common/function/function_test.go @@ -22,6 +22,7 @@ import ( "reflect" "testing" + "github.com/onsi/ginkgo/v2" "github.com/stretchr/testify/assert" velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -35,6 +36,8 @@ import ( "github.com/migtools/oadp-non-admin/internal/common/constant" ) +var _ = ginkgo.Describe("PLACEHOLDER", func() {}) + func TestMergeMaps(t *testing.T) { const ( d = "d" @@ -330,7 +333,7 @@ func TestCheckVeleroBackupLabels(t *testing.T) { }, }, } - assert.True(t, CheckVeleroBackupLabels(backupWithLabel), "Expected backup to have required label") + assert.True(t, CheckVeleroBackupLabels(backupWithLabel.GetLabels()), "Expected backup to have required label") // Backup does not have the required label backupWithoutLabel := &velerov1api.Backup{ @@ -338,7 +341,7 @@ func TestCheckVeleroBackupLabels(t *testing.T) { Labels: map[string]string{}, }, } - assert.False(t, CheckVeleroBackupLabels(backupWithoutLabel), "Expected backup to not have required label") + assert.False(t, CheckVeleroBackupLabels(backupWithoutLabel.GetLabels()), "Expected backup to not have required label") // Backup has the required label with incorrect value backupWithIncorrectValue := &velerov1api.Backup{ @@ -348,5 +351,5 @@ func TestCheckVeleroBackupLabels(t *testing.T) { }, }, } - assert.False(t, CheckVeleroBackupLabels(backupWithIncorrectValue), "Expected backup to not have required label") + assert.False(t, CheckVeleroBackupLabels(backupWithIncorrectValue.GetLabels()), "Expected backup to not have required label") } diff --git a/internal/controller/nonadminbackup_controller.go b/internal/controller/nonadminbackup_controller.go index dc23a44..b983efa 100644 --- a/internal/controller/nonadminbackup_controller.go +++ b/internal/controller/nonadminbackup_controller.go @@ -44,7 +44,8 @@ import ( // NonAdminBackupReconciler reconciles a NonAdminBackup object type NonAdminBackupReconciler struct { client.Client - Scheme *runtime.Scheme + Scheme *runtime.Scheme + OADPNamespace string // needed??? Context context.Context } @@ -70,8 +71,7 @@ func (r *NonAdminBackupReconciler) Reconcile(ctx context.Context, req ctrl.Reque err := r.Get(ctx, req.NamespacedName, &nab) if err != nil { if apierrors.IsNotFound(err) { - // Delete event triggered this reconcile - logger.V(1).Info("Non existing NonAdminBackup") + logger.V(1).Info("NonAdminBackup was deleted") return ctrl.Result{}, nil } logger.Error(err, "Unable to fetch NonAdminBackup") @@ -148,7 +148,9 @@ func (r *NonAdminBackupReconciler) Init(ctx context.Context, logrLogger logr.Log logger := logrLogger.WithValues("Init NonAdminBackup", types.NamespacedName{Name: nab.Name, Namespace: nab.Namespace}) if nab.Status.Phase == constant.EmptyString { - // // Set initial Phase to New + // Set initial Phase to New + // can this function be simplified to return just an error? + // can it return false, nil? 
updatedStatus, errUpdate := function.UpdateNonAdminPhase(ctx, r.Client, logger, nab, nacv1alpha1.NonAdminBackupPhaseNew) if errUpdate != nil { logger.Error(errUpdate, "Unable to set NonAdminBackup Phase: New") @@ -243,15 +245,13 @@ func (r *NonAdminBackupReconciler) UpdateSpecStatus(ctx context.Context, logrLog logger := logrLogger.WithValues("UpdateSpecStatus NonAdminBackup", types.NamespacedName{Name: nab.Name, Namespace: nab.Namespace}) veleroBackupName := function.GenerateVeleroBackupName(nab.Namespace, nab.Name) - if veleroBackupName == constant.EmptyString { return true, false, errors.New("unable to generate Velero Backup name") } - oadpNamespace := function.GetOADPNamespace() veleroBackup := velerov1api.Backup{} - veleroBackupLogger := logger.WithValues("VeleroBackup", types.NamespacedName{Name: veleroBackupName, Namespace: oadpNamespace}) - err := r.Get(ctx, client.ObjectKey{Namespace: oadpNamespace, Name: veleroBackupName}, &veleroBackup) + veleroBackupLogger := logger.WithValues("VeleroBackup", types.NamespacedName{Name: veleroBackupName, Namespace: r.OADPNamespace}) + err := r.Get(ctx, client.ObjectKey{Namespace: r.OADPNamespace, Name: veleroBackupName}, &veleroBackup) if err != nil { if !apierrors.IsNotFound(err) { veleroBackupLogger.Error(err, "Unable to fetch VeleroBackup") @@ -274,7 +274,7 @@ func (r *NonAdminBackupReconciler) UpdateSpecStatus(ctx context.Context, logrLog veleroBackup = velerov1api.Backup{ ObjectMeta: metav1.ObjectMeta{ Name: veleroBackupName, - Namespace: oadpNamespace, + Namespace: r.OADPNamespace, }, Spec: *backupSpec, } @@ -322,7 +322,7 @@ func (r *NonAdminBackupReconciler) UpdateSpecStatus(ctx context.Context, logrLog veleroBackupLogger.Info("VeleroBackup already exists, updating NonAdminBackup status") updatedNab, errBackupUpdate := function.UpdateNonAdminBackupFromVeleroBackup(ctx, r.Client, logger, nab, &veleroBackup) // Regardless if the status was updated or not, we should not - // requeue here as it was only status update. + // requeue here as it was only status update. AND SPEC??? if errBackupUpdate != nil { return true, false, errBackupUpdate } else if updatedNab { @@ -342,7 +342,7 @@ func (r *NonAdminBackupReconciler) SetupWithManager(mgr ctrl.Manager) error { WithEventFilter(predicate.CompositePredicate{ NonAdminBackupPredicate: predicate.NonAdminBackupPredicate{}, VeleroBackupPredicate: predicate.VeleroBackupPredicate{ - OadpVeleroNamespace: function.GetOADPNamespace(), + OadpVeleroNamespace: r.OADPNamespace, }, Context: r.Context, }). diff --git a/internal/controller/nonadminbackup_controller_test.go b/internal/controller/nonadminbackup_controller_test.go index 5b8f2b5..5f9237e 100644 --- a/internal/controller/nonadminbackup_controller_test.go +++ b/internal/controller/nonadminbackup_controller_test.go @@ -18,7 +18,9 @@ package controller import ( "context" - "os" + "log" + "strconv" + // "net/http" "time" "github.com/onsi/ginkgo/v2" @@ -31,7 +33,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" nacv1alpha1 "github.com/migtools/oadp-non-admin/api/v1alpha1" - "github.com/migtools/oadp-non-admin/internal/common/constant" ) const testNonAdminBackupName = "test-non-admin-backup" @@ -43,6 +44,10 @@ type nonAdminBackupReconcileScenario struct { priorStatus *nacv1alpha1.NonAdminBackupStatus status nacv1alpha1.NonAdminBackupStatus result reconcile.Result + // TODO create a struct for each test case! 
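 // One hedged reading of the TODO above (not part of this patch): split the
 // catch-all scenario type so single-reconcile cases stop carrying fields
 // that only the full-loop cases use, e.g.:
 // type singleReconcileScenario struct {
 // 	namespace     string
 // 	oadpNamespace string
 // 	spec          nacv1alpha1.NonAdminBackupSpec
 // 	priorStatus   *nacv1alpha1.NonAdminBackupStatus
 // 	status        nacv1alpha1.NonAdminBackupStatus
 // 	result        reconcile.Result
 // }
 // type fullReconcileScenario struct {
 // 	singleReconcileScenario
 // 	numberOfResourceVersionChanges int
 // }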
+ ctx context.Context + cancel context.CancelFunc + numberOfResourceVersionChanges int } func createTestNonAdminBackup(namespace string, spec nacv1alpha1.NonAdminBackupSpec) *nacv1alpha1.NonAdminBackup { @@ -65,7 +70,6 @@ var _ = ginkgo.Describe("Test single reconciles of NonAdminBackup Reconcile func ) ginkgo.AfterEach(func() { - gomega.Expect(os.Unsetenv(constant.NamespaceEnvVar)).To(gomega.Succeed()) if len(currentTestScenario.oadpNamespace) > 0 { oadpNamespace := &corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{ @@ -146,7 +150,6 @@ var _ = ginkgo.Describe("Test single reconciles of NonAdminBackup Reconcile func } if len(scenario.oadpNamespace) > 0 { - gomega.Expect(os.Setenv(constant.NamespaceEnvVar, scenario.oadpNamespace)).To(gomega.Succeed()) oadpNamespace := &corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{ Name: scenario.oadpNamespace, @@ -156,8 +159,9 @@ var _ = ginkgo.Describe("Test single reconciles of NonAdminBackup Reconcile func } result, err := (&NonAdminBackupReconciler{ - Client: k8sClient, - Scheme: testEnv.Scheme, + Client: k8sClient, + Scheme: testEnv.Scheme, + OADPNamespace: scenario.oadpNamespace, }).Reconcile( context.Background(), reconcile.Request{NamespacedName: types.NamespacedName{ @@ -170,20 +174,6 @@ var _ = ginkgo.Describe("Test single reconciles of NonAdminBackup Reconcile func gomega.Expect(result).To(gomega.Equal(scenario.result)) gomega.Expect(err).To(gomega.Not(gomega.HaveOccurred())) - // if !scenario.doNotCreateNonAdminBackup { - // nonAdminBackup := &nacv1alpha1.NonAdminBackup{} - // gomega.Eventually(func() nacv1alpha1.NonAdminBackupPhase { - // k8sClient.Get( - // ctx, - // types.NamespacedName{ - // Name: testNonAdminBackupName, - // Namespace: currentTestScenario.namespace, - // }, - // nonAdminBackup, - // ) - // return nonAdminBackup.Status.Phase - // }, 30*time.Second, 1*time.Second).Should(gomega.Equal(scenario.status.Phase)) - gomega.Expect(k8sClient.Get( ctx, types.NamespacedName{ @@ -192,6 +182,7 @@ var _ = ginkgo.Describe("Test single reconciles of NonAdminBackup Reconcile func }, nonAdminBackup, )).To(gomega.Succeed()) + gomega.Expect(nonAdminBackup.Status.Phase).To(gomega.Equal(scenario.status.Phase)) gomega.Expect(nonAdminBackup.Status.VeleroBackupName).To(gomega.Equal(scenario.status.VeleroBackupName)) gomega.Expect(nonAdminBackup.Status.VeleroBackupNamespace).To(gomega.Equal(scenario.status.VeleroBackupNamespace)) @@ -206,8 +197,7 @@ var _ = ginkgo.Describe("Test single reconciles of NonAdminBackup Reconcile func }, ginkgo.Entry("Should accept creation of NonAdminBackup", nonAdminBackupReconcileScenario{ namespace: "test-nonadminbackup-reconcile-1", - // even without providing spec, this does not fail... 
- result: reconcile.Result{Requeue: true, RequeueAfter: 10 * time.Second}, + result: reconcile.Result{Requeue: true, RequeueAfter: 10 * time.Second}, status: nacv1alpha1.NonAdminBackupStatus{ Phase: nacv1alpha1.NonAdminBackupPhaseNew, }, @@ -273,6 +263,7 @@ var _ = ginkgo.Describe("Test single reconciles of NonAdminBackup Reconcile func }, }), ginkgo.Entry("Should NOT accept update of NonAdminBackup phase because of empty backupSpec", nonAdminBackupReconcileScenario{ + // TODO WRONG this should be a validator not a code logic namespace: "test-nonadminbackup-reconcile-4", spec: nacv1alpha1.NonAdminBackupSpec{}, priorStatus: &nacv1alpha1.NonAdminBackupStatus{ @@ -281,6 +272,7 @@ var _ = ginkgo.Describe("Test single reconciles of NonAdminBackup Reconcile func status: nacv1alpha1.NonAdminBackupStatus{ Phase: nacv1alpha1.NonAdminBackupPhaseBackingOff, }, + // should not return terminal error? }), ginkgo.Entry("Should NOT accept update of NonAdminBackup phase because of includedNamespaces pointing to different namespace", nonAdminBackupReconcileScenario{ namespace: "test-nonadminbackup-reconcile-5", @@ -295,62 +287,71 @@ var _ = ginkgo.Describe("Test single reconciles of NonAdminBackup Reconcile func status: nacv1alpha1.NonAdminBackupStatus{ Phase: nacv1alpha1.NonAdminBackupPhaseBackingOff, }, + // should not return terminal error? }), ) }) -var _ = ginkgo.Describe("Test full reconciles of NonAdminBackup Reconcile function", func() { +var _ = ginkgo.Describe("Test full reconcile loop of NonAdminBackup Controller", func() { var ( - ctx, cancel = context.WithCancel(context.Background()) currentTestScenario nonAdminBackupReconcileScenario updateTestScenario = func(scenario nonAdminBackupReconcileScenario) { + ctx, cancel := context.WithCancel(context.Background()) + scenario.ctx = ctx + scenario.cancel = cancel currentTestScenario = scenario } ) ginkgo.AfterEach(func() { - gomega.Expect(os.Unsetenv(constant.NamespaceEnvVar)).To(gomega.Succeed()) - if len(currentTestScenario.oadpNamespace) > 0 { - oadpNamespace := &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: currentTestScenario.oadpNamespace, - }, - } - gomega.Expect(k8sClient.Delete(ctx, oadpNamespace)).To(gomega.Succeed()) + oadpNamespace := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: currentTestScenario.oadpNamespace, + }, } - - // nonAdminBackup := &nacv1alpha1.NonAdminBackup{} - // if k8sClient.Get( - // ctx, - // types.NamespacedName{ - // Name: testNonAdminBackupName, - // Namespace: currentTestScenario.namespace, - // }, - // nonAdminBackup, - // ) == nil { - // gomega.Expect(k8sClient.Delete(ctx, nonAdminBackup)).To(gomega.Succeed()) - // } + gomega.Expect(k8sClient.Delete(currentTestScenario.ctx, oadpNamespace)).To(gomega.Succeed()) namespace := &corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{ Name: currentTestScenario.namespace, }, } - gomega.Expect(k8sClient.Delete(ctx, namespace)).To(gomega.Succeed()) - cancel() + gomega.Expect(k8sClient.Delete(currentTestScenario.ctx, namespace)).To(gomega.Succeed()) + + currentTestScenario.cancel() + // https://github.com/kubernetes-sigs/controller-runtime/issues/1280 + // clientTransport := &http.Transport{} + // clientTransport.CloseIdleConnections() + // gomega.Eventually(func() error { + // ret := ctx.Done() + // if ret != nil { + // return fmt.Errorf("not ready :(") + // } + // close(ret) + // return nil + // }, 5*time.Second, 1*time.Millisecond).Should(gomega.BeNil()) + // TODO HOW to wait process finish? 
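 // One pattern that may answer the TODO above (hedged sketch, not part of
 // this patch): have the goroutine that calls k8sManager.Start close a
 // channel when Start returns, then block on that channel after cancel():
 // done := make(chan struct{})
 // go func() {
 // 	defer close(done)
 // 	defer ginkgo.GinkgoRecover()
 // 	gomega.Expect(k8sManager.Start(ctx)).To(gomega.Succeed())
 // }()
 // ...
 // cancel()
 // <-done // unblocks once the manager has fully stopped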
+ // this is still being finished in next step + time.Sleep(1 * time.Second) }) - ginkgo.DescribeTable("Reconcile should NOT return an error", + ginkgo.DescribeTable("Reconcile loop should succeed", func(scenario nonAdminBackupReconcileScenario) { updateTestScenario(scenario) - gomega.Expect(os.Setenv(constant.NamespaceEnvVar, scenario.oadpNamespace)).To(gomega.Succeed()) + namespace := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: scenario.namespace, + }, + } + gomega.Expect(k8sClient.Create(currentTestScenario.ctx, namespace)).To(gomega.Succeed()) + oadpNamespace := &corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{ Name: scenario.oadpNamespace, }, } - gomega.Expect(k8sClient.Create(ctx, oadpNamespace)).To(gomega.Succeed()) + gomega.Expect(k8sClient.Create(currentTestScenario.ctx, oadpNamespace)).To(gomega.Succeed()) k8sManager, err := ctrl.NewManager(cfg, ctrl.Options{ Scheme: k8sClient.Scheme(), @@ -358,8 +359,9 @@ var _ = ginkgo.Describe("Test full reconciles of NonAdminBackup Reconcile functi gomega.Expect(err).ToNot(gomega.HaveOccurred()) err = (&NonAdminBackupReconciler{ - Client: k8sManager.GetClient(), - Scheme: k8sManager.GetScheme(), + Client: k8sManager.GetClient(), + Scheme: k8sManager.GetScheme(), + OADPNamespace: scenario.oadpNamespace, }).SetupWithManager(k8sManager) gomega.Expect(err).ToNot(gomega.HaveOccurred()) @@ -367,44 +369,36 @@ var _ = ginkgo.Describe("Test full reconciles of NonAdminBackup Reconcile functi // study ref https://book.kubebuilder.io/cronjob-tutorial/writing-tests go func() { defer ginkgo.GinkgoRecover() - err = k8sManager.Start(ctx) + err = k8sManager.Start(currentTestScenario.ctx) gomega.Expect(err).ToNot(gomega.HaveOccurred(), "failed to run manager") }() - namespace := &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: scenario.namespace, - }, - } - gomega.Expect(k8sClient.Create(ctx, namespace)).To(gomega.Succeed()) - nonAdminBackup := createTestNonAdminBackup(scenario.namespace, scenario.spec) - gomega.Expect(k8sClient.Create(ctx, nonAdminBackup)).To(gomega.Succeed()) - - gomega.Eventually(func() (nacv1alpha1.NonAdminBackupPhase, error) { + gomega.Expect(k8sClient.Create(currentTestScenario.ctx, nonAdminBackup)).To(gomega.Succeed()) + originalResourceVersion, err := strconv.Atoi(nonAdminBackup.DeepCopy().ResourceVersion) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + // TODO how to know reconcile finished??? + gomega.Eventually(func() (bool, error) { err := k8sClient.Get( - ctx, + currentTestScenario.ctx, types.NamespacedName{ Name: testNonAdminBackupName, - Namespace: currentTestScenario.namespace, + Namespace: scenario.namespace, }, nonAdminBackup, ) if err != nil { - return "", err + return false, err + } + currentResourceVersion, err := strconv.Atoi(nonAdminBackup.ResourceVersion) + if err != nil { + return false, err } - return nonAdminBackup.Status.Phase, nil + return currentResourceVersion-originalResourceVersion == scenario.numberOfResourceVersionChanges, nil // TOO MUCH TIME!!!! 
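 // A possible alternative to counting ResourceVersion bumps (hedged sketch,
 // not part of this patch): wait on an externally observable outcome, such
 // as a condition, via k8s.io/apimachinery/pkg/api/meta (aliased apimeta
 // here; namespacedName abbreviates the types.NamespacedName used above):
 // gomega.Eventually(func() (bool, error) {
 // 	if err := k8sClient.Get(ctx, namespacedName, nonAdminBackup); err != nil {
 // 		return false, err
 // 	}
 // 	return apimeta.IsStatusConditionTrue(nonAdminBackup.Status.Conditions, "Queued"), nil
 // }, 35*time.Second, 1*time.Second).Should(gomega.BeTrue())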
- }, 30*time.Second, 1*time.Second).Should(gomega.Equal(scenario.status.Phase)) + }, 35*time.Second, 1*time.Second).Should(gomega.BeTrue()) - gomega.Expect(k8sClient.Get( - ctx, - types.NamespacedName{ - Name: testNonAdminBackupName, - Namespace: currentTestScenario.namespace, - }, - nonAdminBackup, - )).To(gomega.Succeed()) + log.Println("Validating NonAdminBackup Status") gomega.Expect(nonAdminBackup.Status.Phase).To(gomega.Equal(scenario.status.Phase)) gomega.Expect(nonAdminBackup.Status.VeleroBackupName).To(gomega.Equal(scenario.status.VeleroBackupName)) gomega.Expect(nonAdminBackup.Status.VeleroBackupNamespace).To(gomega.Equal(scenario.status.VeleroBackupNamespace)) @@ -416,16 +410,129 @@ var _ = ginkgo.Describe("Test full reconciles of NonAdminBackup Reconcile functi gomega.Expect(nonAdminBackup.Status.Conditions[index].Reason).To(gomega.Equal(scenario.status.Conditions[index].Reason)) gomega.Expect(nonAdminBackup.Status.Conditions[index].Message).To(gomega.Equal(scenario.status.Conditions[index].Message)) } + log.Println("Validation of NonAdminBackup Status completed successfully") + + veleroBackup := &v1.Backup{} + gomega.Expect(k8sClient.Get( + currentTestScenario.ctx, + types.NamespacedName{ + Name: scenario.status.VeleroBackupName, + Namespace: scenario.oadpNamespace, + }, + veleroBackup, + )).To(gomega.Succeed()) + veleroBackup.Status.Phase = v1.BackupPhaseNew + // TODO I can not call .Status().Update() for veleroBackup object: backups.velero.io "name..." not found + gomega.Expect(k8sClient.Update(currentTestScenario.ctx, veleroBackup)).To(gomega.Succeed()) + // every update produces to reconciles: VeleroBackupPredicate on update -> reconcile start -> update nab status -> requeue -> reconcile start + + gomega.Eventually(func() (bool, error) { + err := k8sClient.Get( + currentTestScenario.ctx, + types.NamespacedName{ + Name: testNonAdminBackupName, + Namespace: scenario.namespace, + }, + nonAdminBackup, + ) + if err != nil { + return false, err + } + currentResourceVersion, err := strconv.Atoi(nonAdminBackup.ResourceVersion) + if err != nil { + return false, err + } + // why 2 ResourceVersion upgrades per veleroBackup update? + return currentResourceVersion-originalResourceVersion == scenario.numberOfResourceVersionChanges+2, nil + // TOO MUCH TIME!!!! + }, 15*time.Second, 1*time.Second).Should(gomega.BeTrue()) + gomega.Expect(nonAdminBackup.Status.VeleroBackupStatus.Phase).To(gomega.Equal(v1.BackupPhaseNew)) + + veleroBackup.Status.Phase = v1.BackupPhaseInProgress + gomega.Expect(k8sClient.Update(currentTestScenario.ctx, veleroBackup)).To(gomega.Succeed()) + + gomega.Eventually(func() (bool, error) { + err := k8sClient.Get( + currentTestScenario.ctx, + types.NamespacedName{ + Name: testNonAdminBackupName, + Namespace: scenario.namespace, + }, + nonAdminBackup, + ) + if err != nil { + return false, err + } + currentResourceVersion, err := strconv.Atoi(nonAdminBackup.ResourceVersion) + if err != nil { + return false, err + } + return currentResourceVersion-originalResourceVersion == scenario.numberOfResourceVersionChanges+4, nil + // TOO MUCH TIME!!!! 
+ }, 15*time.Second, 1*time.Second).Should(gomega.BeTrue()) + gomega.Expect(nonAdminBackup.Status.VeleroBackupStatus.Phase).To(gomega.Equal(v1.BackupPhaseInProgress)) + + veleroBackup.Status.Phase = v1.BackupPhaseCompleted + gomega.Expect(k8sClient.Update(currentTestScenario.ctx, veleroBackup)).To(gomega.Succeed()) + + gomega.Eventually(func() (bool, error) { + err := k8sClient.Get( + currentTestScenario.ctx, + types.NamespacedName{ + Name: testNonAdminBackupName, + Namespace: scenario.namespace, + }, + nonAdminBackup, + ) + if err != nil { + return false, err + } + currentResourceVersion, err := strconv.Atoi(nonAdminBackup.ResourceVersion) + if err != nil { + return false, err + } + return currentResourceVersion-originalResourceVersion == scenario.numberOfResourceVersionChanges+6, nil + // TOO MUCH TIME!!!! + }, 15*time.Second, 1*time.Second).Should(gomega.BeTrue()) + gomega.Expect(nonAdminBackup.Status.VeleroBackupStatus.Phase).To(gomega.Equal(v1.BackupPhaseCompleted)) + + gomega.Expect(k8sClient.Delete(currentTestScenario.ctx, nonAdminBackup)).To(gomega.Succeed()) + // wait reconcile of delete event + time.Sleep(1 * time.Second) }, - ginkgo.Entry("Should DO FULL happy path", nonAdminBackupReconcileScenario{ + // TODO logs for these tests are HUGE!!!! + // example: + // DEBUG NonAdminBackup Reconcile start {"controller": "nonadminbackup", "controllerGroup": "nac.oadp.openshift.io", "controllerKind": "NonAdminBackup", "NonAdminBackup": {"name":"test-non-admin-backup","namespace":"test-nonadminbackup-reconcile-full-1"}, "namespace": "test-nonadminbackup-reconcile-full-1", "name": "test-non-admin-backup", "reconcileID": "19f8b405-5db8-4bf4-b4a0-24ecdd0ae187", "NonAdminBackup": {"name":"test-non-admin-backup","namespace":"test-nonadminbackup-reconcile-full-1"}} + ginkgo.Entry("Should create, update and delete NonAdminBackup", nonAdminBackupReconcileScenario{ namespace: "test-nonadminbackup-reconcile-full-1", oadpNamespace: "test-nonadminbackup-reconcile-full-1-oadp", spec: nacv1alpha1.NonAdminBackupSpec{ BackupSpec: &v1.BackupSpec{}, }, status: nacv1alpha1.NonAdminBackupStatus{ - // TODO should not have VeleroBackupName and VeleroBackupNamespace? - Phase: nacv1alpha1.NonAdminBackupPhaseCreated, + Phase: nacv1alpha1.NonAdminBackupPhaseCreated, + VeleroBackupName: "nab-test-nonadminbackup-reconcile-full-1-c9dd6af01e2e2a", + VeleroBackupNamespace: "test-nonadminbackup-reconcile-full-1-oadp", + VeleroBackupStatus: &v1.BackupStatus{ + Version: 0, + FormatVersion: "", + Expiration: nil, + Phase: "", + ValidationErrors: nil, + StartTimestamp: nil, + CompletionTimestamp: nil, + VolumeSnapshotsAttempted: 0, + VolumeSnapshotsCompleted: 0, + FailureReason: "", + Warnings: 0, + Errors: 0, + Progress: nil, + CSIVolumeSnapshotsAttempted: 0, + CSIVolumeSnapshotsCompleted: 0, + BackupItemOperationsAttempted: 0, + BackupItemOperationsCompleted: 0, + BackupItemOperationsFailed: 0, + }, Conditions: []metav1.Condition{ { Type: "Accepted", @@ -441,6 +548,33 @@ var _ = ginkgo.Describe("Test full reconciles of NonAdminBackup Reconcile functi }, }, }, + numberOfResourceVersionChanges: 13, // should be similar to reconcile starts??? }), + // PRIOR to mocking velero backup updates + // events 10: 2 creates (1 nab, 1 velero), 8 update event (all nab, 5 rejected) + // 6 reconcile starts + // time: 30s-20s + // TODO saw this flake!! 
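 // The errors pasted just below are standard optimistic-concurrency
 // conflicts. A hedged sketch of the usual mitigation on the controller
 // side (not part of this patch), using k8s.io/client-go/util/retry:
 // err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
 // 	if err := r.Get(ctx, client.ObjectKeyFromObject(nab), nab); err != nil {
 // 		return err
 // 	}
 // 	apimeta.SetStatusCondition(&nab.Status.Conditions, condition)
 // 	return r.Status().Update(ctx, nab)
 // })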
+ // 2024-09-02T10:58:31-03:00 ERROR NonAdminBackup Condition - Failed to update {"controller": "nonadminbackup", "controllerGroup": "nac.oadp.openshift.io", "controllerKind": "NonAdminBackup", "NonAdminBackup": {"name":"test-non-admin-backup","namespace":"test-nonadminbackup-reconcile-full-1"}, "namespace": "test-nonadminbackup-reconcile-full-1", "name": "test-non-admin-backup", "reconcileID": "fd1db7a8-6ed5-40ea-b5f6-03c4b1b88dd1", "ValidateSpec NonAdminBackup": {"name":"test-non-admin-backup","namespace":"test-nonadminbackup-reconcile-full-1"}, "error": "Operation cannot be fulfilled on nonadminbackups.nac.oadp.openshift.io \"test-non-admin-backup\": the object has been modified; please apply your changes to the latest version and try again"} + // stacktrace... + // 2024-09-02T10:58:31-03:00 ERROR Unable to set BackupAccepted Condition: True {"controller": "nonadminbackup", "controllerGroup": "nac.oadp.openshift.io", "controllerKind": "NonAdminBackup", "NonAdminBackup": {"name":"test-non-admin-backup","namespace":"test-nonadminbackup-reconcile-full-1"}, "namespace": "test-nonadminbackup-reconcile-full-1", "name": "test-non-admin-backup", "reconcileID": "fd1db7a8-6ed5-40ea-b5f6-03c4b1b88dd1", "ValidateSpec NonAdminBackup": {"name":"test-non-admin-backup","namespace":"test-nonadminbackup-reconcile-full-1"}, "error": "Operation cannot be fulfilled on nonadminbackups.nac.oadp.openshift.io \"test-non-admin-backup\": the object has been modified; please apply your changes to the latest version and try again"} + // stacktrace... + // 2024-09-02T10:58:31-03:00 ERROR Reconciler error {"controller": "nonadminbackup", "controllerGroup": "nac.oadp.openshift.io", "controllerKind": "NonAdminBackup", "NonAdminBackup": {"name":"test-non-admin-backup","namespace":"test-nonadminbackup-reconcile-full-1"}, "namespace": "test-nonadminbackup-reconcile-full-1", "name": "test-non-admin-backup", "reconcileID": "fd1db7a8-6ed5-40ea-b5f6-03c4b1b88dd1", "error": "terminal error: Operation cannot be fulfilled on nonadminbackups.nac.oadp.openshift.io \"test-non-admin-backup\": the object has been modified; please apply your changes to the latest version and try again"} + // stacktrace... + + // ginkgo.Entry("Should DO FULL sad path", nonAdminBackupReconcileScenario{ + // namespace: "test-nonadminbackup-reconcile-full-2", + // oadpNamespace: "test-nonadminbackup-reconcile-full-2-oadp", + // spec: nacv1alpha1.NonAdminBackupSpec{}, + // priorStatus: &nacv1alpha1.NonAdminBackupStatus{ + // Phase: nacv1alpha1.NonAdminBackupPhaseNew, + // }, + // status: nacv1alpha1.NonAdminBackupStatus{ + // Phase: nacv1alpha1.NonAdminBackupPhaseBackingOff, + // }, + // numberOfResourceVersionChanges: 2, + // }), + // events 3: 1 create, 2 update (1 rejected) + // 2 reconcile starts ) }) diff --git a/internal/handler/velerobackup_handler.go b/internal/handler/velerobackup_handler.go index d1fe4c1..9c9ac02 100644 --- a/internal/handler/velerobackup_handler.go +++ b/internal/handler/velerobackup_handler.go @@ -32,7 +32,8 @@ import ( // VeleroBackupHandler contains event handlers for Velero Backup objects type VeleroBackupHandler struct { - Logger logr.Logger + // why this? 
+ // Logger logr.Logger
 }

 func getVeleroBackupHandlerLogger(ctx context.Context, name, namespace string) logr.Logger {
@@ -45,6 +46,7 @@ func (*VeleroBackupHandler) Create(ctx context.Context, evt event.CreateEvent, _
 name := evt.Object.GetName()
 logger := getVeleroBackupHandlerLogger(ctx, name, nameSpace)
 logger.V(1).Info("Received Create VeleroBackupHandler")
+ // is this func necessary?
 }

 // Update event handler
@@ -55,7 +57,6 @@ func (*VeleroBackupHandler) Update(ctx context.Context, evt event.UpdateEvent, q
 logger.V(1).Info("Received Update VeleroBackupHandler")

 annotations := evt.ObjectNew.GetAnnotations()
-
 if annotations == nil {
 logger.V(1).Info("Backup annotations not found")
 return
@@ -73,6 +74,7 @@ func (*VeleroBackupHandler) Update(ctx context.Context, evt event.UpdateEvent, q
 return
 }

+ // TODO AddRateLimited?
 q.Add(reconcile.Request{NamespacedName: types.NamespacedName{
 Name: nabOriginName,
 Namespace: nabOriginNamespace,
diff --git a/internal/predicate/composite_predicate.go b/internal/predicate/composite_predicate.go
index d6f9aef..113ffda 100644
--- a/internal/predicate/composite_predicate.go
+++ b/internal/predicate/composite_predicate.go
@@ -37,13 +37,11 @@ type CompositePredicate struct {
 func (p CompositePredicate) Create(evt event.CreateEvent) bool {
 switch evt.Object.(type) {
 case *nacv1alpha1.NonAdminBackup:
- // Apply NonAdminBackupPredicate
 return p.NonAdminBackupPredicate.Create(p.Context, evt)
 case *velerov1api.Backup:
- // Apply VeleroBackupPredicate
 return p.VeleroBackupPredicate.Create(p.Context, evt)
+ // return false? as we will always create these ourselves
 default:
- // Unknown object type, return false
 return false
 }
 }
@@ -67,6 +65,7 @@ func (p CompositePredicate) Delete(evt event.DeleteEvent) bool {
 return p.NonAdminBackupPredicate.Delete(p.Context, evt)
 case *velerov1api.Backup:
 return p.VeleroBackupPredicate.Delete(p.Context, evt)
+ // return false
 default:
 return false
 }
@@ -74,6 +73,7 @@ func (p CompositePredicate) Delete(evt event.DeleteEvent) bool {

 // Generic event filter
 func (p CompositePredicate) Generic(evt event.GenericEvent) bool {
+ // TODO Is this necessary? could it not be a simple return-false function?
 switch evt.Object.(type) {
 case *nacv1alpha1.NonAdminBackup:
 return p.NonAdminBackupPredicate.Generic(p.Context, evt)
diff --git a/internal/predicate/nonadminbackup_predicate.go b/internal/predicate/nonadminbackup_predicate.go
index b15e086..aca0bb4 100644
--- a/internal/predicate/nonadminbackup_predicate.go
+++ b/internal/predicate/nonadminbackup_predicate.go
@@ -43,6 +43,7 @@ func (NonAdminBackupPredicate) Create(ctx context.Context, evt event.CreateEvent
 name := evt.Object.GetName()
 logger := getNonAdminBackupPredicateLogger(ctx, name, nameSpace)
 logger.V(1).Info("NonAdminBackupPredicate: Received Create event")
+ // Do we need all this logic? should it not just return true here?
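 // If the answer is yes, the whole body could collapse to an unconditional
 // accept (hedged sketch, not part of this patch):
 // func (NonAdminBackupPredicate) Create(_ context.Context, _ event.CreateEvent) bool {
 // 	return true
 // }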
if nonAdminBackup, ok := evt.Object.(*nacv1alpha1.NonAdminBackup); ok { if nonAdminBackup.Status.Phase == constant.EmptyString || nonAdminBackup.Status.Phase == nacv1alpha1.NonAdminBackupPhaseNew { logger.V(1).Info("NonAdminBackupPredicate: Accepted Create event") @@ -104,5 +105,6 @@ func (NonAdminBackupPredicate) Generic(ctx context.Context, evt event.GenericEve name := evt.Object.GetName() logger := getNonAdminBackupPredicateLogger(ctx, name, nameSpace) logger.V(1).Info("NonAdminBackupPredicate: Accepted Generic event") + // refactor: all functions start the same way, move this initialization to a separate function return true } diff --git a/internal/predicate/velerobackup_predicate.go b/internal/predicate/velerobackup_predicate.go index 6f436bd..f12d5a8 100644 --- a/internal/predicate/velerobackup_predicate.go +++ b/internal/predicate/velerobackup_predicate.go @@ -20,7 +20,6 @@ import ( "context" "github.com/go-logr/logr" - velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/log" @@ -43,19 +42,16 @@ func getBackupPredicateLogger(ctx context.Context, name, namespace string) logr. // Create event filter func (veleroBackupPredicate VeleroBackupPredicate) Create(ctx context.Context, evt event.CreateEvent) bool { - if backup, ok := evt.Object.(*velerov1api.Backup); ok { - nameSpace := evt.Object.GetNamespace() - if nameSpace != veleroBackupPredicate.OadpVeleroNamespace { - return false - } - - name := evt.Object.GetName() - logger := getBackupPredicateLogger(ctx, name, nameSpace) - logger.V(1).Info("VeleroBackupPredicate: Received Create event") - - return function.CheckVeleroBackupLabels(backup) + nameSpace := evt.Object.GetNamespace() + name := evt.Object.GetName() + logger := getBackupPredicateLogger(ctx, name, nameSpace) + logger.V(1).Info("VeleroBackupPredicate: Received Create event") + // TODO log accepted or not + if nameSpace != veleroBackupPredicate.OadpVeleroNamespace { + return false } - return false + return function.CheckVeleroBackupLabels(evt.Object.GetLabels()) + // refactor idea, move all validation to a function, predicate functions would just need to call it and log info } // Update event filter @@ -64,11 +60,14 @@ func (veleroBackupPredicate VeleroBackupPredicate) Update(ctx context.Context, e name := evt.ObjectNew.GetName() logger := getBackupPredicateLogger(ctx, name, nameSpace) logger.V(1).Info("VeleroBackupPredicate: Received Update event") + // TODO log accepted or not + // should not check labels? return nameSpace == veleroBackupPredicate.OadpVeleroNamespace } // Delete event filter func (VeleroBackupPredicate) Delete(_ context.Context, _ event.DeleteEvent) bool { + // only create function when needed? changing in composite to simply return false return false } From 698144a669e2b48b15f11afdaf8567a1bdd1fd95 Mon Sep 17 00:00:00 2001 From: Mateus Oliveira Date: Tue, 3 Sep 2024 09:46:35 -0300 Subject: [PATCH 05/17] fixup! 
fix: add integration tests for NAB Signed-off-by: Mateus Oliveira --- docs/CONTRIBUTING.md | 5 ----- 1 file changed, 5 deletions(-) diff --git a/docs/CONTRIBUTING.md b/docs/CONTRIBUTING.md index 9aa8542..f2bafcf 100644 --- a/docs/CONTRIBUTING.md +++ b/docs/CONTRIBUTING.md @@ -58,11 +58,6 @@ To see the html report, run go tool cover -html=cover.out ``` -To run just controllers integration tests (which gives more verbose output), run -```sh -ginkgo run -mod=mod internal/controller -- --ginkgo.vv -``` - TODO end to end tests ### Linters and code formatters From 2a6dd14498f0ecafd084544c7990dbe606466210 Mon Sep 17 00:00:00 2001 From: Mateus Oliveira Date: Tue, 3 Sep 2024 20:38:43 -0300 Subject: [PATCH 06/17] fixup! fix: add integration tests for NAB Signed-off-by: Mateus Oliveira --- internal/common/function/function.go | 33 +++------ .../controller/nonadminbackup_controller.go | 37 ++++++---- .../nonadminbackup_controller_test.go | 72 ++++++++----------- .../predicate/nonadminbackup_predicate.go | 3 +- internal/predicate/velerobackup_predicate.go | 2 +- 5 files changed, 64 insertions(+), 83 deletions(-) diff --git a/internal/common/function/function.go b/internal/common/function/function.go index 2c0ff2d..dcde669 100644 --- a/internal/common/function/function.go +++ b/internal/common/function/function.go @@ -187,40 +187,20 @@ func UpdateNonAdminPhase(ctx context.Context, r client.Client, logger logr.Logge // that the condition is set to the desired status only if it differs from the current status. // If the condition is already set to the desired status, no update is performed. func UpdateNonAdminBackupCondition(ctx context.Context, r client.Client, logger logr.Logger, nab *nacv1alpha1.NonAdminBackup, condition nacv1alpha1.NonAdminCondition, conditionStatus metav1.ConditionStatus, reason string, message string) (bool, error) { + // log should be parent responsibility? // unnecessary? if nab == nil { return false, errors.New("NonAdminBackup object is nil") } - // Ensure phase and condition are valid - if condition == constant.EmptyString { - return false, errors.New("NonAdminBackup Condition cannot be empty") - } - - if conditionStatus == constant.EmptyString { - return false, errors.New("NonAdminBackup Condition Status cannot be empty") - } else if conditionStatus != metav1.ConditionTrue && conditionStatus != metav1.ConditionFalse && conditionStatus != metav1.ConditionUnknown { - return false, errors.New("NonAdminBackup Condition Status must be valid metav1.ConditionStatus") - } - - if reason == constant.EmptyString { - return false, errors.New("NonAdminBackup Condition Reason cannot be empty") - } - + // is not this metav1 responsibility? if message == constant.EmptyString { return false, errors.New("NonAdminBackup Condition Message cannot be empty") } - // Check if the condition is already set to the desired status - currentCondition := apimeta.FindStatusCondition(nab.Status.Conditions, string(condition)) - if currentCondition != nil && currentCondition.Status == conditionStatus && currentCondition.Reason == reason && currentCondition.Message == message { - // Condition is already set to the desired status, no need to update - logger.V(1).Info(fmt.Sprintf("NonAdminBackup Condition is already set to: %s", condition)) - return false, nil - } - + // move this if outside func? 
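 // A hedged sketch of the idea above (not part of this patch): callers
 // could own the pair directly, since apimeta.SetStatusCondition reports
 // whether anything changed:
 // if apimeta.SetStatusCondition(&nab.Status.Conditions, condition) {
 // 	if err := r.Status().Update(ctx, nab); err != nil {
 // 		return false, err
 // 	}
 // 	return true, nil
 // }
 // return false, nil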
// Update NAB status condition
- apimeta.SetStatusCondition(&nab.Status.Conditions,
+ update := apimeta.SetStatusCondition(&nab.Status.Conditions,
 metav1.Condition{
 Type: string(condition),
 Status: conditionStatus,
@@ -228,6 +208,11 @@ func UpdateNonAdminBackupCondition(ctx context.Context, r client.Client, logger
 Message: message,
 },
 )
+ if !update {
+ // would remove log
+ logger.V(1).Info(fmt.Sprintf("NonAdminBackup Condition is already set to: %s", condition))
+ return false, nil
+ }

 // TODO these logs should be after err check, no?
 logger.V(1).Info(fmt.Sprintf("NonAdminBackup Condition set to: %s", condition))
diff --git a/internal/controller/nonadminbackup_controller.go b/internal/controller/nonadminbackup_controller.go
index b983efa..a118bb8 100644
--- a/internal/controller/nonadminbackup_controller.go
+++ b/internal/controller/nonadminbackup_controller.go
@@ -47,7 +47,7 @@ type NonAdminBackupReconciler struct {
 Scheme *runtime.Scheme
 OADPNamespace string // needed???
- Context context.Context
+ // Context context.Context
 }

 // TODO TOO MUCH!!!!!!!!!!!!!!!
@@ -62,8 +62,12 @@ const requeueTimeSeconds = 10
 // Reconcile is part of the main kubernetes reconciliation loop which aims to
 // move the current state of the NonAdminBackup to the desired state.
 func (r *NonAdminBackupReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
- rLog := log.FromContext(ctx)
- logger := rLog.WithValues("NonAdminBackup", req.NamespacedName)
+ // logger := log.FromContext(r.Context)
+ // {"controller": "nonadminbackup", "controllerGroup": "nac.oadp.openshift.io", "controllerKind": "NonAdminBackup", "NonAdminBackup": {"name":"t","namespace":"n"}, "namespace": "n", "name": "t", "reconcileID": "x-x-x"}
+ // I think there is duplication with controller and controllerKind (and controllerGroup is not useful)
+ // duplication with NonAdminBackup, namespace and name
+ // is there a use for reconcileID?
+ logger := log.FromContext(ctx)
 logger.V(1).Info("NonAdminBackup Reconcile start")

 // Get the NonAdminBackup object
@@ -100,7 +104,7 @@ func (r *NonAdminBackupReconciler) Reconcile(ctx context.Context, req ctrl.Reque
 // - will not re trigger reconcile
 // Forget (finish process) ([empty result and nill error] or [terminal error])

- reconcileExit, reconcileRequeue, reconcileErr := r.Init(ctx, rLog, &nab)
+ reconcileExit, reconcileRequeue, reconcileErr := r.Init(ctx, logger, &nab)
 if reconcileRequeue {
 // TODO EITHER Requeue or RequeueAfter, both together do not make sense!!!
 return ctrl.Result{Requeue: true, RequeueAfter: requeueTimeSeconds * time.Second}, reconcileErr
@@ -111,7 +115,7 @@ func (r *NonAdminBackupReconciler) Reconcile(ctx context.Context, req ctrl.Reque
 }

 // would not be better to validate first?
- reconcileExit, reconcileRequeue, reconcileErr = r.ValidateSpec(ctx, rLog, &nab) + reconcileExit, reconcileRequeue, reconcileErr = r.ValidateSpec(ctx, logger, &nab) if reconcileRequeue { return ctrl.Result{Requeue: true, RequeueAfter: requeueTimeSeconds * time.Second}, reconcileErr } else if reconcileExit && reconcileErr != nil { @@ -120,7 +124,7 @@ func (r *NonAdminBackupReconciler) Reconcile(ctx context.Context, req ctrl.Reque return ctrl.Result{}, nil } - reconcileExit, reconcileRequeue, reconcileErr = r.UpdateSpecStatus(ctx, rLog, &nab) + reconcileExit, reconcileRequeue, reconcileErr = r.UpdateSpecStatus(ctx, logger, &nab) if reconcileRequeue { return ctrl.Result{Requeue: true, RequeueAfter: requeueTimeSeconds * time.Second}, reconcileErr } else if reconcileExit && reconcileErr != nil { @@ -145,7 +149,8 @@ func (r *NonAdminBackupReconciler) Reconcile(ctx context.Context, req ctrl.Reque // It then returns boolean values indicating whether the reconciliation loop should requeue or exit // and error value whether the status was updated successfully. func (r *NonAdminBackupReconciler) Init(ctx context.Context, logrLogger logr.Logger, nab *nacv1alpha1.NonAdminBackup) (exitReconcile bool, requeueReconcile bool, errorReconcile error) { - logger := logrLogger.WithValues("Init NonAdminBackup", types.NamespacedName{Name: nab.Name, Namespace: nab.Namespace}) + // logger := logrLogger.WithValues("Init NonAdminBackup", types.NamespacedName{Name: nab.Name, Namespace: nab.Namespace}) + logger := logrLogger if nab.Status.Phase == constant.EmptyString { // Set initial Phase to New @@ -180,11 +185,11 @@ func (r *NonAdminBackupReconciler) Init(ctx context.Context, logrLogger logr.Log // If the BackupSpec is invalid, the function sets the NonAdminBackup condition to "InvalidBackupSpec". THIS DOES NOT HAPPEN // If the BackupSpec is valid, the function sets the NonAdminBackup condition to "BackupAccepted". remove? func (r *NonAdminBackupReconciler) ValidateSpec(ctx context.Context, logrLogger logr.Logger, nab *nacv1alpha1.NonAdminBackup) (exitReconcile bool, requeueReconcile bool, errorReconcile error) { - logger := logrLogger.WithValues("ValidateSpec NonAdminBackup", types.NamespacedName{Name: nab.Name, Namespace: nab.Namespace}) + // logger := logrLogger.WithValues("ValidateSpec NonAdminBackup", types.NamespacedName{Name: nab.Name, Namespace: nab.Namespace}) + logger := logrLogger // Main Validation point for the VeleroBackup included in NonAdminBackup spec _, err := function.GetBackupSpecFromNonAdminBackup(nab) - if err != nil { logger.Error(err, "NonAdminBackup Spec is not valid") @@ -202,7 +207,7 @@ func (r *NonAdminBackupReconciler) ValidateSpec(ctx context.Context, logrLogger // Continue. 
VeleroBackup looks fine, setting Accepted condition to false updatedCondition, errUpdateCondition := function.UpdateNonAdminBackupCondition(ctx, r.Client, logger, nab, nacv1alpha1.NonAdminConditionAccepted, metav1.ConditionFalse, "InvalidBackupSpec", "NonAdminBackup does not contain valid BackupSpec") if errUpdateCondition != nil { - logger.Error(errUpdateCondition, "Unable to set BackupAccepted Condition: False") + logger.Error(errUpdateCondition, "Unable to set BackupAccepted Condition: Accepted False") return true, false, errUpdateCondition } else if updatedCondition { return true, false, nil @@ -217,7 +222,7 @@ func (r *NonAdminBackupReconciler) ValidateSpec(ctx context.Context, logrLogger // this or UpdateNonAdminBackupCondition(..., "BackupAccepted", "Backup accepted") should be deleted updatedStatus, errUpdateStatus := function.UpdateNonAdminBackupCondition(ctx, r.Client, logger, nab, nacv1alpha1.NonAdminConditionAccepted, metav1.ConditionTrue, "Validated", "Valid Backup config") if errUpdateStatus != nil { - logger.Error(errUpdateStatus, "Unable to set BackupAccepted Condition: True") + logger.Error(errUpdateStatus, "Unable to set BackupAccepted Condition: Accepted True") return true, false, errUpdateStatus } else if updatedStatus { // We do requeue - The VeleroBackup got validated and next reconcile loop will continue @@ -242,7 +247,8 @@ func (r *NonAdminBackupReconciler) ValidateSpec(ctx context.Context, logrLogger // and updates NonAdminBackup Status. Otherwise, updates NonAdminBackup VeleroBackup Status based on Velero Backup object Status. // The function returns boolean values indicating whether the reconciliation loop should exit or requeue func (r *NonAdminBackupReconciler) UpdateSpecStatus(ctx context.Context, logrLogger logr.Logger, nab *nacv1alpha1.NonAdminBackup) (exitReconcile bool, requeueReconcile bool, errorReconcile error) { - logger := logrLogger.WithValues("UpdateSpecStatus NonAdminBackup", types.NamespacedName{Name: nab.Name, Namespace: nab.Namespace}) + // logger := logrLogger.WithValues("UpdateSpecStatus NonAdminBackup", types.NamespacedName{Name: nab.Name, Namespace: nab.Namespace}) + logger := logrLogger veleroBackupName := function.GenerateVeleroBackupName(nab.Namespace, nab.Name) if veleroBackupName == constant.EmptyString { @@ -297,6 +303,7 @@ func (r *NonAdminBackupReconciler) UpdateSpecStatus(ctx context.Context, logrLog } veleroBackupLogger.Info("VeleroBackup successfully created") + // TODO merge this update calls? 
I think this is the error cause _, errUpdate := function.UpdateNonAdminPhase(ctx, r.Client, logger, nab, nacv1alpha1.NonAdminBackupPhaseCreated) if errUpdate != nil { logger.Error(errUpdate, "Unable to set NonAdminBackup Phase: Created") @@ -304,12 +311,12 @@ func (r *NonAdminBackupReconciler) UpdateSpecStatus(ctx context.Context, logrLog } _, errUpdate = function.UpdateNonAdminBackupCondition(ctx, r.Client, logger, nab, nacv1alpha1.NonAdminConditionAccepted, metav1.ConditionTrue, "BackupAccepted", "Backup accepted") if errUpdate != nil { - logger.Error(errUpdate, "Unable to set BackupAccepted Condition: True") + logger.Error(errUpdate, "Unable to set BackupAccepted Condition: Accepted True") return true, false, errUpdate } _, errUpdate = function.UpdateNonAdminBackupCondition(ctx, r.Client, logger, nab, nacv1alpha1.NonAdminConditionQueued, metav1.ConditionTrue, "BackupScheduled", "Created Velero Backup object") if errUpdate != nil { - logger.Error(errUpdate, "Unable to set BackupQueued Condition: True") + logger.Error(errUpdate, "Unable to set BackupQueued Condition: Queued True") return true, false, errUpdate } @@ -344,7 +351,7 @@ func (r *NonAdminBackupReconciler) SetupWithManager(mgr ctrl.Manager) error { VeleroBackupPredicate: predicate.VeleroBackupPredicate{ OadpVeleroNamespace: r.OADPNamespace, }, - Context: r.Context, + // Context: r.Context, }). Complete(r) } diff --git a/internal/controller/nonadminbackup_controller_test.go b/internal/controller/nonadminbackup_controller_test.go index 5f9237e..9d185df 100644 --- a/internal/controller/nonadminbackup_controller_test.go +++ b/internal/controller/nonadminbackup_controller_test.go @@ -33,6 +33,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" nacv1alpha1 "github.com/migtools/oadp-non-admin/api/v1alpha1" + "github.com/migtools/oadp-non-admin/internal/common/constant" ) const testNonAdminBackupName = "test-non-admin-backup" @@ -70,15 +71,6 @@ var _ = ginkgo.Describe("Test single reconciles of NonAdminBackup Reconcile func ) ginkgo.AfterEach(func() { - if len(currentTestScenario.oadpNamespace) > 0 { - oadpNamespace := &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: currentTestScenario.oadpNamespace, - }, - } - gomega.Expect(k8sClient.Delete(ctx, oadpNamespace)).To(gomega.Succeed()) - } - nonAdminBackup := &nacv1alpha1.NonAdminBackup{} if k8sClient.Get( ctx, @@ -91,6 +83,15 @@ var _ = ginkgo.Describe("Test single reconciles of NonAdminBackup Reconcile func gomega.Expect(k8sClient.Delete(ctx, nonAdminBackup)).To(gomega.Succeed()) } + if len(currentTestScenario.oadpNamespace) > 0 { + oadpNamespace := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: currentTestScenario.oadpNamespace, + }, + } + gomega.Expect(k8sClient.Delete(ctx, oadpNamespace)).To(gomega.Succeed()) + } + namespace := &corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{ Name: currentTestScenario.namespace, @@ -141,14 +142,6 @@ var _ = ginkgo.Describe("Test single reconciles of NonAdminBackup Reconcile func } gomega.Expect(k8sClient.Create(ctx, namespace)).To(gomega.Succeed()) - nonAdminBackup := createTestNonAdminBackup(scenario.namespace, scenario.spec) - gomega.Expect(k8sClient.Create(ctx, nonAdminBackup)).To(gomega.Succeed()) - - if scenario.priorStatus != nil { - nonAdminBackup.Status = *scenario.priorStatus - gomega.Expect(k8sClient.Status().Update(ctx, nonAdminBackup)).To(gomega.Succeed()) - } - if len(scenario.oadpNamespace) > 0 { oadpNamespace := &corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{ @@ -158,6 +151,16 @@ var _ = 
ginkgo.Describe("Test single reconciles of NonAdminBackup Reconcile func gomega.Expect(k8sClient.Create(ctx, oadpNamespace)).To(gomega.Succeed()) } + nonAdminBackup := createTestNonAdminBackup(scenario.namespace, scenario.spec) + gomega.Expect(k8sClient.Create(ctx, nonAdminBackup)).To(gomega.Succeed()) + + if scenario.priorStatus != nil { + nonAdminBackup.Status = *scenario.priorStatus + gomega.Expect(k8sClient.Status().Update(ctx, nonAdminBackup)).To(gomega.Succeed()) + } + priorResourceVersion, err := strconv.Atoi(nonAdminBackup.ResourceVersion) + gomega.Expect(err).To(gomega.Not(gomega.HaveOccurred())) + result, err := (&NonAdminBackupReconciler{ Client: k8sClient, Scheme: testEnv.Scheme, @@ -194,6 +197,10 @@ var _ = ginkgo.Describe("Test single reconciles of NonAdminBackup Reconcile func gomega.Expect(nonAdminBackup.Status.Conditions[index].Reason).To(gomega.Equal(scenario.status.Conditions[index].Reason)) gomega.Expect(nonAdminBackup.Status.Conditions[index].Message).To(gomega.Equal(scenario.status.Conditions[index].Message)) } + + currentResourceVersion, err := strconv.Atoi(nonAdminBackup.ResourceVersion) + gomega.Expect(err).To(gomega.Not(gomega.HaveOccurred())) + gomega.Expect(currentResourceVersion - priorResourceVersion).To(gomega.Equal(1)) }, ginkgo.Entry("Should accept creation of NonAdminBackup", nonAdminBackupReconcileScenario{ namespace: "test-nonadminbackup-reconcile-1", @@ -365,6 +372,7 @@ var _ = ginkgo.Describe("Test full reconcile loop of NonAdminBackup Controller", }).SetupWithManager(k8sManager) gomega.Expect(err).ToNot(gomega.HaveOccurred()) + // I am seeing test overlap... // TODO Be CAREFUL about FLAKES with this approach? // study ref https://book.kubebuilder.io/cronjob-tutorial/writing-tests go func() { @@ -402,7 +410,7 @@ var _ = ginkgo.Describe("Test full reconcile loop of NonAdminBackup Controller", gomega.Expect(nonAdminBackup.Status.Phase).To(gomega.Equal(scenario.status.Phase)) gomega.Expect(nonAdminBackup.Status.VeleroBackupName).To(gomega.Equal(scenario.status.VeleroBackupName)) gomega.Expect(nonAdminBackup.Status.VeleroBackupNamespace).To(gomega.Equal(scenario.status.VeleroBackupNamespace)) - gomega.Expect(nonAdminBackup.Status.VeleroBackupStatus).To(gomega.Equal(scenario.status.VeleroBackupStatus)) + gomega.Expect(nonAdminBackup.Status.VeleroBackupStatus.Phase).To(gomega.Equal(v1.BackupPhase(constant.EmptyString))) for index := range nonAdminBackup.Status.Conditions { gomega.Expect(nonAdminBackup.Status.Conditions[index].Type).To(gomega.Equal(scenario.status.Conditions[index].Type)) @@ -424,8 +432,9 @@ var _ = ginkgo.Describe("Test full reconcile loop of NonAdminBackup Controller", veleroBackup.Status.Phase = v1.BackupPhaseNew // TODO I can not call .Status().Update() for veleroBackup object: backups.velero.io "name..." not found gomega.Expect(k8sClient.Update(currentTestScenario.ctx, veleroBackup)).To(gomega.Succeed()) - // every update produces to reconciles: VeleroBackupPredicate on update -> reconcile start -> update nab status -> requeue -> reconcile start + // every update produces 2 reconciles: VeleroBackupPredicate on update -> reconcile start -> update nab status -> requeue -> reconcile start + // only one mock update should be enough, right? gomega.Eventually(func() (bool, error) { err := k8sClient.Get( currentTestScenario.ctx, @@ -500,9 +509,6 @@ var _ = ginkgo.Describe("Test full reconcile loop of NonAdminBackup Controller", // wait reconcile of delete event time.Sleep(1 * time.Second) }, - // TODO logs for these tests are HUGE!!!! 
- // example: - // DEBUG NonAdminBackup Reconcile start {"controller": "nonadminbackup", "controllerGroup": "nac.oadp.openshift.io", "controllerKind": "NonAdminBackup", "NonAdminBackup": {"name":"test-non-admin-backup","namespace":"test-nonadminbackup-reconcile-full-1"}, "namespace": "test-nonadminbackup-reconcile-full-1", "name": "test-non-admin-backup", "reconcileID": "19f8b405-5db8-4bf4-b4a0-24ecdd0ae187", "NonAdminBackup": {"name":"test-non-admin-backup","namespace":"test-nonadminbackup-reconcile-full-1"}} ginkgo.Entry("Should create, update and delete NonAdminBackup", nonAdminBackupReconcileScenario{ namespace: "test-nonadminbackup-reconcile-full-1", oadpNamespace: "test-nonadminbackup-reconcile-full-1-oadp", @@ -513,27 +519,8 @@ var _ = ginkgo.Describe("Test full reconcile loop of NonAdminBackup Controller", Phase: nacv1alpha1.NonAdminBackupPhaseCreated, VeleroBackupName: "nab-test-nonadminbackup-reconcile-full-1-c9dd6af01e2e2a", VeleroBackupNamespace: "test-nonadminbackup-reconcile-full-1-oadp", - VeleroBackupStatus: &v1.BackupStatus{ - Version: 0, - FormatVersion: "", - Expiration: nil, - Phase: "", - ValidationErrors: nil, - StartTimestamp: nil, - CompletionTimestamp: nil, - VolumeSnapshotsAttempted: 0, - VolumeSnapshotsCompleted: 0, - FailureReason: "", - Warnings: 0, - Errors: 0, - Progress: nil, - CSIVolumeSnapshotsAttempted: 0, - CSIVolumeSnapshotsCompleted: 0, - BackupItemOperationsAttempted: 0, - BackupItemOperationsCompleted: 0, - BackupItemOperationsFailed: 0, - }, Conditions: []metav1.Condition{ + // Is this a valid Condition??? { Type: "Accepted", Status: metav1.ConditionTrue, @@ -554,6 +541,7 @@ var _ = ginkgo.Describe("Test full reconcile loop of NonAdminBackup Controller", // events 10: 2 creates (1 nab, 1 velero), 8 update event (all nab, 5 rejected) // 6 reconcile starts // time: 30s-20s + // // TODO saw this flake!! // 2024-09-02T10:58:31-03:00 ERROR NonAdminBackup Condition - Failed to update {"controller": "nonadminbackup", "controllerGroup": "nac.oadp.openshift.io", "controllerKind": "NonAdminBackup", "NonAdminBackup": {"name":"test-non-admin-backup","namespace":"test-nonadminbackup-reconcile-full-1"}, "namespace": "test-nonadminbackup-reconcile-full-1", "name": "test-non-admin-backup", "reconcileID": "fd1db7a8-6ed5-40ea-b5f6-03c4b1b88dd1", "ValidateSpec NonAdminBackup": {"name":"test-non-admin-backup","namespace":"test-nonadminbackup-reconcile-full-1"}, "error": "Operation cannot be fulfilled on nonadminbackups.nac.oadp.openshift.io \"test-non-admin-backup\": the object has been modified; please apply your changes to the latest version and try again"} // stacktrace... diff --git a/internal/predicate/nonadminbackup_predicate.go b/internal/predicate/nonadminbackup_predicate.go index aca0bb4..030ea71 100644 --- a/internal/predicate/nonadminbackup_predicate.go +++ b/internal/predicate/nonadminbackup_predicate.go @@ -30,7 +30,7 @@ import ( // NonAdminBackupPredicate contains event filters for Non Admin Backup objects type NonAdminBackupPredicate struct { - Logger logr.Logger + // Logger logr.Logger } func getNonAdminBackupPredicateLogger(ctx context.Context, name, namespace string) logr.Logger { @@ -62,6 +62,7 @@ func (NonAdminBackupPredicate) Update(ctx context.Context, evt event.UpdateEvent logger := getNonAdminBackupPredicateLogger(ctx, name, nameSpace) logger.V(1).Info("NonAdminBackupPredicate: Received Update event") + // resourceVersion? 
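 // For reference (hedged): the API server bumps metadata.generation only on
 // .spec changes, while resourceVersion changes on every write, including
 // status and metadata updates, so matching on resourceVersion here would
 // accept far more update events than the generation check below.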
if evt.ObjectNew.GetGeneration() != evt.ObjectOld.GetGeneration() { logger.V(1).Info("NonAdminBackupPredicate: Accepted Update event - generation change") return true diff --git a/internal/predicate/velerobackup_predicate.go b/internal/predicate/velerobackup_predicate.go index f12d5a8..dd145d8 100644 --- a/internal/predicate/velerobackup_predicate.go +++ b/internal/predicate/velerobackup_predicate.go @@ -32,7 +32,7 @@ type VeleroBackupPredicate struct { // We are watching only Velero Backup objects within // namespace where OADP is. OadpVeleroNamespace string - Logger logr.Logger + // Logger logr.Logger } // TODO try to remove calls to get logger functions, try to initialize it From 2c2fb3da11a032faffe6b538d824c8666ae2bfee Mon Sep 17 00:00:00 2001 From: Mateus Oliveira Date: Wed, 4 Sep 2024 10:54:28 -0300 Subject: [PATCH 07/17] fixup! fix: add integration tests for NAB Signed-off-by: Mateus Oliveira --- internal/common/function/function.go | 31 ++-------- internal/common/function/function_test.go | 11 +--- .../controller/nonadminbackup_controller.go | 57 +++++++++++++++---- 3 files changed, 53 insertions(+), 46 deletions(-) diff --git a/internal/common/function/function.go b/internal/common/function/function.go index dcde669..d1bb83d 100644 --- a/internal/common/function/function.go +++ b/internal/common/function/function.go @@ -96,11 +96,6 @@ func containsOnlyNamespace(namespaces []string, namespace string) bool { // GetBackupSpecFromNonAdminBackup return BackupSpec object from NonAdminBackup spec, if no error occurs func GetBackupSpecFromNonAdminBackup(nonAdminBackup *nacv1alpha1.NonAdminBackup) (*velerov1api.BackupSpec, error) { // TODO https://github.com/migtools/oadp-non-admin/issues/60 - // unnecessary? - if nonAdminBackup == nil { - return nil, fmt.Errorf("nonAdminBackup is nil") - } - if nonAdminBackup.Spec.BackupSpec == nil { // this should be Kubernetes API validation return nil, fmt.Errorf("BackupSpec is not defined") @@ -155,18 +150,12 @@ func GenerateVeleroBackupName(namespace, nabName string) string { // UpdateNonAdminPhase updates the phase of a NonAdminBackup object with the provided phase. func UpdateNonAdminPhase(ctx context.Context, r client.Client, logger logr.Logger, nab *nacv1alpha1.NonAdminBackup, phase nacv1alpha1.NonAdminBackupPhase) (bool, error) { - // unnecessary? - if nab == nil { - return false, errors.New("NonAdminBackup object is nil") - } - // Ensure phase is valid if phase == constant.EmptyString { return false, errors.New("NonAdminBackupPhase cannot be empty") } if nab.Status.Phase == phase { - // No change, no need to update logger.V(1).Info("NonAdminBackup Phase is already up to date") return false, nil } @@ -188,19 +177,13 @@ func UpdateNonAdminPhase(ctx context.Context, r client.Client, logger logr.Logge // If the condition is already set to the desired status, no update is performed. func UpdateNonAdminBackupCondition(ctx context.Context, r client.Client, logger logr.Logger, nab *nacv1alpha1.NonAdminBackup, condition nacv1alpha1.NonAdminCondition, conditionStatus metav1.ConditionStatus, reason string, message string) (bool, error) { // log should be parent responsibility? - // unnecessary? - if nab == nil { - return false, errors.New("NonAdminBackup object is nil") - } - // is not this metav1 responsibility? if message == constant.EmptyString { return false, errors.New("NonAdminBackup Condition Message cannot be empty") } // move this if outside func? 
- // Update NAB status condition - update := apimeta.SetStatusCondition(&nab.Status.Conditions, + updated := apimeta.SetStatusCondition(&nab.Status.Conditions, metav1.Condition{ Type: string(condition), Status: conditionStatus, @@ -208,23 +191,21 @@ func UpdateNonAdminBackupCondition(ctx context.Context, r client.Client, logger Message: message, }, ) - if !update { + if !updated { // would remove log logger.V(1).Info(fmt.Sprintf("NonAdminBackup Condition is already set to: %s", condition)) return false, nil } - // TODO these logs should be after err check, no? - logger.V(1).Info(fmt.Sprintf("NonAdminBackup Condition set to: %s", condition)) - logger.V(1).Info(fmt.Sprintf("NonAdminBackup Condition Reason set to: %s", reason)) - logger.V(1).Info(fmt.Sprintf("NonAdminBackup Condition Message set to: %s", message)) - - // Update NAB status + // Update NAB status in cluster if err := r.Status().Update(ctx, nab); err != nil { logger.Error(err, "NonAdminBackup Condition - Failed to update") return false, err } + logger.V(1).Info(fmt.Sprintf("NonAdminBackup Condition set to: %s", condition)) + logger.V(1).Info(fmt.Sprintf("NonAdminBackup Condition Reason set to: %s", reason)) + logger.V(1).Info(fmt.Sprintf("NonAdminBackup Condition Message set to: %s", message)) return true, nil } diff --git a/internal/common/function/function_test.go b/internal/common/function/function_test.go index bd95f28..2907201 100644 --- a/internal/common/function/function_test.go +++ b/internal/common/function/function_test.go @@ -164,20 +164,13 @@ func TestAddNonAdminBackupAnnotations(t *testing.T) { } func TestGetBackupSpecFromNonAdminBackup(t *testing.T) { - // Test case: nonAdminBackup is nil - nonAdminBackup := (*nacv1alpha1.NonAdminBackup)(nil) - backupSpec, err := GetBackupSpecFromNonAdminBackup(nonAdminBackup) - assert.Error(t, err) - assert.Nil(t, backupSpec) - assert.Equal(t, "nonAdminBackup is nil", err.Error()) - // Test case: BackupSpec is nil - nonAdminBackup = &nacv1alpha1.NonAdminBackup{ + nonAdminBackup := &nacv1alpha1.NonAdminBackup{ Spec: nacv1alpha1.NonAdminBackupSpec{ BackupSpec: nil, }, } - backupSpec, err = GetBackupSpecFromNonAdminBackup(nonAdminBackup) + backupSpec, err := GetBackupSpecFromNonAdminBackup(nonAdminBackup) assert.Error(t, err) assert.Nil(t, backupSpec) assert.Equal(t, "BackupSpec is not defined", err.Error()) diff --git a/internal/controller/nonadminbackup_controller.go b/internal/controller/nonadminbackup_controller.go index a118bb8..2eb71a7 100644 --- a/internal/controller/nonadminbackup_controller.go +++ b/internal/controller/nonadminbackup_controller.go @@ -80,20 +80,20 @@ func (r *NonAdminBackupReconciler) Reconcile(ctx context.Context, req ctrl.Reque } logger.Error(err, "Unable to fetch NonAdminBackup") return ctrl.Result{}, err + // how to avoid this being reconciled forever? } // requeue on every change is the correct pattern! document this // TODO refactor idea: do not enter on sub functions again // TODO refactor idea: sub functions can not exit clean, that should be main func responsibility. Remove reconcileExit return param + // TODO refactor idea: sub functions can not requeue, that should be predicate responsibility. Remove requeueReconcile return param // TODO refactor idea: - // requeue, err := r.Init(ctx, rLog, &nab) + // err := r.Init(ctx, logger, &nab) // Init calls ValidateSpec, that calls UpdateSpecStatus, and etc... // if err != nil { // // handle err smart way to retry when wanted? 
// return ctrl.Result{}, reconcile.TerminalError(err) // } - // if requeue { - // return ctrl.Result{Requeue: true}, nil - // } + // // SOURCE https://github.com/kubernetes-sigs/controller-runtime/blob/e6c3d139d2b6c286b1dbba6b6a95919159cfe655/pkg/internal/controller/controller.go#L286 // Alright, after studies, I believe there are only 2 possibilities (DEV eyes): // - re trigger reconcile @@ -133,6 +133,7 @@ func (r *NonAdminBackupReconciler) Reconcile(ctx context.Context, req ctrl.Reque return ctrl.Result{}, nil } + logger.V(1).Info("NonAdminBackup Reconcile exit") return ctrl.Result{}, nil } @@ -154,8 +155,21 @@ func (r *NonAdminBackupReconciler) Init(ctx context.Context, logrLogger logr.Log if nab.Status.Phase == constant.EmptyString { // Set initial Phase to New - // can this function be simplified to return just an error? - // can it return false, nil? + // TODO refactor idea: this function should return just a bool, like apimeta.SetStatusCondition() + // TODO refactor idea: logger calls should all be done in this file, so it is easier to control what is being logged + // TODO refactor idea: r.Status().Update() calls should all be done in this file, so it is easier to control number of updates per reconcile + // TODO refactor idea: + // updated := function.UpdateNonAdminPhase(nab, nacv1alpha1.NonAdminBackupPhaseNew) + // if updated { + // if err := r.Status().Update(ctx, nab); err != nil { + // logger.Error(err, "Failed to update NonAdminBackup Phase") + // return err + // } + + // logger.V(1).Info("NonAdminBackup Phase updated") + // return nil + // } + // TODO refactor idea: remove outer if updatedStatus, errUpdate := function.UpdateNonAdminPhase(ctx, r.Client, logger, nab, nacv1alpha1.NonAdminBackupPhaseNew) if errUpdate != nil { logger.Error(errUpdate, "Unable to set NonAdminBackup Phase: New") @@ -169,6 +183,7 @@ func (r *NonAdminBackupReconciler) Init(ctx context.Context, logrLogger logr.Log logger.V(1).Info("NonAdminBackup Status.Phase already initialized") return false, false, nil + // return ValidateSpec } // ValidateSpec validates the Spec from the NonAdminBackup. @@ -200,11 +215,10 @@ func (r *NonAdminBackupReconciler) ValidateSpec(ctx context.Context, logrLogger logger.Error(errUpdateStatus, "Unable to set NonAdminBackup Phase: BackingOff") return true, false, errUpdateStatus } else if updatedStatus { - // We do not requeue - the State was set to BackingOff + // We do not requeue - the State was set to BackingOff - BUG return true, false, nil } - // Continue. 
VeleroBackup looks fine, setting Accepted condition to false updatedCondition, errUpdateCondition := function.UpdateNonAdminBackupCondition(ctx, r.Client, logger, nab, nacv1alpha1.NonAdminConditionAccepted, metav1.ConditionFalse, "InvalidBackupSpec", "NonAdminBackup does not contain valid BackupSpec") if errUpdateCondition != nil { logger.Error(errUpdateCondition, "Unable to set BackupAccepted Condition: Accepted False") @@ -212,8 +226,27 @@ func (r *NonAdminBackupReconciler) ValidateSpec(ctx context.Context, logrLogger } else if updatedCondition { return true, false, nil } + // TODO refactor idea: this function should be deleted, use apimeta.SetStatusCondition() + // TODO refactor idea: + // updatedPhase := function.UpdateNonAdminPhase(nab, nacv1alpha1.NonAdminBackupPhaseNew) + // updatedCondition := apimeta.SetStatusCondition(&nab.Status.Conditions, + // metav1.Condition{ + // Type: nacv1alpha1.NonAdminConditionAccepted, + // Status: metav1.ConditionFalse, + // Reason: "InvalidBackupSpec", + // Message: "NonAdminBackup does not contain valid BackupSpec", + // }, + // ) + // if updatedPhase || updatedCondition { + // if err := r.Status().Update(ctx, nab); err != nil { + // logger.Error(err, "Failed to update NonAdminBackup Phase") + // return err + // } + + // logger.V(1).Info("NonAdminBackup Status updated") + // return nil + // } - // We do not requeue - this was an error from getting Spec from NAB return true, false, err } @@ -225,13 +258,13 @@ func (r *NonAdminBackupReconciler) ValidateSpec(ctx context.Context, logrLogger logger.Error(errUpdateStatus, "Unable to set BackupAccepted Condition: Accepted True") return true, false, errUpdateStatus } else if updatedStatus { - // We do requeue - The VeleroBackup got validated and next reconcile loop will continue - // with further work on the VeleroBackup such as creating it + logger.V(1).Info("NonAdminBackup - Requeue after Phase Update") return false, true, nil } logger.V(1).Info("NonAdminBackup Spec already validated") return false, false, nil + // return UpdateSpecStatus } // UpdateSpecStatus updates the Spec and Status from the NonAdminBackup. @@ -326,7 +359,7 @@ func (r *NonAdminBackupReconciler) UpdateSpecStatus(ctx context.Context, logrLog // The VeleroBackup within NonAdminBackup will // be reverted back to the previous state - the state which created VeleroBackup // in a first place, so they will be in sync. - veleroBackupLogger.Info("VeleroBackup already exists, updating NonAdminBackup status") + veleroBackupLogger.Info("VeleroBackup already exists, updating NonAdminBackup Status") updatedNab, errBackupUpdate := function.UpdateNonAdminBackupFromVeleroBackup(ctx, r.Client, logger, nab, &veleroBackup) // Regardless if the status was updated or not, we should not // requeue here as it was only status update. AND SPEC??? From 492a54f60a50849a2d14b3799e1fec7cf5869ef8 Mon Sep 17 00:00:00 2001 From: Mateus Oliveira Date: Wed, 4 Sep 2024 17:52:51 -0300 Subject: [PATCH 08/17] fixup! 
fix: add integration tests for NAB Signed-off-by: Mateus Oliveira --- cmd/main.go | 1 - go.mod | 1 - internal/common/function/function.go | 79 +----- .../controller/nonadminbackup_controller.go | 245 ++++++++---------- .../nonadminbackup_controller_test.go | 242 ++++++++++------- internal/handler/velerobackup_handler.go | 17 +- internal/predicate/composite_predicate.go | 3 - .../predicate/nonadminbackup_predicate.go | 20 +- internal/predicate/velerobackup_predicate.go | 18 +- 9 files changed, 273 insertions(+), 353 deletions(-) diff --git a/cmd/main.go b/cmd/main.go index 8fa988c..0d81d67 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -137,7 +137,6 @@ func main() { Client: mgr.GetClient(), Scheme: mgr.GetScheme(), OADPNamespace: oadpNamespace, - // TODO context does not need to be set here??? }).SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "NonAdminBackup") os.Exit(1) diff --git a/go.mod b/go.mod index ba2d575..32d49c7 100644 --- a/go.mod +++ b/go.mod @@ -76,5 +76,4 @@ require ( sigs.k8s.io/yaml v1.4.0 // indirect ) -// need update? replace github.com/vmware-tanzu/velero => github.com/openshift/velero v0.10.2-0.20231024175012-d8101a298016 diff --git a/internal/common/function/function.go b/internal/common/function/function.go index d1bb83d..3fd3a3c 100644 --- a/internal/common/function/function.go +++ b/internal/common/function/function.go @@ -21,15 +21,12 @@ import ( "context" "crypto/sha256" "encoding/hex" - "errors" "fmt" "os" "reflect" "github.com/go-logr/logr" velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" - apimeta "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/log" @@ -148,67 +145,6 @@ func GenerateVeleroBackupName(namespace, nabName string) string { return veleroBackupName } -// UpdateNonAdminPhase updates the phase of a NonAdminBackup object with the provided phase. -func UpdateNonAdminPhase(ctx context.Context, r client.Client, logger logr.Logger, nab *nacv1alpha1.NonAdminBackup, phase nacv1alpha1.NonAdminBackupPhase) (bool, error) { - // Ensure phase is valid - if phase == constant.EmptyString { - return false, errors.New("NonAdminBackupPhase cannot be empty") - } - - if nab.Status.Phase == phase { - logger.V(1).Info("NonAdminBackup Phase is already up to date") - return false, nil - } - - // Update NAB status - nab.Status.Phase = phase - if err := r.Status().Update(ctx, nab); err != nil { - logger.Error(err, "Failed to update NonAdminBackup Phase") - return false, err - } - - logger.V(1).Info(fmt.Sprintf("NonAdminBackup Phase set to: %s", phase)) - return true, nil -} - -// UpdateNonAdminBackupCondition updates the condition of a NonAdminBackup object -// based on the provided parameters. It validates the input parameters and ensures -// that the condition is set to the desired status only if it differs from the current status. -// If the condition is already set to the desired status, no update is performed. -func UpdateNonAdminBackupCondition(ctx context.Context, r client.Client, logger logr.Logger, nab *nacv1alpha1.NonAdminBackup, condition nacv1alpha1.NonAdminCondition, conditionStatus metav1.ConditionStatus, reason string, message string) (bool, error) { - // log should be parent responsibility? - // is not this metav1 responsibility? 
- if message == constant.EmptyString { - return false, errors.New("NonAdminBackup Condition Message cannot be empty") - } - - // move this if outside func? - updated := apimeta.SetStatusCondition(&nab.Status.Conditions, - metav1.Condition{ - Type: string(condition), - Status: conditionStatus, - Reason: reason, - Message: message, - }, - ) - if !updated { - // would remove log - logger.V(1).Info(fmt.Sprintf("NonAdminBackup Condition is already set to: %s", condition)) - return false, nil - } - - // Update NAB status in cluster - if err := r.Status().Update(ctx, nab); err != nil { - logger.Error(err, "NonAdminBackup Condition - Failed to update") - return false, err - } - - logger.V(1).Info(fmt.Sprintf("NonAdminBackup Condition set to: %s", condition)) - logger.V(1).Info(fmt.Sprintf("NonAdminBackup Condition Reason set to: %s", reason)) - logger.V(1).Info(fmt.Sprintf("NonAdminBackup Condition Message set to: %s", message)) - return true, nil -} - // UpdateNonAdminBackupFromVeleroBackup update, if necessary, NonAdminBackup object fields related to referenced Velero Backup object, if no error occurs func UpdateNonAdminBackupFromVeleroBackup(ctx context.Context, r client.Client, logger logr.Logger, nab *nacv1alpha1.NonAdminBackup, veleroBackup *velerov1api.Backup) (bool, error) { logger.V(1).Info("NonAdminBackup BackupSpec and VeleroBackupStatus - request to update") @@ -229,9 +165,9 @@ func UpdateNonAdminBackupFromVeleroBackup(ctx context.Context, r client.Client, return false, err } logger.V(1).Info("NonAdminBackup BackupStatus - updated") - } else { - logger.V(1).Info("NonAdminBackup BackupStatus - up to date") + return true, nil } + logger.V(1).Info("NonAdminBackup BackupStatus - up to date") // Check if BackupSpec needs to be updated // avoid spec change? @@ -242,14 +178,15 @@ func UpdateNonAdminBackupFromVeleroBackup(ctx context.Context, r client.Client, return false, err } logger.V(1).Info("NonAdminBackup BackupSpec - updated") - } else { - logger.V(1).Info("NonAdminBackup BackupSpec - up to date") + return true, nil } + logger.V(1).Info("NonAdminBackup BackupSpec - up to date") - // If either BackupStatus or BackupSpec was updated, return true - return true, nil + return false, nil } +// TODO not used + // CheckVeleroBackupLabels return true if Velero Backup object has required Non Admin labels, false otherwise func CheckVeleroBackupLabels(labels map[string]string) bool { // TODO also need to check for constant.OadpLabel label? @@ -264,6 +201,8 @@ func GetNonAdminBackupFromVeleroBackup(ctx context.Context, clientInstance clien // Check if the backup has the required annotations to identify the associated NonAdminBackup object logger := log.FromContext(ctx) + // should run CheckVeleroBackupLabels here? 
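For orientation, the lookup GetNonAdminBackupFromVeleroBackup performs can be pictured as below. This is a hypothetical sketch: the annotation keys are assumptions, not the constants the repo actually defines.

// nabOriginFromBackup resolves the owning NonAdminBackup from the annotations
// NAC stamps onto the Velero Backup at creation time.
func nabOriginFromBackup(backup *velerov1api.Backup) (types.NamespacedName, bool) {
	annotations := backup.GetAnnotations()
	name, okName := annotations["openshift.io/oadp-nab-origin-name"]                // assumed key
	namespace, okNamespace := annotations["openshift.io/oadp-nab-origin-namespace"] // assumed key
	if !okName || !okNamespace {
		return types.NamespacedName{}, false
	}
	return types.NamespacedName{Name: name, Namespace: namespace}, true
}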
+ annotations := backup.GetAnnotations() annotationsStr := fmt.Sprintf("%v", annotations) diff --git a/internal/controller/nonadminbackup_controller.go b/internal/controller/nonadminbackup_controller.go index 2eb71a7..1198bc1 100644 --- a/internal/controller/nonadminbackup_controller.go +++ b/internal/controller/nonadminbackup_controller.go @@ -20,11 +20,11 @@ package controller import ( "context" "errors" - "time" "github.com/go-logr/logr" velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" @@ -46,13 +46,8 @@ type NonAdminBackupReconciler struct { client.Client Scheme *runtime.Scheme OADPNamespace string - // needed??? - // Context context.Context } -// TODO TOO MUCH!!!!!!!!!!!!!!! -const requeueTimeSeconds = 10 - // +kubebuilder:rbac:groups=nac.oadp.openshift.io,resources=nonadminbackups,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=nac.oadp.openshift.io,resources=nonadminbackups/status,verbs=get;update;patch // +kubebuilder:rbac:groups=nac.oadp.openshift.io,resources=nonadminbackups/finalizers,verbs=update @@ -62,11 +57,6 @@ const requeueTimeSeconds = 10 // Reconcile is part of the main kubernetes reconciliation loop which aims to // move the current state of the NonAdminBackup to the desired state. func (r *NonAdminBackupReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - // logger := log.FromContext(r.Context) - // {"controller": "nonadminbackup", "controllerGroup": "nac.oadp.openshift.io", "controllerKind": "NonAdminBackup", "NonAdminBackup": {"name":"t","namespace":"n"}, "namespace": "n", "name": "t", "reconcileID": "x-x-x"} - // I think there is duplication with controller and controllerKind (and controllerGroup is noy useful) - // duplication with NonAdminBackup, namespace and name - // there is a use for reconcileID? logger := log.FromContext(ctx) logger.V(1).Info("NonAdminBackup Reconcile start") @@ -80,56 +70,39 @@ func (r *NonAdminBackupReconciler) Reconcile(ctx context.Context, req ctrl.Reque } logger.Error(err, "Unable to fetch NonAdminBackup") return ctrl.Result{}, err - // how to avoid this being reconciled forever? } - // requeue on every change is the correct pattern! document this - // TODO refactor idea: do not enter on sub functions again - // TODO refactor idea: sub functions can not exit clean, that should be main func responsibility. Remove reconcileExit return param - // TODO refactor idea: sub functions can not requeue, that should be predicate responsibility. Remove requeueReconcile return param - // TODO refactor idea: - // err := r.Init(ctx, logger, &nab) // Init calls ValidateSpec, that calls UpdateSpecStatus, and etc... - // if err != nil { - // // handle err smart way to retry when wanted? 
- // return ctrl.Result{}, reconcile.TerminalError(err) - // } - // - // SOURCE https://github.com/kubernetes-sigs/controller-runtime/blob/e6c3d139d2b6c286b1dbba6b6a95919159cfe655/pkg/internal/controller/controller.go#L286 - // Alright, after studies, I believe there are only 2 possibilities (DEV eyes): - // - re trigger reconcile - // AddRateLimited ([requeue and nill error] or [normal error]) - // will re trigger reconcile immediately, after 1 second, after 2 seconds, etc - // AddAfter ([RequeueAfter and nill error]) - // will re trigger reconcile after time - // - will not re trigger reconcile - // Forget (finish process) ([empty result and nill error] or [terminal error]) - reconcileExit, reconcileRequeue, reconcileErr := r.Init(ctx, logger, &nab) if reconcileRequeue { - // TODO EITHER Requeue or RequeueAfter, both together do not make sense!!! - return ctrl.Result{Requeue: true, RequeueAfter: requeueTimeSeconds * time.Second}, reconcileErr + logger.V(1).Info("NonAdminBackup Reconcile requeue") + return ctrl.Result{Requeue: true}, reconcileErr } else if reconcileExit && reconcileErr != nil { - return ctrl.Result{}, reconcile.TerminalError(reconcileErr) + return ctrl.Result{}, reconcileErr } else if reconcileExit { + logger.V(1).Info("NonAdminBackup Reconcile exit") return ctrl.Result{}, nil } // would not be better to validate first? reconcileExit, reconcileRequeue, reconcileErr = r.ValidateSpec(ctx, logger, &nab) if reconcileRequeue { - return ctrl.Result{Requeue: true, RequeueAfter: requeueTimeSeconds * time.Second}, reconcileErr + logger.V(1).Info("NonAdminBackup Reconcile requeue") + return ctrl.Result{Requeue: true}, reconcileErr } else if reconcileExit && reconcileErr != nil { - return ctrl.Result{}, reconcile.TerminalError(reconcileErr) + return ctrl.Result{}, reconcileErr } else if reconcileExit { + logger.V(1).Info("NonAdminBackup Reconcile exit") return ctrl.Result{}, nil } reconcileExit, reconcileRequeue, reconcileErr = r.UpdateSpecStatus(ctx, logger, &nab) if reconcileRequeue { - return ctrl.Result{Requeue: true, RequeueAfter: requeueTimeSeconds * time.Second}, reconcileErr + logger.V(1).Info("NonAdminBackup Reconcile requeue") + return ctrl.Result{Requeue: true}, reconcileErr } else if reconcileExit && reconcileErr != nil { - return ctrl.Result{}, reconcile.TerminalError(reconcileErr) + return ctrl.Result{}, reconcileErr } else if reconcileExit { + logger.V(1).Info("NonAdminBackup Reconcile exit") return ctrl.Result{}, nil } @@ -150,32 +123,16 @@ func (r *NonAdminBackupReconciler) Reconcile(ctx context.Context, req ctrl.Reque // It then returns boolean values indicating whether the reconciliation loop should requeue or exit // and error value whether the status was updated successfully. 
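For reference, the workqueue outcomes the removed study comment enumerates, condensed into one compilable sketch (the function name and sentinel error are illustrative, not repo code):

import (
	"errors"
	"time"

	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

var errInvalidSpec = errors.New("invalid spec") // assumed sentinel

func requeueOutcomes(done bool, delay time.Duration, err error) (ctrl.Result, error) {
	switch {
	case errors.Is(err, errInvalidSpec):
		// workqueue.Forget: terminal errors are logged once and never retried.
		return ctrl.Result{}, reconcile.TerminalError(err)
	case err != nil:
		// workqueue.AddRateLimited: retried immediately, then with growing backoff.
		return ctrl.Result{}, err
	case delay > 0:
		// workqueue.AddAfter: retried once after the fixed delay.
		return ctrl.Result{RequeueAfter: delay}, nil
	case !done:
		// workqueue.AddRateLimited as well: Requeue true with a nil error.
		return ctrl.Result{Requeue: true}, nil
	default:
		// workqueue.Forget: reconcile finished cleanly.
		return ctrl.Result{}, nil
	}
}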
func (r *NonAdminBackupReconciler) Init(ctx context.Context, logrLogger logr.Logger, nab *nacv1alpha1.NonAdminBackup) (exitReconcile bool, requeueReconcile bool, errorReconcile error) { - // logger := logrLogger.WithValues("Init NonAdminBackup", types.NamespacedName{Name: nab.Name, Namespace: nab.Namespace}) logger := logrLogger if nab.Status.Phase == constant.EmptyString { - // Set initial Phase to New - // TODO refactor idea: this function should return just a bool, like apimeta.SetStatusCondition() - // TODO refactor idea: logger calls should all be done in this file, so it is easier to control what is being logged - // TODO refactor idea: r.Status().Update() calls should all be done in this file, so it is easier to control number of updates per reconcile - // TODO refactor idea: - // updated := function.UpdateNonAdminPhase(nab, nacv1alpha1.NonAdminBackupPhaseNew) - // if updated { - // if err := r.Status().Update(ctx, nab); err != nil { - // logger.Error(err, "Failed to update NonAdminBackup Phase") - // return err - // } - - // logger.V(1).Info("NonAdminBackup Phase updated") - // return nil - // } - // TODO refactor idea: remove outer if - updatedStatus, errUpdate := function.UpdateNonAdminPhase(ctx, r.Client, logger, nab, nacv1alpha1.NonAdminBackupPhaseNew) - if errUpdate != nil { - logger.Error(errUpdate, "Unable to set NonAdminBackup Phase: New") - return true, false, errUpdate - } - if updatedStatus { + updated := updateNonAdminPhase(nab, nacv1alpha1.NonAdminBackupPhaseNew) + if updated { + if err := r.Status().Update(ctx, nab); err != nil { + logger.Error(err, "Failed to update NonAdminBackup Phase") + return true, false, err + } + logger.V(1).Info("NonAdminBackup - Requeue after Phase Update") return false, true, nil } @@ -183,7 +140,6 @@ func (r *NonAdminBackupReconciler) Init(ctx context.Context, logrLogger logr.Log logger.V(1).Info("NonAdminBackup Status.Phase already initialized") return false, false, nil - // return ValidateSpec } // ValidateSpec validates the Spec from the NonAdminBackup. @@ -194,13 +150,11 @@ func (r *NonAdminBackupReconciler) Init(ctx context.Context, logrLogger logr.Log // logrLogger: Logger instance for logging messages. // nab: Pointer to the NonAdminBackup object. // -// The function attempts to get the BackupSpec from the NonAdminBackup object. -// If an error occurs during this process, the function sets the NonAdminBackup status to "BackingOff" -// and updates the corresponding condition accordingly. -// If the BackupSpec is invalid, the function sets the NonAdminBackup condition to "InvalidBackupSpec". THIS DOES NOT HAPPEN -// If the BackupSpec is valid, the function sets the NonAdminBackup condition to "BackupAccepted". remove? +// The function validates the BackupSpec from the NonAdminBackup object. +// If the BackupSpec is invalid, the function sets the NonAdminBackup phase to "BackingOff". +// If the BackupSpec is invalid, the function sets the NonAdminBackup condition to "InvalidBackupSpec". +// If the BackupSpec is valid, the function sets the NonAdminBackup condition to "BackupAccepted". 
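The phase flow those doc comments describe, written out as data purely for orientation (the map is illustrative, not code from the repo):

// Init moves an empty phase to New; ValidateSpec sends an invalid spec to
// BackingOff; UpdateSpecStatus sets Created once the Velero Backup exists.
var phaseFlow = map[nacv1alpha1.NonAdminBackupPhase][]nacv1alpha1.NonAdminBackupPhase{
	nacv1alpha1.NonAdminBackupPhase(""): {nacv1alpha1.NonAdminBackupPhaseNew},
	nacv1alpha1.NonAdminBackupPhaseNew: {
		nacv1alpha1.NonAdminBackupPhaseCreated,
		nacv1alpha1.NonAdminBackupPhaseBackingOff,
	},
}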
func (r *NonAdminBackupReconciler) ValidateSpec(ctx context.Context, logrLogger logr.Logger, nab *nacv1alpha1.NonAdminBackup) (exitReconcile bool, requeueReconcile bool, errorReconcile error) { - // logger := logrLogger.WithValues("ValidateSpec NonAdminBackup", types.NamespacedName{Name: nab.Name, Namespace: nab.Namespace}) logger := logrLogger // Main Validation point for the VeleroBackup included in NonAdminBackup spec @@ -208,63 +162,55 @@ func (r *NonAdminBackupReconciler) ValidateSpec(ctx context.Context, logrLogger if err != nil { logger.Error(err, "NonAdminBackup Spec is not valid") - // this should be one call: update both phase and condition at THE SAME TIME - // OR do requeue, CONDITION is never set to false - updatedStatus, errUpdateStatus := function.UpdateNonAdminPhase(ctx, r.Client, logger, nab, nacv1alpha1.NonAdminBackupPhaseBackingOff) - if errUpdateStatus != nil { - logger.Error(errUpdateStatus, "Unable to set NonAdminBackup Phase: BackingOff") - return true, false, errUpdateStatus - } else if updatedStatus { - // We do not requeue - the State was set to BackingOff - BUG - return true, false, nil + updated := updateNonAdminPhase(nab, nacv1alpha1.NonAdminBackupPhaseBackingOff) + if updated { + if updateErr := r.Status().Update(ctx, nab); updateErr != nil { + logger.Error(updateErr, "Failed to update NonAdminBackup Phase") + return true, false, updateErr + } + + logger.V(1).Info("NonAdminBackup - Requeue after Phase Update") + return false, true, nil } - updatedCondition, errUpdateCondition := function.UpdateNonAdminBackupCondition(ctx, r.Client, logger, nab, nacv1alpha1.NonAdminConditionAccepted, metav1.ConditionFalse, "InvalidBackupSpec", "NonAdminBackup does not contain valid BackupSpec") - if errUpdateCondition != nil { - logger.Error(errUpdateCondition, "Unable to set BackupAccepted Condition: Accepted False") - return true, false, errUpdateCondition - } else if updatedCondition { - return true, false, nil + updated = meta.SetStatusCondition(&nab.Status.Conditions, + metav1.Condition{ + Type: string(nacv1alpha1.NonAdminConditionAccepted), + Status: metav1.ConditionFalse, + Reason: "InvalidBackupSpec", + Message: "NonAdminBackup does not contain valid BackupSpec", + }, + ) + if updated { + if updateErr := r.Status().Update(ctx, nab); updateErr != nil { + logger.Error(updateErr, "Failed to update NonAdminBackup Condition") + return true, false, updateErr + } } - // TODO refactor idea: this function should be deleted, use apimeta.SetStatusCondition() - // TODO refactor idea: - // updatedPhase := function.UpdateNonAdminPhase(nab, nacv1alpha1.NonAdminBackupPhaseNew) - // updatedCondition := apimeta.SetStatusCondition(&nab.Status.Conditions, - // metav1.Condition{ - // Type: nacv1alpha1.NonAdminConditionAccepted, - // Status: metav1.ConditionFalse, - // Reason: "InvalidBackupSpec", - // Message: "NonAdminBackup does not contain valid BackupSpec", - // }, - // ) - // if updatedPhase || updatedCondition { - // if err := r.Status().Update(ctx, nab); err != nil { - // logger.Error(err, "Failed to update NonAdminBackup Phase") - // return err - // } - - // logger.V(1).Info("NonAdminBackup Status updated") - // return nil - // } - - return true, false, err + + return true, false, reconcile.TerminalError(err) } - // TODO is this needed? from design, does not seem a valid condition - // this keeps being called... 
- // this or UpdateNonAdminBackupCondition(..., "BackupAccepted", "Backup accepted") should be deleted - updatedStatus, errUpdateStatus := function.UpdateNonAdminBackupCondition(ctx, r.Client, logger, nab, nacv1alpha1.NonAdminConditionAccepted, metav1.ConditionTrue, "Validated", "Valid Backup config") - if errUpdateStatus != nil { - logger.Error(errUpdateStatus, "Unable to set BackupAccepted Condition: Accepted True") - return true, false, errUpdateStatus - } else if updatedStatus { - logger.V(1).Info("NonAdminBackup - Requeue after Phase Update") + updated := meta.SetStatusCondition(&nab.Status.Conditions, + metav1.Condition{ + Type: string(nacv1alpha1.NonAdminConditionAccepted), + Status: metav1.ConditionTrue, + Reason: "BackupAccepted", + Message: "Backup accepted", + }, + ) + if updated { + if err := r.Status().Update(ctx, nab); err != nil { + logger.Error(err, "Failed to update NonAdminBackup Condition") + return true, false, err + } + + logger.V(1).Info("NonAdminBackup - Requeue after Condition Update") return false, true, nil } logger.V(1).Info("NonAdminBackup Spec already validated") return false, false, nil - // return UpdateSpecStatus } // UpdateSpecStatus updates the Spec and Status from the NonAdminBackup. @@ -280,7 +226,6 @@ func (r *NonAdminBackupReconciler) ValidateSpec(ctx context.Context, logrLogger // and updates NonAdminBackup Status. Otherwise, updates NonAdminBackup VeleroBackup Status based on Velero Backup object Status. // The function returns boolean values indicating whether the reconciliation loop should exit or requeue func (r *NonAdminBackupReconciler) UpdateSpecStatus(ctx context.Context, logrLogger logr.Logger, nab *nacv1alpha1.NonAdminBackup) (exitReconcile bool, requeueReconcile bool, errorReconcile error) { - // logger := logrLogger.WithValues("UpdateSpecStatus NonAdminBackup", types.NamespacedName{Name: nab.Name, Namespace: nab.Namespace}) logger := logrLogger veleroBackupName := function.GenerateVeleroBackupName(nab.Namespace, nab.Name) @@ -304,7 +249,6 @@ func (r *NonAdminBackupReconciler) UpdateSpecStatus(ctx context.Context, logrLog // We don't validate error here. // This was already validated in the ValidateVeleroBackupSpec backupSpec, errBackup := function.GetBackupSpecFromNonAdminBackup(nab) - if errBackup != nil { // Should never happen as it was already checked return true, false, errBackup @@ -336,25 +280,38 @@ func (r *NonAdminBackupReconciler) UpdateSpecStatus(ctx context.Context, logrLog } veleroBackupLogger.Info("VeleroBackup successfully created") - // TODO merge this update calls? 
I think this is the error cause - _, errUpdate := function.UpdateNonAdminPhase(ctx, r.Client, logger, nab, nacv1alpha1.NonAdminBackupPhaseCreated) - if errUpdate != nil { - logger.Error(errUpdate, "Unable to set NonAdminBackup Phase: Created") - return true, false, errUpdate - } - _, errUpdate = function.UpdateNonAdminBackupCondition(ctx, r.Client, logger, nab, nacv1alpha1.NonAdminConditionAccepted, metav1.ConditionTrue, "BackupAccepted", "Backup accepted") - if errUpdate != nil { - logger.Error(errUpdate, "Unable to set BackupAccepted Condition: Accepted True") - return true, false, errUpdate - } - _, errUpdate = function.UpdateNonAdminBackupCondition(ctx, r.Client, logger, nab, nacv1alpha1.NonAdminConditionQueued, metav1.ConditionTrue, "BackupScheduled", "Created Velero Backup object") - if errUpdate != nil { - logger.Error(errUpdate, "Unable to set BackupQueued Condition: Queued True") - return true, false, errUpdate + updated := updateNonAdminPhase(nab, nacv1alpha1.NonAdminBackupPhaseCreated) + if updated { + if err := r.Status().Update(ctx, nab); err != nil { + logger.Error(err, "Failed to update NonAdminBackup Phase") + return true, false, err + } + + logger.V(1).Info("NonAdminBackup - Requeue after Phase Update") + return false, true, nil } return false, false, nil } + + updated := meta.SetStatusCondition(&nab.Status.Conditions, + metav1.Condition{ + Type: string(nacv1alpha1.NonAdminConditionQueued), + Status: metav1.ConditionTrue, + Reason: "BackupScheduled", + Message: "Created Velero Backup object", + }, + ) + if updated { + if err := r.Status().Update(ctx, nab); err != nil { + logger.Error(err, "Failed to update NonAdminBackup Condition") + return true, false, err + } + + logger.V(1).Info("NonAdminBackup - Requeue after Condition Update") + return false, true, nil + } + // We should not update already created VeleroBackup object. // The VeleroBackup within NonAdminBackup will // be reverted back to the previous state - the state which created VeleroBackup // in a first place, so they will be in sync. @@ -366,14 +323,12 @@ func (r *NonAdminBackupReconciler) UpdateSpecStatus(ctx context.Context, logrLog if errBackupUpdate != nil { return true, false, errBackupUpdate } else if updatedNab { - logger.V(1).Info("NonAdminBackup - Requeue after Status Update") + logger.V(1).Info("NonAdminBackup - Requeue after Status Update") // AND SPEC??? return false, true, nil } return true, false, nil } -// TODO refactor idea: break in smaller functions: CreateVeleroBackup, UpdateStatusAfterVeleroBackupCreation and UpdateSpecStatus - // SetupWithManager sets up the controller with the Manager. func (r *NonAdminBackupReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). @@ -384,7 +339,21 @@ func (r *NonAdminBackupReconciler) SetupWithManager(mgr ctrl.Manager) error { VeleroBackupPredicate: predicate.VeleroBackupPredicate{ OadpVeleroNamespace: r.OADPNamespace, }, - // Context: r.Context, }). Complete(r) } + +// updateNonAdminPhase updates the phase of a NonAdminBackup object with the provided phase.
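The call pattern for the unexported helper defined just below, as used throughout the hunks above (restated for clarity, not new repo code):

// In-memory phase flip; persistence and requeue decisions stay in the reconciler.
if updateNonAdminPhase(nab, nacv1alpha1.NonAdminBackupPhaseCreated) {
	if err := r.Status().Update(ctx, nab); err != nil {
		return true, false, err // exit the reconcile with the error
	}
	return false, true, nil // requeue and continue from the new phase
}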
+func updateNonAdminPhase(nab *nacv1alpha1.NonAdminBackup, phase nacv1alpha1.NonAdminBackupPhase) bool { + // Ensure phase is valid + if phase == constant.EmptyString { + return false + } + + if nab.Status.Phase == phase { + return false + } + + nab.Status.Phase = phase + return true +} diff --git a/internal/controller/nonadminbackup_controller_test.go b/internal/controller/nonadminbackup_controller_test.go index 9d185df..fb99667 100644 --- a/internal/controller/nonadminbackup_controller_test.go +++ b/internal/controller/nonadminbackup_controller_test.go @@ -18,9 +18,10 @@ package controller import ( "context" + // "net/http" + "fmt" "log" "strconv" - // "net/http" "time" "github.com/onsi/ginkgo/v2" @@ -34,17 +35,20 @@ import ( nacv1alpha1 "github.com/migtools/oadp-non-admin/api/v1alpha1" "github.com/migtools/oadp-non-admin/internal/common/constant" + "github.com/migtools/oadp-non-admin/internal/common/function" ) const testNonAdminBackupName = "test-non-admin-backup" type nonAdminBackupReconcileScenario struct { - namespace string - oadpNamespace string - spec nacv1alpha1.NonAdminBackupSpec - priorStatus *nacv1alpha1.NonAdminBackupStatus - status nacv1alpha1.NonAdminBackupStatus - result reconcile.Result + namespace string + oadpNamespace string + spec nacv1alpha1.NonAdminBackupSpec + priorStatus *nacv1alpha1.NonAdminBackupStatus + status nacv1alpha1.NonAdminBackupStatus + result reconcile.Result + resultError error + createVeleroBackup bool // TODO create a struct for each test case! ctx context.Context cancel context.CancelFunc @@ -161,6 +165,16 @@ var _ = ginkgo.Describe("Test single reconciles of NonAdminBackup Reconcile func priorResourceVersion, err := strconv.Atoi(nonAdminBackup.ResourceVersion) gomega.Expect(err).To(gomega.Not(gomega.HaveOccurred())) + if scenario.createVeleroBackup { + veleroBackup := &v1.Backup{ + ObjectMeta: metav1.ObjectMeta{ + Name: function.GenerateVeleroBackupName(scenario.namespace, testNonAdminBackupName), + Namespace: scenario.oadpNamespace, + }, + } + gomega.Expect(k8sClient.Create(ctx, veleroBackup)).To(gomega.Succeed()) + } + result, err := (&NonAdminBackupReconciler{ Client: k8sClient, Scheme: testEnv.Scheme, @@ -175,7 +189,12 @@ var _ = ginkgo.Describe("Test single reconciles of NonAdminBackup Reconcile func // TODO need to collect logs, so they do not appear in test run // also assert them gomega.Expect(result).To(gomega.Equal(scenario.result)) - gomega.Expect(err).To(gomega.Not(gomega.HaveOccurred())) + if scenario.resultError == nil { + gomega.Expect(err).To(gomega.Not(gomega.HaveOccurred())) + } else { + gomega.Expect(err).To(gomega.HaveOccurred()) + gomega.Expect(err.Error()).To(gomega.Equal(scenario.resultError.Error())) + } gomega.Expect(k8sClient.Get( ctx, @@ -191,6 +210,7 @@ var _ = ginkgo.Describe("Test single reconciles of NonAdminBackup Reconcile func gomega.Expect(nonAdminBackup.Status.VeleroBackupNamespace).To(gomega.Equal(scenario.status.VeleroBackupNamespace)) gomega.Expect(nonAdminBackup.Status.VeleroBackupStatus).To(gomega.Equal(scenario.status.VeleroBackupStatus)) + gomega.Expect(nonAdminBackup.Status.Conditions).To(gomega.HaveLen(len(scenario.status.Conditions))) for index := range nonAdminBackup.Status.Conditions { gomega.Expect(nonAdminBackup.Status.Conditions[index].Type).To(gomega.Equal(scenario.status.Conditions[index].Type)) gomega.Expect(nonAdminBackup.Status.Conditions[index].Status).To(gomega.Equal(scenario.status.Conditions[index].Status)) @@ -204,12 +224,12 @@ var _ = ginkgo.Describe("Test single reconciles of 
NonAdminBackup Reconcile func }, ginkgo.Entry("Should accept creation of NonAdminBackup", nonAdminBackupReconcileScenario{ namespace: "test-nonadminbackup-reconcile-1", - result: reconcile.Result{Requeue: true, RequeueAfter: 10 * time.Second}, + result: reconcile.Result{Requeue: true}, status: nacv1alpha1.NonAdminBackupStatus{ Phase: nacv1alpha1.NonAdminBackupPhaseNew, }, }), - ginkgo.Entry("Should accept update of NonAdminBackup phase", nonAdminBackupReconcileScenario{ + ginkgo.Entry("Should accept update of NonAdminBackup phase to new", nonAdminBackupReconcileScenario{ namespace: "test-nonadminbackup-reconcile-2", spec: nacv1alpha1.NonAdminBackupSpec{ BackupSpec: &v1.BackupSpec{}, @@ -217,21 +237,20 @@ var _ = ginkgo.Describe("Test single reconciles of NonAdminBackup Reconcile func priorStatus: &nacv1alpha1.NonAdminBackupStatus{ Phase: nacv1alpha1.NonAdminBackupPhaseNew, }, - result: reconcile.Result{Requeue: true, RequeueAfter: 10 * time.Second}, + result: reconcile.Result{Requeue: true}, status: nacv1alpha1.NonAdminBackupStatus{ Phase: nacv1alpha1.NonAdminBackupPhaseNew, Conditions: []metav1.Condition{ - // Is this a valid Condition??? { Type: "Accepted", Status: metav1.ConditionTrue, - Reason: "Validated", - Message: "Valid Backup config", + Reason: "BackupAccepted", + Message: "Backup accepted", }, }, }, }), - ginkgo.Entry("Should accept update of NonAdminBackup Condition", nonAdminBackupReconcileScenario{ + ginkgo.Entry("Should accept update of NonAdminBackup Condition to Accepted True", nonAdminBackupReconcileScenario{ namespace: "test-nonadminbackup-reconcile-3", oadpNamespace: "test-nonadminbackup-reconcile-3-oadp", spec: nacv1alpha1.NonAdminBackupSpec{ @@ -240,19 +259,100 @@ var _ = ginkgo.Describe("Test single reconciles of NonAdminBackup Reconcile func priorStatus: &nacv1alpha1.NonAdminBackupStatus{ Phase: nacv1alpha1.NonAdminBackupPhaseNew, Conditions: []metav1.Condition{ - // Is this a valid Condition??? { Type: "Accepted", Status: metav1.ConditionTrue, - Reason: "Validated", - Message: "Valid Backup config", + Reason: "BackupAccepted", + Message: "Backup accepted", LastTransitionTime: metav1.NewTime(time.Now()), }, }, }, + result: reconcile.Result{Requeue: true}, status: nacv1alpha1.NonAdminBackupStatus{ // TODO should not have VeleroBackupName and VeleroBackupNamespace? Phase: nacv1alpha1.NonAdminBackupPhaseCreated, + Conditions: []metav1.Condition{ + { + Type: "Accepted", + Status: metav1.ConditionTrue, + Reason: "BackupAccepted", + Message: "Backup accepted", + }, + }, + }, + }), + ginkgo.Entry("Should accept update of NonAdminBackup phase to created", nonAdminBackupReconcileScenario{ + namespace: "test-nonadminbackup-reconcile-4", + oadpNamespace: "test-nonadminbackup-reconcile-4-oadp", + spec: nacv1alpha1.NonAdminBackupSpec{ + BackupSpec: &v1.BackupSpec{}, + }, + priorStatus: &nacv1alpha1.NonAdminBackupStatus{ + Phase: nacv1alpha1.NonAdminBackupPhaseCreated, + Conditions: []metav1.Condition{ + { + Type: "Accepted", + Status: metav1.ConditionTrue, + Reason: "BackupAccepted", + Message: "Backup accepted", + LastTransitionTime: metav1.NewTime(time.Now()), + }, + }, + }, + createVeleroBackup: true, + result: reconcile.Result{Requeue: true}, + status: nacv1alpha1.NonAdminBackupStatus{ + // TODO should not have VeleroBackupName and VeleroBackupNamespace? 
+ Phase: nacv1alpha1.NonAdminBackupPhaseCreated, + Conditions: []metav1.Condition{ + { + Type: "Accepted", + Status: metav1.ConditionTrue, + Reason: "BackupAccepted", + Message: "Backup accepted", + }, + { + Type: "Queued", + Status: metav1.ConditionTrue, + Reason: "BackupScheduled", + Message: "Created Velero Backup object", + }, + }, + }, + }), + ginkgo.Entry("Should accept update of NonAdminBackup Condition to Queued True", nonAdminBackupReconcileScenario{ + namespace: "test-nonadminbackup-reconcile-5", + oadpNamespace: "test-nonadminbackup-reconcile-5-oadp", + spec: nacv1alpha1.NonAdminBackupSpec{ + BackupSpec: &v1.BackupSpec{}, + }, + priorStatus: &nacv1alpha1.NonAdminBackupStatus{ + Phase: nacv1alpha1.NonAdminBackupPhaseCreated, + Conditions: []metav1.Condition{ + { + Type: "Accepted", + Status: metav1.ConditionTrue, + Reason: "BackupAccepted", + Message: "Backup accepted", + LastTransitionTime: metav1.NewTime(time.Now()), + }, + { + Type: "Queued", + Status: metav1.ConditionTrue, + Reason: "BackupScheduled", + Message: "Created Velero Backup object", + LastTransitionTime: metav1.NewTime(time.Now()), + }, + }, + }, + createVeleroBackup: true, + result: reconcile.Result{Requeue: true}, + status: nacv1alpha1.NonAdminBackupStatus{ + Phase: nacv1alpha1.NonAdminBackupPhaseCreated, + VeleroBackupName: "nab-test-nonadminbackup-reconcile-5-c9dd6af01e2e2a", + VeleroBackupNamespace: "test-nonadminbackup-reconcile-5-oadp", + VeleroBackupStatus: &v1.BackupStatus{}, Conditions: []metav1.Condition{ { Type: "Accepted", @@ -269,32 +369,44 @@ var _ = ginkgo.Describe("Test single reconciles of NonAdminBackup Reconcile func }, }, }), - ginkgo.Entry("Should NOT accept update of NonAdminBackup phase because of empty backupSpec", nonAdminBackupReconcileScenario{ - // TODO WRONG this should be a validator not a code logic - namespace: "test-nonadminbackup-reconcile-4", - spec: nacv1alpha1.NonAdminBackupSpec{}, + ginkgo.Entry("Should accept update of NonAdminBackup phase to new - invalid spec", nonAdminBackupReconcileScenario{ + namespace: "test-nonadminbackup-reconcile-6", + spec: nacv1alpha1.NonAdminBackupSpec{ + BackupSpec: &v1.BackupSpec{ + IncludedNamespaces: []string{"not-valid"}, + }, + }, priorStatus: &nacv1alpha1.NonAdminBackupStatus{ Phase: nacv1alpha1.NonAdminBackupPhaseNew, }, + result: reconcile.Result{Requeue: true}, status: nacv1alpha1.NonAdminBackupStatus{ Phase: nacv1alpha1.NonAdminBackupPhaseBackingOff, }, - // should not return terminal error? }), - ginkgo.Entry("Should NOT accept update of NonAdminBackup phase because of includedNamespaces pointing to different namespace", nonAdminBackupReconcileScenario{ - namespace: "test-nonadminbackup-reconcile-5", + ginkgo.Entry("Should accept update of NonAdminBackup phase to BackingOff", nonAdminBackupReconcileScenario{ + // this validates spec again... WRONG!!! 
+ namespace: "test-nonadminbackup-reconcile-7", spec: nacv1alpha1.NonAdminBackupSpec{ BackupSpec: &v1.BackupSpec{ IncludedNamespaces: []string{"not-valid"}, }, }, priorStatus: &nacv1alpha1.NonAdminBackupStatus{ - Phase: nacv1alpha1.NonAdminBackupPhaseNew, + Phase: nacv1alpha1.NonAdminBackupPhaseBackingOff, }, + resultError: reconcile.TerminalError(fmt.Errorf("spec.backupSpec.IncludedNamespaces can not contain namespaces other than: test-nonadminbackup-reconcile-7")), status: nacv1alpha1.NonAdminBackupStatus{ Phase: nacv1alpha1.NonAdminBackupPhaseBackingOff, + Conditions: []metav1.Condition{ + { + Type: "Accepted", + Status: metav1.ConditionFalse, + Reason: "InvalidBackupSpec", + Message: "NonAdminBackup does not contain valid BackupSpec", + }, + }, }, - // should not return terminal error? }), ) }) @@ -403,8 +515,7 @@ var _ = ginkgo.Describe("Test full reconcile loop of NonAdminBackup Controller", return false, err } return currentResourceVersion-originalResourceVersion == scenario.numberOfResourceVersionChanges, nil - // TOO MUCH TIME!!!! - }, 35*time.Second, 1*time.Second).Should(gomega.BeTrue()) + }, 5*time.Second, 1*time.Second).Should(gomega.BeTrue()) log.Println("Validating NonAdminBackup Status") gomega.Expect(nonAdminBackup.Status.Phase).To(gomega.Equal(scenario.status.Phase)) @@ -429,12 +540,11 @@ var _ = ginkgo.Describe("Test full reconcile loop of NonAdminBackup Controller", }, veleroBackup, )).To(gomega.Succeed()) - veleroBackup.Status.Phase = v1.BackupPhaseNew + veleroBackup.Status.Phase = v1.BackupPhaseCompleted // TODO I can not call .Status().Update() for veleroBackup object: backups.velero.io "name..." not found gomega.Expect(k8sClient.Update(currentTestScenario.ctx, veleroBackup)).To(gomega.Succeed()) // every update produces 2 reconciles: VeleroBackupPredicate on update -> reconcile start -> update nab status -> requeue -> reconcile start - // only one mock update should be enough, right? gomega.Eventually(func() (bool, error) { err := k8sClient.Get( currentTestScenario.ctx, @@ -453,56 +563,7 @@ var _ = ginkgo.Describe("Test full reconcile loop of NonAdminBackup Controller", } // why 2 ResourceVersion upgrades per veleroBackup update? return currentResourceVersion-originalResourceVersion == scenario.numberOfResourceVersionChanges+2, nil - // TOO MUCH TIME!!!! - }, 15*time.Second, 1*time.Second).Should(gomega.BeTrue()) - gomega.Expect(nonAdminBackup.Status.VeleroBackupStatus.Phase).To(gomega.Equal(v1.BackupPhaseNew)) - - veleroBackup.Status.Phase = v1.BackupPhaseInProgress - gomega.Expect(k8sClient.Update(currentTestScenario.ctx, veleroBackup)).To(gomega.Succeed()) - - gomega.Eventually(func() (bool, error) { - err := k8sClient.Get( - currentTestScenario.ctx, - types.NamespacedName{ - Name: testNonAdminBackupName, - Namespace: scenario.namespace, - }, - nonAdminBackup, - ) - if err != nil { - return false, err - } - currentResourceVersion, err := strconv.Atoi(nonAdminBackup.ResourceVersion) - if err != nil { - return false, err - } - return currentResourceVersion-originalResourceVersion == scenario.numberOfResourceVersionChanges+4, nil - // TOO MUCH TIME!!!! 
- }, 15*time.Second, 1*time.Second).Should(gomega.BeTrue()) - gomega.Expect(nonAdminBackup.Status.VeleroBackupStatus.Phase).To(gomega.Equal(v1.BackupPhaseInProgress)) - - veleroBackup.Status.Phase = v1.BackupPhaseCompleted - gomega.Expect(k8sClient.Update(currentTestScenario.ctx, veleroBackup)).To(gomega.Succeed()) - - gomega.Eventually(func() (bool, error) { - err := k8sClient.Get( - currentTestScenario.ctx, - types.NamespacedName{ - Name: testNonAdminBackupName, - Namespace: scenario.namespace, - }, - nonAdminBackup, - ) - if err != nil { - return false, err - } - currentResourceVersion, err := strconv.Atoi(nonAdminBackup.ResourceVersion) - if err != nil { - return false, err - } - return currentResourceVersion-originalResourceVersion == scenario.numberOfResourceVersionChanges+6, nil - // TOO MUCH TIME!!!! - }, 15*time.Second, 1*time.Second).Should(gomega.BeTrue()) + }, 5*time.Second, 1*time.Second).Should(gomega.BeTrue()) gomega.Expect(nonAdminBackup.Status.VeleroBackupStatus.Phase).To(gomega.Equal(v1.BackupPhaseCompleted)) gomega.Expect(k8sClient.Delete(currentTestScenario.ctx, nonAdminBackup)).To(gomega.Succeed()) @@ -520,12 +581,11 @@ var _ = ginkgo.Describe("Test full reconcile loop of NonAdminBackup Controller", VeleroBackupName: "nab-test-nonadminbackup-reconcile-full-1-c9dd6af01e2e2a", VeleroBackupNamespace: "test-nonadminbackup-reconcile-full-1-oadp", Conditions: []metav1.Condition{ - // Is this a valid Condition??? { Type: "Accepted", Status: metav1.ConditionTrue, - Reason: "Validated", - Message: "Valid Backup config", + Reason: "BackupAccepted", + Message: "Backup accepted", }, { Type: "Queued", @@ -535,20 +595,8 @@ var _ = ginkgo.Describe("Test full reconcile loop of NonAdminBackup Controller", }, }, }, - numberOfResourceVersionChanges: 13, // should be similar to reconcile starts??? + numberOfResourceVersionChanges: 7, // should be similar to reconcile starts??? }), - // PRIOR to mocking velero backup updates - // events 10: 2 creates (1 nab, 1 velero), 8 update event (all nab, 5 rejected) - // 6 reconcile starts - // time: 30s-20s - // - // TODO saw this flake!! - // 2024-09-02T10:58:31-03:00 ERROR NonAdminBackup Condition - Failed to update {"controller": "nonadminbackup", "controllerGroup": "nac.oadp.openshift.io", "controllerKind": "NonAdminBackup", "NonAdminBackup": {"name":"test-non-admin-backup","namespace":"test-nonadminbackup-reconcile-full-1"}, "namespace": "test-nonadminbackup-reconcile-full-1", "name": "test-non-admin-backup", "reconcileID": "fd1db7a8-6ed5-40ea-b5f6-03c4b1b88dd1", "ValidateSpec NonAdminBackup": {"name":"test-non-admin-backup","namespace":"test-nonadminbackup-reconcile-full-1"}, "error": "Operation cannot be fulfilled on nonadminbackups.nac.oadp.openshift.io \"test-non-admin-backup\": the object has been modified; please apply your changes to the latest version and try again"} - // stacktrace... 
- // 2024-09-02T10:58:31-03:00 ERROR Unable to set BackupAccepted Condition: True {"controller": "nonadminbackup", "controllerGroup": "nac.oadp.openshift.io", "controllerKind": "NonAdminBackup", "NonAdminBackup": {"name":"test-non-admin-backup","namespace":"test-nonadminbackup-reconcile-full-1"}, "namespace": "test-nonadminbackup-reconcile-full-1", "name": "test-non-admin-backup", "reconcileID": "fd1db7a8-6ed5-40ea-b5f6-03c4b1b88dd1", "ValidateSpec NonAdminBackup": {"name":"test-non-admin-backup","namespace":"test-nonadminbackup-reconcile-full-1"}, "error": "Operation cannot be fulfilled on nonadminbackups.nac.oadp.openshift.io \"test-non-admin-backup\": the object has been modified; please apply your changes to the latest version and try again"} - // stacktrace... - // 2024-09-02T10:58:31-03:00 ERROR Reconciler error {"controller": "nonadminbackup", "controllerGroup": "nac.oadp.openshift.io", "controllerKind": "NonAdminBackup", "NonAdminBackup": {"name":"test-non-admin-backup","namespace":"test-nonadminbackup-reconcile-full-1"}, "namespace": "test-nonadminbackup-reconcile-full-1", "name": "test-non-admin-backup", "reconcileID": "fd1db7a8-6ed5-40ea-b5f6-03c4b1b88dd1", "error": "terminal error: Operation cannot be fulfilled on nonadminbackups.nac.oadp.openshift.io \"test-non-admin-backup\": the object has been modified; please apply your changes to the latest version and try again"} - // stacktrace... // ginkgo.Entry("Should DO FULL sad path", nonAdminBackupReconcileScenario{ // namespace: "test-nonadminbackup-reconcile-full-2", diff --git a/internal/handler/velerobackup_handler.go b/internal/handler/velerobackup_handler.go index 9c9ac02..e521cab 100644 --- a/internal/handler/velerobackup_handler.go +++ b/internal/handler/velerobackup_handler.go @@ -31,22 +31,15 @@ import ( ) // VeleroBackupHandler contains event handlers for Velero Backup objects -type VeleroBackupHandler struct { - // why this? - // Logger logr.Logger -} +type VeleroBackupHandler struct{} func getVeleroBackupHandlerLogger(ctx context.Context, name, namespace string) logr.Logger { return log.FromContext(ctx).WithValues("VeleroBackupHandler", types.NamespacedName{Name: name, Namespace: namespace}) } // Create event handler -func (*VeleroBackupHandler) Create(ctx context.Context, evt event.CreateEvent, _ workqueue.RateLimitingInterface) { - nameSpace := evt.Object.GetNamespace() - name := evt.Object.GetName() - logger := getVeleroBackupHandlerLogger(ctx, name, nameSpace) - logger.V(1).Info("Received Create VeleroBackupHandler") - // is this func necessary? +func (*VeleroBackupHandler) Create(_ context.Context, _ event.CreateEvent, _ workqueue.RateLimitingInterface) { + // Create event handler for the Backup object } // Update event handler @@ -74,7 +67,9 @@ func (*VeleroBackupHandler) Update(ctx context.Context, evt event.UpdateEvent, q return } - // TODO AddRateLimited? 
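On the removed "AddRateLimited?" question: inside an event handler the queue argument is the controller's rate-limiting workqueue, and plain Add is the usual choice; backoff belongs to retries of failed reconciles, which the controller already applies when Reconcile returns an error. The enqueue itself, as the hunk below keeps it:

// Fan the Velero Backup event out to its owning NonAdminBackup.
q.Add(reconcile.Request{NamespacedName: types.NamespacedName{
	Name:      nabOriginName,
	Namespace: nabOriginNamespace,
}})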
+ // TODO use GetNonAdminBackupFromVeleroBackup here + // check if I need more log here or in velero predicate + q.Add(reconcile.Request{NamespacedName: types.NamespacedName{ Name: nabOriginName, Namespace: nabOriginNamespace, diff --git a/internal/predicate/composite_predicate.go b/internal/predicate/composite_predicate.go index 113ffda..b067ca7 100644 --- a/internal/predicate/composite_predicate.go +++ b/internal/predicate/composite_predicate.go @@ -40,7 +40,6 @@ func (p CompositePredicate) Create(evt event.CreateEvent) bool { return p.NonAdminBackupPredicate.Create(p.Context, evt) case *velerov1api.Backup: return p.VeleroBackupPredicate.Create(p.Context, evt) - // return false? as we will always create ourselves default: return false } @@ -65,7 +64,6 @@ func (p CompositePredicate) Delete(evt event.DeleteEvent) bool { return p.NonAdminBackupPredicate.Delete(p.Context, evt) case *velerov1api.Backup: return p.VeleroBackupPredicate.Delete(p.Context, evt) - // return false default: return false } @@ -73,7 +71,6 @@ func (p CompositePredicate) Delete(evt event.DeleteEvent) bool { // Generic event filter func (p CompositePredicate) Generic(evt event.GenericEvent) bool { - // TODO Is this necessary? could not be a simple return false function? switch evt.Object.(type) { case *nacv1alpha1.NonAdminBackup: return p.NonAdminBackupPredicate.Generic(p.Context, evt) diff --git a/internal/predicate/nonadminbackup_predicate.go b/internal/predicate/nonadminbackup_predicate.go index 030ea71..4284329 100644 --- a/internal/predicate/nonadminbackup_predicate.go +++ b/internal/predicate/nonadminbackup_predicate.go @@ -29,9 +29,7 @@ import ( ) // NonAdminBackupPredicate contains event filters for Non Admin Backup objects -type NonAdminBackupPredicate struct { - // Logger logr.Logger -} +type NonAdminBackupPredicate struct{} func getNonAdminBackupPredicateLogger(ctx context.Context, name, namespace string) logr.Logger { return log.FromContext(ctx).WithValues("NonAdminBackupPredicate", types.NamespacedName{Name: name, Namespace: namespace}) @@ -42,27 +40,16 @@ func (NonAdminBackupPredicate) Create(ctx context.Context, evt event.CreateEvent nameSpace := evt.Object.GetNamespace() name := evt.Object.GetName() logger := getNonAdminBackupPredicateLogger(ctx, name, nameSpace) - logger.V(1).Info("NonAdminBackupPredicate: Received Create event") - // DO we need all this logic? should not just be return true here? - if nonAdminBackup, ok := evt.Object.(*nacv1alpha1.NonAdminBackup); ok { - if nonAdminBackup.Status.Phase == constant.EmptyString || nonAdminBackup.Status.Phase == nacv1alpha1.NonAdminBackupPhaseNew { - logger.V(1).Info("NonAdminBackupPredicate: Accepted Create event") - return true - } - } - logger.V(1).Info("NonAdminBackupPredicate: Rejecting Create event") - return false + logger.V(1).Info("NonAdminBackupPredicate: Accepted Create event") + return true } // Update event filter func (NonAdminBackupPredicate) Update(ctx context.Context, evt event.UpdateEvent) bool { - // Do not reconcile on Status update nameSpace := evt.ObjectNew.GetNamespace() name := evt.ObjectNew.GetName() logger := getNonAdminBackupPredicateLogger(ctx, name, nameSpace) - logger.V(1).Info("NonAdminBackupPredicate: Received Update event") - // resourceVersion? 
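The composite predicate hunks above all share one dispatch shape, and presumably the Update filter (not shown in these hunks) matches it. Restated once as a sketch:

// Route each event by concrete object type, so a single predicate can serve
// a controller that watches both NonAdminBackup and Velero Backup objects.
func (p CompositePredicate) Update(evt event.UpdateEvent) bool {
	switch evt.ObjectNew.(type) {
	case *nacv1alpha1.NonAdminBackup:
		return p.NonAdminBackupPredicate.Update(p.Context, evt)
	case *velerov1api.Backup:
		return p.VeleroBackupPredicate.Update(p.Context, evt)
	default:
		// Unknown kinds are rejected rather than reconciled blindly.
		return false
	}
}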
if evt.ObjectNew.GetGeneration() != evt.ObjectOld.GetGeneration() { logger.V(1).Info("NonAdminBackupPredicate: Accepted Update event - generation change") return true @@ -87,7 +74,6 @@ func (NonAdminBackupPredicate) Update(ctx context.Context, evt event.UpdateEvent } } logger.V(1).Info("NonAdminBackupPredicate: Rejecting Update event") - return false } diff --git a/internal/predicate/velerobackup_predicate.go b/internal/predicate/velerobackup_predicate.go index dd145d8..5734ba0 100644 --- a/internal/predicate/velerobackup_predicate.go +++ b/internal/predicate/velerobackup_predicate.go @@ -23,8 +23,6 @@ import ( "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/log" - - "github.com/migtools/oadp-non-admin/internal/common/function" ) // VeleroBackupPredicate contains event filters for Velero Backup objects @@ -32,7 +30,6 @@ type VeleroBackupPredicate struct { // We are watching only Velero Backup objects within // namespace where OADP is. OadpVeleroNamespace string - // Logger logr.Logger } // TODO try to remove calls to get logger functions, try to initialize it @@ -41,17 +38,8 @@ func getBackupPredicateLogger(ctx context.Context, name, namespace string) logr. } // Create event filter -func (veleroBackupPredicate VeleroBackupPredicate) Create(ctx context.Context, evt event.CreateEvent) bool { - nameSpace := evt.Object.GetNamespace() - name := evt.Object.GetName() - logger := getBackupPredicateLogger(ctx, name, nameSpace) - logger.V(1).Info("VeleroBackupPredicate: Received Create event") - // TODO log accepted or not - if nameSpace != veleroBackupPredicate.OadpVeleroNamespace { - return false - } - return function.CheckVeleroBackupLabels(evt.Object.GetLabels()) - // refactor idea, move all validation to a function, predicate functions would just need to call it and log info +func (VeleroBackupPredicate) Create(_ context.Context, _ event.CreateEvent) bool { + return false } // Update event filter @@ -63,11 +51,11 @@ func (veleroBackupPredicate VeleroBackupPredicate) Update(ctx context.Context, e // TODO log accepted or not // should not check labels? return nameSpace == veleroBackupPredicate.OadpVeleroNamespace + // refactor idea, move all validation to a function, predicate functions would just need to call it and log info } // Delete event filter func (VeleroBackupPredicate) Delete(_ context.Context, _ event.DeleteEvent) bool { - // only create function when needed? changing in composite to simply return false return false } From cc03f939545ad33b2be7a4e7137a49b2def47539 Mon Sep 17 00:00:00 2001 From: Mateus Oliveira Date: Thu, 5 Sep 2024 16:53:46 -0300 Subject: [PATCH 09/17] fixup! fix: add integration tests for NAB Signed-off-by: Mateus Oliveira --- Makefile | 3 +- internal/common/function/function.go | 5 -- .../nonadminbackup_controller_test.go | 79 +++++++------------ internal/handler/velerobackup_handler.go | 15 ++-- internal/predicate/composite_predicate.go | 3 + .../predicate/nonadminbackup_predicate.go | 23 ++++-- internal/predicate/velerobackup_predicate.go | 19 +++-- 7 files changed, 70 insertions(+), 77 deletions(-) diff --git a/Makefile b/Makefile index 006cf0d..b57d5f9 100644 --- a/Makefile +++ b/Makefile @@ -224,14 +224,13 @@ editorconfig: $(LOCALBIN) ## Download editorconfig locally if necessary. mv $(LOCALBIN)/$${ec_binary} $(EC) ;\ } -# TODO increase to 60? 
+# TODO increase COVERAGE_THRESHOLD=50 .PHONY: ci ci: simulation-test lint docker-build hadolint check-generate check-manifests ec check-images ## Run all project continuous integration (CI) checks locally. .PHONY: simulation-test -# TODO coverage is not in sync in what is being actually done... simulation-test: envtest ## Run unit and integration tests. KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(LOCALBIN) -p path)" go test $(shell go list ./... | grep -v oadp-non-admin/test) -test.coverprofile cover.out -test.v -ginkgo.vv @make check-coverage diff --git a/internal/common/function/function.go b/internal/common/function/function.go index 3fd3a3c..b1929c1 100644 --- a/internal/common/function/function.go +++ b/internal/common/function/function.go @@ -170,7 +170,6 @@ func UpdateNonAdminBackupFromVeleroBackup(ctx context.Context, r client.Client, logger.V(1).Info("NonAdminBackup BackupStatus - up to date") // Check if BackupSpec needs to be updated - // avoid spec change? if !reflect.DeepEqual(nab.Spec.BackupSpec, &veleroBackup.Spec) { nab.Spec.BackupSpec = veleroBackup.Spec.DeepCopy() if err := r.Update(ctx, nab); err != nil { @@ -185,8 +184,6 @@ func UpdateNonAdminBackupFromVeleroBackup(ctx context.Context, r client.Client, return false, nil } -// TODO not used - // CheckVeleroBackupLabels return true if Velero Backup object has required Non Admin labels, false otherwise func CheckVeleroBackupLabels(labels map[string]string) bool { // TODO also need to check for constant.OadpLabel label? @@ -201,8 +198,6 @@ func GetNonAdminBackupFromVeleroBackup(ctx context.Context, clientInstance clien // Check if the backup has the required annotations to identify the associated NonAdminBackup object logger := log.FromContext(ctx) - // should run CheckVeleroBackupLabels here? - annotations := backup.GetAnnotations() annotationsStr := fmt.Sprintf("%v", annotations) diff --git a/internal/controller/nonadminbackup_controller_test.go b/internal/controller/nonadminbackup_controller_test.go index fb99667..74a949f 100644 --- a/internal/controller/nonadminbackup_controller_test.go +++ b/internal/controller/nonadminbackup_controller_test.go @@ -18,10 +18,8 @@ package controller import ( "context" - // "net/http" "fmt" "log" - "strconv" "time" "github.com/onsi/ginkgo/v2" @@ -50,9 +48,8 @@ type nonAdminBackupReconcileScenario struct { resultError error createVeleroBackup bool // TODO create a struct for each test case! 
- ctx context.Context - cancel context.CancelFunc - numberOfResourceVersionChanges int + ctx context.Context + cancel context.CancelFunc } func createTestNonAdminBackup(namespace string, spec nacv1alpha1.NonAdminBackupSpec) *nacv1alpha1.NonAdminBackup { @@ -158,13 +155,6 @@ var _ = ginkgo.Describe("Test single reconciles of NonAdminBackup Reconcile func nonAdminBackup := createTestNonAdminBackup(scenario.namespace, scenario.spec) gomega.Expect(k8sClient.Create(ctx, nonAdminBackup)).To(gomega.Succeed()) - if scenario.priorStatus != nil { - nonAdminBackup.Status = *scenario.priorStatus - gomega.Expect(k8sClient.Status().Update(ctx, nonAdminBackup)).To(gomega.Succeed()) - } - priorResourceVersion, err := strconv.Atoi(nonAdminBackup.ResourceVersion) - gomega.Expect(err).To(gomega.Not(gomega.HaveOccurred())) - if scenario.createVeleroBackup { veleroBackup := &v1.Backup{ ObjectMeta: metav1.ObjectMeta{ @@ -175,6 +165,14 @@ var _ = ginkgo.Describe("Test single reconciles of NonAdminBackup Reconcile func gomega.Expect(k8sClient.Create(ctx, veleroBackup)).To(gomega.Succeed()) } + if scenario.priorStatus != nil { + nonAdminBackup.Status = *scenario.priorStatus + gomega.Expect(k8sClient.Status().Update(ctx, nonAdminBackup)).To(gomega.Succeed()) + } + // easy hack to test that only update call happens per reconcile + // priorResourceVersion, err := strconv.Atoi(nonAdminBackup.ResourceVersion) + // gomega.Expect(err).To(gomega.Not(gomega.HaveOccurred())) + result, err := (&NonAdminBackupReconciler{ Client: k8sClient, Scheme: testEnv.Scheme, @@ -186,8 +184,6 @@ var _ = ginkgo.Describe("Test single reconciles of NonAdminBackup Reconcile func Name: testNonAdminBackupName, }}, ) - // TODO need to collect logs, so they do not appear in test run - // also assert them gomega.Expect(result).To(gomega.Equal(scenario.result)) if scenario.resultError == nil { gomega.Expect(err).To(gomega.Not(gomega.HaveOccurred())) @@ -218,9 +214,9 @@ var _ = ginkgo.Describe("Test single reconciles of NonAdminBackup Reconcile func gomega.Expect(nonAdminBackup.Status.Conditions[index].Message).To(gomega.Equal(scenario.status.Conditions[index].Message)) } - currentResourceVersion, err := strconv.Atoi(nonAdminBackup.ResourceVersion) - gomega.Expect(err).To(gomega.Not(gomega.HaveOccurred())) - gomega.Expect(currentResourceVersion - priorResourceVersion).To(gomega.Equal(1)) + // currentResourceVersion, err := strconv.Atoi(nonAdminBackup.ResourceVersion) + // gomega.Expect(err).To(gomega.Not(gomega.HaveOccurred())) + // gomega.Expect(currentResourceVersion - priorResourceVersion).To(gomega.Equal(1)) }, ginkgo.Entry("Should accept creation of NonAdminBackup", nonAdminBackupReconcileScenario{ namespace: "test-nonadminbackup-reconcile-1", @@ -385,7 +381,7 @@ var _ = ginkgo.Describe("Test single reconciles of NonAdminBackup Reconcile func }, }), ginkgo.Entry("Should accept update of NonAdminBackup phase to BackingOff", nonAdminBackupReconcileScenario{ - // this validates spec again... WRONG!!! + // this validates spec again... namespace: "test-nonadminbackup-reconcile-7", spec: nacv1alpha1.NonAdminBackupSpec{ BackupSpec: &v1.BackupSpec{ @@ -484,9 +480,6 @@ var _ = ginkgo.Describe("Test full reconcile loop of NonAdminBackup Controller", }).SetupWithManager(k8sManager) gomega.Expect(err).ToNot(gomega.HaveOccurred()) - // I am seeing test overlap... - // TODO Be CAREFUL about FLAKES with this approach? 
- // study ref https://book.kubebuilder.io/cronjob-tutorial/writing-tests go func() { defer ginkgo.GinkgoRecover() err = k8sManager.Start(currentTestScenario.ctx) @@ -495,27 +488,17 @@ var _ = ginkgo.Describe("Test full reconcile loop of NonAdminBackup Controller", nonAdminBackup := createTestNonAdminBackup(scenario.namespace, scenario.spec) gomega.Expect(k8sClient.Create(currentTestScenario.ctx, nonAdminBackup)).To(gomega.Succeed()) - originalResourceVersion, err := strconv.Atoi(nonAdminBackup.DeepCopy().ResourceVersion) - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - // TODO how to know reconcile finished??? - gomega.Eventually(func() (bool, error) { - err := k8sClient.Get( - currentTestScenario.ctx, - types.NamespacedName{ - Name: testNonAdminBackupName, - Namespace: scenario.namespace, - }, - nonAdminBackup, - ) - if err != nil { - return false, err - } - currentResourceVersion, err := strconv.Atoi(nonAdminBackup.ResourceVersion) - if err != nil { - return false, err - } - return currentResourceVersion-originalResourceVersion == scenario.numberOfResourceVersionChanges, nil - }, 5*time.Second, 1*time.Second).Should(gomega.BeTrue()) + + // wait NAB reconcile + time.Sleep(1 * time.Second) + gomega.Expect(k8sClient.Get( + currentTestScenario.ctx, + types.NamespacedName{ + Name: testNonAdminBackupName, + Namespace: scenario.namespace, + }, + nonAdminBackup, + )).To(gomega.Succeed()) log.Println("Validating NonAdminBackup Status") gomega.Expect(nonAdminBackup.Status.Phase).To(gomega.Equal(scenario.status.Phase)) @@ -523,6 +506,7 @@ var _ = ginkgo.Describe("Test full reconcile loop of NonAdminBackup Controller", gomega.Expect(nonAdminBackup.Status.VeleroBackupNamespace).To(gomega.Equal(scenario.status.VeleroBackupNamespace)) gomega.Expect(nonAdminBackup.Status.VeleroBackupStatus.Phase).To(gomega.Equal(v1.BackupPhase(constant.EmptyString))) + gomega.Expect(nonAdminBackup.Status.Conditions).To(gomega.HaveLen(len(scenario.status.Conditions))) for index := range nonAdminBackup.Status.Conditions { gomega.Expect(nonAdminBackup.Status.Conditions[index].Type).To(gomega.Equal(scenario.status.Conditions[index].Type)) gomega.Expect(nonAdminBackup.Status.Conditions[index].Status).To(gomega.Equal(scenario.status.Conditions[index].Status)) @@ -531,6 +515,7 @@ var _ = ginkgo.Describe("Test full reconcile loop of NonAdminBackup Controller", } log.Println("Validation of NonAdminBackup Status completed successfully") + log.Println("Mocking VeleroBackup update to finished state") veleroBackup := &v1.Backup{} gomega.Expect(k8sClient.Get( currentTestScenario.ctx, @@ -543,7 +528,6 @@ var _ = ginkgo.Describe("Test full reconcile loop of NonAdminBackup Controller", veleroBackup.Status.Phase = v1.BackupPhaseCompleted // TODO I can not call .Status().Update() for veleroBackup object: backups.velero.io "name..." not found gomega.Expect(k8sClient.Update(currentTestScenario.ctx, veleroBackup)).To(gomega.Succeed()) - // every update produces 2 reconciles: VeleroBackupPredicate on update -> reconcile start -> update nab status -> requeue -> reconcile start gomega.Eventually(func() (bool, error) { err := k8sClient.Get( @@ -557,14 +541,8 @@ var _ = ginkgo.Describe("Test full reconcile loop of NonAdminBackup Controller", if err != nil { return false, err } - currentResourceVersion, err := strconv.Atoi(nonAdminBackup.ResourceVersion) - if err != nil { - return false, err - } - // why 2 ResourceVersion upgrades per veleroBackup update? 
- return currentResourceVersion-originalResourceVersion == scenario.numberOfResourceVersionChanges+2, nil + return nonAdminBackup.Status.VeleroBackupStatus.Phase == v1.BackupPhaseCompleted, nil }, 5*time.Second, 1*time.Second).Should(gomega.BeTrue()) - gomega.Expect(nonAdminBackup.Status.VeleroBackupStatus.Phase).To(gomega.Equal(v1.BackupPhaseCompleted)) gomega.Expect(k8sClient.Delete(currentTestScenario.ctx, nonAdminBackup)).To(gomega.Succeed()) // wait reconcile of delete event @@ -595,7 +573,6 @@ var _ = ginkgo.Describe("Test full reconcile loop of NonAdminBackup Controller", }, }, }, - numberOfResourceVersionChanges: 7, // should be similar to reconcile starts??? }), // ginkgo.Entry("Should DO FULL sad path", nonAdminBackupReconcileScenario{ diff --git a/internal/handler/velerobackup_handler.go b/internal/handler/velerobackup_handler.go index e521cab..d1fe4c1 100644 --- a/internal/handler/velerobackup_handler.go +++ b/internal/handler/velerobackup_handler.go @@ -31,15 +31,20 @@ import ( ) // VeleroBackupHandler contains event handlers for Velero Backup objects -type VeleroBackupHandler struct{} +type VeleroBackupHandler struct { + Logger logr.Logger +} func getVeleroBackupHandlerLogger(ctx context.Context, name, namespace string) logr.Logger { return log.FromContext(ctx).WithValues("VeleroBackupHandler", types.NamespacedName{Name: name, Namespace: namespace}) } // Create event handler -func (*VeleroBackupHandler) Create(_ context.Context, _ event.CreateEvent, _ workqueue.RateLimitingInterface) { - // Create event handler for the Backup object +func (*VeleroBackupHandler) Create(ctx context.Context, evt event.CreateEvent, _ workqueue.RateLimitingInterface) { + nameSpace := evt.Object.GetNamespace() + name := evt.Object.GetName() + logger := getVeleroBackupHandlerLogger(ctx, name, nameSpace) + logger.V(1).Info("Received Create VeleroBackupHandler") } // Update event handler @@ -50,6 +55,7 @@ func (*VeleroBackupHandler) Update(ctx context.Context, evt event.UpdateEvent, q logger.V(1).Info("Received Update VeleroBackupHandler") annotations := evt.ObjectNew.GetAnnotations() + if annotations == nil { logger.V(1).Info("Backup annotations not found") return @@ -67,9 +73,6 @@ func (*VeleroBackupHandler) Update(ctx context.Context, evt event.UpdateEvent, q return } - // TODO use GetNonAdminBackupFromVeleroBackup here - // check if I need more log here or in velero predicate - q.Add(reconcile.Request{NamespacedName: types.NamespacedName{ Name: nabOriginName, Namespace: nabOriginNamespace, diff --git a/internal/predicate/composite_predicate.go b/internal/predicate/composite_predicate.go index b067ca7..d6f9aef 100644 --- a/internal/predicate/composite_predicate.go +++ b/internal/predicate/composite_predicate.go @@ -37,10 +37,13 @@ type CompositePredicate struct { func (p CompositePredicate) Create(evt event.CreateEvent) bool { switch evt.Object.(type) { case *nacv1alpha1.NonAdminBackup: + // Apply NonAdminBackupPredicate return p.NonAdminBackupPredicate.Create(p.Context, evt) case *velerov1api.Backup: + // Apply VeleroBackupPredicate return p.VeleroBackupPredicate.Create(p.Context, evt) default: + // Unknown object type, return false return false } } diff --git a/internal/predicate/nonadminbackup_predicate.go b/internal/predicate/nonadminbackup_predicate.go index 4284329..6bce7f4 100644 --- a/internal/predicate/nonadminbackup_predicate.go +++ b/internal/predicate/nonadminbackup_predicate.go @@ -29,7 +29,9 @@ import ( ) // NonAdminBackupPredicate contains event filters for Non Admin Backup objects 
-type NonAdminBackupPredicate struct {
+	Logger logr.Logger
+}
 
 func getNonAdminBackupPredicateLogger(ctx context.Context, name, namespace string) logr.Logger {
 	return log.FromContext(ctx).WithValues("NonAdminBackupPredicate", types.NamespacedName{Name: name, Namespace: namespace})
@@ -40,8 +42,15 @@ func (NonAdminBackupPredicate) Create(ctx context.Context, evt event.CreateEvent
 	nameSpace := evt.Object.GetNamespace()
 	name := evt.Object.GetName()
 	logger := getNonAdminBackupPredicateLogger(ctx, name, nameSpace)
-	logger.V(1).Info("NonAdminBackupPredicate: Accepted Create event")
-	return true
+	logger.V(1).Info("NonAdminBackupPredicate: Received Create event")
+	if nonAdminBackup, ok := evt.Object.(*nacv1alpha1.NonAdminBackup); ok {
+		if nonAdminBackup.Status.Phase == constant.EmptyString || nonAdminBackup.Status.Phase == nacv1alpha1.NonAdminBackupPhaseNew {
+			logger.V(1).Info("NonAdminBackupPredicate: Accepted Create event")
+			return true
+		}
+	}
+	logger.V(1).Info("NonAdminBackupPredicate: Rejecting Create event")
+	return false
 }
 
 // Update event filter
@@ -49,6 +58,7 @@ func (NonAdminBackupPredicate) Update(ctx context.Context, evt event.UpdateEvent
 	nameSpace := evt.ObjectNew.GetNamespace()
 	name := evt.ObjectNew.GetName()
 	logger := getNonAdminBackupPredicateLogger(ctx, name, nameSpace)
+	logger.V(1).Info("NonAdminBackupPredicate: Received Update event")
 
 	if evt.ObjectNew.GetGeneration() != evt.ObjectOld.GetGeneration() {
 		logger.V(1).Info("NonAdminBackupPredicate: Accepted Update event - generation change")
@@ -62,18 +72,16 @@ func (NonAdminBackupPredicate) Update(ctx context.Context, evt event.UpdateEvent
 
 		// New phase set, reconcile
 		if oldPhase == constant.EmptyString && newPhase != constant.EmptyString {
 			logger.V(1).Info("NonAdminBackupPredicate: Accepted Update event - phase change")
 			return true
 		} else if oldPhase == nacv1alpha1.NonAdminBackupPhaseNew && newPhase == nacv1alpha1.NonAdminBackupPhaseCreated {
-			// This is HARD to understand and TEST
-			// even though reconcile will reach Reconcile loop end
-			// this will trigger a new reconcile
 			logger.V(1).Info("NonAdminBackupPredicate: Accepted Update event - phase created")
 			return true
 		}
 	}
 	logger.V(1).Info("NonAdminBackupPredicate: Rejecting Update event")
+
 	return false
 }
 
@@ -92,6 +100,5 @@ func (NonAdminBackupPredicate) Generic(ctx context.Context, evt event.GenericEve
 	name := evt.Object.GetName()
 	logger := getNonAdminBackupPredicateLogger(ctx, name, nameSpace)
 	logger.V(1).Info("NonAdminBackupPredicate: Accepted Generic event")
-	// refactor: all functions start the same way, move this initialization to a separate function
 	return true
 }
diff --git a/internal/predicate/velerobackup_predicate.go b/internal/predicate/velerobackup_predicate.go
index 5734ba0..7879937 100644
--- a/internal/predicate/velerobackup_predicate.go
+++ b/internal/predicate/velerobackup_predicate.go
@@ -23,6 +23,8 @@ import (
 	"k8s.io/apimachinery/pkg/types"
 	"sigs.k8s.io/controller-runtime/pkg/event"
 	"sigs.k8s.io/controller-runtime/pkg/log"
+
+	"github.com/migtools/oadp-non-admin/internal/common/function"
 )
 
 // VeleroBackupPredicate contains event filters for Velero Backup objects
@@ -30,6 +32,7 @@ type VeleroBackupPredicate struct {
 	// We are watching only Velero Backup objects within
 	// namespace where OADP is.
OadpVeleroNamespace string + Logger logr.Logger } // TODO try to remove calls to get logger functions, try to initialize it @@ -38,8 +41,17 @@ func getBackupPredicateLogger(ctx context.Context, name, namespace string) logr. } // Create event filter -func (VeleroBackupPredicate) Create(_ context.Context, _ event.CreateEvent) bool { - return false +func (veleroBackupPredicate VeleroBackupPredicate) Create(ctx context.Context, evt event.CreateEvent) bool { + nameSpace := evt.Object.GetNamespace() + if nameSpace != veleroBackupPredicate.OadpVeleroNamespace { + return false + } + + name := evt.Object.GetName() + logger := getBackupPredicateLogger(ctx, name, nameSpace) + logger.V(1).Info("VeleroBackupPredicate: Received Create event") + + return function.CheckVeleroBackupLabels(evt.Object.GetLabels()) } // Update event filter @@ -48,10 +60,7 @@ func (veleroBackupPredicate VeleroBackupPredicate) Update(ctx context.Context, e name := evt.ObjectNew.GetName() logger := getBackupPredicateLogger(ctx, name, nameSpace) logger.V(1).Info("VeleroBackupPredicate: Received Update event") - // TODO log accepted or not - // should not check labels? return nameSpace == veleroBackupPredicate.OadpVeleroNamespace - // refactor idea, move all validation to a function, predicate functions would just need to call it and log info } // Delete event filter From 4fd01fa1928a3e907b0d30011c7341bcd63c5bae Mon Sep 17 00:00:00 2001 From: Mateus Oliveira Date: Fri, 6 Sep 2024 12:46:16 -0300 Subject: [PATCH 10/17] fixup! fix: add integration tests for NAB Signed-off-by: Mateus Oliveira --- Makefile | 3 +- internal/common/function/function.go | 41 ----- .../controller/nonadminbackup_controller.go | 99 +++++++---- .../nonadminbackup_controller_test.go | 168 +++++++++--------- 4 files changed, 154 insertions(+), 157 deletions(-) diff --git a/Makefile b/Makefile index b57d5f9..19f0dfd 100644 --- a/Makefile +++ b/Makefile @@ -224,8 +224,7 @@ editorconfig: $(LOCALBIN) ## Download editorconfig locally if necessary. mv $(LOCALBIN)/$${ec_binary} $(EC) ;\ } -# TODO increase -COVERAGE_THRESHOLD=50 +COVERAGE_THRESHOLD=60 .PHONY: ci ci: simulation-test lint docker-build hadolint check-generate check-manifests ec check-images ## Run all project continuous integration (CI) checks locally. 
diff --git a/internal/common/function/function.go b/internal/common/function/function.go index b1929c1..5ff7034 100644 --- a/internal/common/function/function.go +++ b/internal/common/function/function.go @@ -23,9 +23,7 @@ import ( "encoding/hex" "fmt" "os" - "reflect" - "github.com/go-logr/logr" velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" @@ -145,45 +143,6 @@ func GenerateVeleroBackupName(namespace, nabName string) string { return veleroBackupName } -// UpdateNonAdminBackupFromVeleroBackup update, if necessary, NonAdminBackup object fields related to referenced Velero Backup object, if no error occurs -func UpdateNonAdminBackupFromVeleroBackup(ctx context.Context, r client.Client, logger logr.Logger, nab *nacv1alpha1.NonAdminBackup, veleroBackup *velerov1api.Backup) (bool, error) { - logger.V(1).Info("NonAdminBackup BackupSpec and VeleroBackupStatus - request to update") - - if reflect.DeepEqual(nab.Status.VeleroBackupStatus, &veleroBackup.Status) && reflect.DeepEqual(nab.Spec.BackupSpec, &veleroBackup.Spec) { - // No change, no need to update - logger.V(1).Info("NonAdminBackup BackupSpec and BackupStatus - nothing to update") - return false, nil - } - - // Check if BackupStatus needs to be updated - if !reflect.DeepEqual(nab.Status.VeleroBackupStatus, &veleroBackup.Status) || nab.Status.VeleroBackupName != veleroBackup.Name || nab.Status.VeleroBackupNamespace != veleroBackup.Namespace { - nab.Status.VeleroBackupStatus = veleroBackup.Status.DeepCopy() - nab.Status.VeleroBackupName = veleroBackup.Name - nab.Status.VeleroBackupNamespace = veleroBackup.Namespace - if err := r.Status().Update(ctx, nab); err != nil { - logger.Error(err, "NonAdminBackup BackupStatus - Failed to update") - return false, err - } - logger.V(1).Info("NonAdminBackup BackupStatus - updated") - return true, nil - } - logger.V(1).Info("NonAdminBackup BackupStatus - up to date") - - // Check if BackupSpec needs to be updated - if !reflect.DeepEqual(nab.Spec.BackupSpec, &veleroBackup.Spec) { - nab.Spec.BackupSpec = veleroBackup.Spec.DeepCopy() - if err := r.Update(ctx, nab); err != nil { - logger.Error(err, "NonAdminBackup BackupSpec - Failed to update") - return false, err - } - logger.V(1).Info("NonAdminBackup BackupSpec - updated") - return true, nil - } - logger.V(1).Info("NonAdminBackup BackupSpec - up to date") - - return false, nil -} - // CheckVeleroBackupLabels return true if Velero Backup object has required Non Admin labels, false otherwise func CheckVeleroBackupLabels(labels map[string]string) bool { // TODO also need to check for constant.OadpLabel label? 
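The two helpers that replace UpdateNonAdminBackupFromVeleroBackup in the controller diff below keep its reflect.DeepEqual checks, and those checks rely on a detail worth spelling out: reflect.DeepEqual follows pointers, so comparing the stored *velerov1api.BackupStatus against &veleroBackup.Status compares the pointed-to structs, not the addresses. A minimal standalone sketch of that behavior (the backupStatus type here is an illustrative stand-in, not a type from this series):

package main

import (
	"fmt"
	"reflect"
)

// backupStatus stands in for velerov1api.BackupStatus.
type backupStatus struct{ Phase string }

func main() {
	stored := &backupStatus{Phase: "Completed"} // like nab.Status.VeleroBackupStatus
	current := backupStatus{Phase: "Completed"} // like veleroBackup.Status

	// DeepEqual dereferences pointers: two distinct addresses with
	// equal contents compare as equal, so no write would be needed.
	fmt.Println(reflect.DeepEqual(stored, &current)) // true

	current.Phase = "InProgress"
	// Contents diverged, so the controller would copy and write back.
	fmt.Println(reflect.DeepEqual(stored, &current)) // false
}

This is what lets updateNonAdminBackupVeleroBackupStatus and updateNonAdminBackupVeleroBackupSpec in the next diff report whether anything actually changed before issuing a Status().Update or Update call, which also determines whether the reconcile requeues.
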
diff --git a/internal/controller/nonadminbackup_controller.go b/internal/controller/nonadminbackup_controller.go index 1198bc1..78ff3d0 100644 --- a/internal/controller/nonadminbackup_controller.go +++ b/internal/controller/nonadminbackup_controller.go @@ -20,6 +20,7 @@ package controller import ( "context" "errors" + "reflect" "github.com/go-logr/logr" velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" @@ -48,6 +49,13 @@ type NonAdminBackupReconciler struct { OADPNamespace string } +const ( + phaseUpdateRequeue = "NonAdminBackup - Requeue after Phase Update" + conditionUpdateRequeue = "NonAdminBackup - Requeue after Condition Update" + phaseUpdateError = "Failed to update NonAdminBackup Phase" + conditionUpdateError = "Failed to update NonAdminBackup Condition" +) + // +kubebuilder:rbac:groups=nac.oadp.openshift.io,resources=nonadminbackups,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=nac.oadp.openshift.io,resources=nonadminbackups/status,verbs=get;update;patch // +kubebuilder:rbac:groups=nac.oadp.openshift.io,resources=nonadminbackups/finalizers,verbs=update @@ -55,7 +63,8 @@ type NonAdminBackupReconciler struct { // +kubebuilder:rbac:groups=velero.io,resources=backups,verbs=get;list;watch;create;update;patch // Reconcile is part of the main kubernetes reconciliation loop which aims to -// move the current state of the NonAdminBackup to the desired state. +// move the current state of the cluster closer to the desired state, +// defined in NonAdminBackup object Spec. func (r *NonAdminBackupReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { logger := log.FromContext(ctx) logger.V(1).Info("NonAdminBackup Reconcile start") @@ -74,35 +83,28 @@ func (r *NonAdminBackupReconciler) Reconcile(ctx context.Context, req ctrl.Reque reconcileExit, reconcileRequeue, reconcileErr := r.Init(ctx, logger, &nab) if reconcileRequeue { - logger.V(1).Info("NonAdminBackup Reconcile requeue") return ctrl.Result{Requeue: true}, reconcileErr } else if reconcileExit && reconcileErr != nil { return ctrl.Result{}, reconcileErr } else if reconcileExit { - logger.V(1).Info("NonAdminBackup Reconcile exit") return ctrl.Result{}, nil } - // would not be better to validate first? 
reconcileExit, reconcileRequeue, reconcileErr = r.ValidateSpec(ctx, logger, &nab) if reconcileRequeue { - logger.V(1).Info("NonAdminBackup Reconcile requeue") return ctrl.Result{Requeue: true}, reconcileErr } else if reconcileExit && reconcileErr != nil { return ctrl.Result{}, reconcileErr } else if reconcileExit { - logger.V(1).Info("NonAdminBackup Reconcile exit") return ctrl.Result{}, nil } reconcileExit, reconcileRequeue, reconcileErr = r.UpdateSpecStatus(ctx, logger, &nab) if reconcileRequeue { - logger.V(1).Info("NonAdminBackup Reconcile requeue") return ctrl.Result{Requeue: true}, reconcileErr } else if reconcileExit && reconcileErr != nil { return ctrl.Result{}, reconcileErr } else if reconcileExit { - logger.V(1).Info("NonAdminBackup Reconcile exit") return ctrl.Result{}, nil } @@ -129,11 +131,11 @@ func (r *NonAdminBackupReconciler) Init(ctx context.Context, logrLogger logr.Log updated := updateNonAdminPhase(nab, nacv1alpha1.NonAdminBackupPhaseNew) if updated { if err := r.Status().Update(ctx, nab); err != nil { - logger.Error(err, "Failed to update NonAdminBackup Phase") + logger.Error(err, phaseUpdateError) return true, false, err } - logger.V(1).Info("NonAdminBackup - Requeue after Phase Update") + logger.V(1).Info(phaseUpdateRequeue) return false, true, nil } } @@ -150,10 +152,10 @@ func (r *NonAdminBackupReconciler) Init(ctx context.Context, logrLogger logr.Log // logrLogger: Logger instance for logging messages. // nab: Pointer to the NonAdminBackup object. // -// The function validates the BackupSpec from the NonAdminBackup object. +// The function validates the Spec from the NonAdminBackup object. // If the BackupSpec is invalid, the function sets the NonAdminBackup phase to "BackingOff". -// If the BackupSpec is invalid, the function sets the NonAdminBackup condition to "InvalidBackupSpec". -// If the BackupSpec is valid, the function sets the NonAdminBackup condition to "BackupAccepted". +// If the BackupSpec is invalid, the function sets the NonAdminBackup condition Accepted to "False". +// If the BackupSpec is valid, the function sets the NonAdminBackup condition Accepted to "True". 
func (r *NonAdminBackupReconciler) ValidateSpec(ctx context.Context, logrLogger logr.Logger, nab *nacv1alpha1.NonAdminBackup) (exitReconcile bool, requeueReconcile bool, errorReconcile error) { logger := logrLogger @@ -165,11 +167,11 @@ func (r *NonAdminBackupReconciler) ValidateSpec(ctx context.Context, logrLogger updated := updateNonAdminPhase(nab, nacv1alpha1.NonAdminBackupPhaseBackingOff) if updated { if updateErr := r.Status().Update(ctx, nab); updateErr != nil { - logger.Error(updateErr, "Failed to update NonAdminBackup Phase") + logger.Error(updateErr, phaseUpdateError) return true, false, updateErr } - logger.V(1).Info("NonAdminBackup - Requeue after Phase Update") + logger.V(1).Info(phaseUpdateRequeue) return false, true, nil } @@ -178,12 +180,12 @@ func (r *NonAdminBackupReconciler) ValidateSpec(ctx context.Context, logrLogger Type: string(nacv1alpha1.NonAdminConditionAccepted), Status: metav1.ConditionFalse, Reason: "InvalidBackupSpec", - Message: "NonAdminBackup does not contain valid BackupSpec", + Message: "NonAdminBackup does not contain valid Spec", }, ) if updated { if updateErr := r.Status().Update(ctx, nab); updateErr != nil { - logger.Error(updateErr, "Failed to update NonAdminBackup Condition") + logger.Error(updateErr, conditionUpdateError) return true, false, updateErr } } @@ -201,11 +203,11 @@ func (r *NonAdminBackupReconciler) ValidateSpec(ctx context.Context, logrLogger ) if updated { if err := r.Status().Update(ctx, nab); err != nil { - logger.Error(err, "Failed to update NonAdminBackup Condition") + logger.Error(err, conditionUpdateError) return true, false, err } - logger.V(1).Info("NonAdminBackup - Requeue after Condition Update") + logger.V(1).Info(conditionUpdateRequeue) return false, true, nil } @@ -223,7 +225,7 @@ func (r *NonAdminBackupReconciler) ValidateSpec(ctx context.Context, logrLogger // // The function generates the name for the Velero Backup object based on the provided namespace and name. // It then checks if a Velero Backup object with that name already exists. If it does not exist, it creates a new one -// and updates NonAdminBackup Status. Otherwise, updates NonAdminBackup VeleroBackup Status based on Velero Backup object Status. +// and updates NonAdminBackup Status. Otherwise, updates NonAdminBackup VeleroBackup Spec and Status based on Velero Backup object Spec and Status. 
// The function returns boolean values indicating whether the reconciliation loop should exit or requeue func (r *NonAdminBackupReconciler) UpdateSpecStatus(ctx context.Context, logrLogger logr.Logger, nab *nacv1alpha1.NonAdminBackup) (exitReconcile bool, requeueReconcile bool, errorReconcile error) { logger := logrLogger @@ -283,11 +285,11 @@ func (r *NonAdminBackupReconciler) UpdateSpecStatus(ctx context.Context, logrLog updated := updateNonAdminPhase(nab, nacv1alpha1.NonAdminBackupPhaseCreated) if updated { if err := r.Status().Update(ctx, nab); err != nil { - logger.Error(err, "Failed to update NonAdminBackup Phase") + logger.Error(err, phaseUpdateError) return true, false, err } - logger.V(1).Info("NonAdminBackup - Requeue after Phase Update") + logger.V(1).Info(phaseUpdateRequeue) return false, true, nil } @@ -304,11 +306,11 @@ func (r *NonAdminBackupReconciler) UpdateSpecStatus(ctx context.Context, logrLog ) if updated { if err := r.Status().Update(ctx, nab); err != nil { - logger.Error(err, "Failed to update NonAdminBackup Condition") + logger.Error(err, conditionUpdateError) return true, false, err } - logger.V(1).Info("NonAdminBackup - Requeue after Condition Update") + logger.V(1).Info(conditionUpdateRequeue) return false, true, nil } @@ -316,16 +318,29 @@ func (r *NonAdminBackupReconciler) UpdateSpecStatus(ctx context.Context, logrLog // The VeleroBackup within NonAdminBackup will // be reverted back to the previous state - the state which created VeleroBackup // in a first place, so they will be in sync. - veleroBackupLogger.Info("VeleroBackup already exists, updating NonAdminBackup Status") - updatedNab, errBackupUpdate := function.UpdateNonAdminBackupFromVeleroBackup(ctx, r.Client, logger, nab, &veleroBackup) - // Regardless if the status was updated or not, we should not - // requeue here as it was only status update. AND SPEC??? - if errBackupUpdate != nil { - return true, false, errBackupUpdate - } else if updatedNab { - logger.V(1).Info("NonAdminBackup - Requeue after Status Update") // AND SPEC??? + veleroBackupLogger.Info("VeleroBackup already exists, checking if NonAdminBackup VeleroBackupSpec and VeleroBackupStatus needs update") + updated = updateNonAdminBackupVeleroBackupStatus(nab, &veleroBackup) + if updated { + if err := r.Status().Update(ctx, nab); err != nil { + veleroBackupLogger.Error(err, "NonAdminBackup BackupStatus - Failed to update") + return true, false, err + } + + logger.V(1).Info("NonAdminBackup - Requeue after Status Update") + return false, true, nil + } + updated = updateNonAdminBackupVeleroBackupSpec(nab, &veleroBackup) + if updated { + if err := r.Update(ctx, nab); err != nil { + veleroBackupLogger.Error(err, "NonAdminBackup BackupSpec - Failed to update") + return true, false, err + } + + logger.V(1).Info("NonAdminBackup - Requeue after Spec Update") return false, true, nil } + + logger.V(1).Info("NonAdminBackup VeleroBackupSpec and VeleroBackupStatus already up to date") return true, false, nil } @@ -343,7 +358,7 @@ func (r *NonAdminBackupReconciler) SetupWithManager(mgr ctrl.Manager) error { Complete(r) } -// UpdateNonAdminPhase updates the phase of a NonAdminBackup object with the provided phase. +// updateNonAdminPhase updates the phase of a NonAdminBackup object with the provided phase. 
func updateNonAdminPhase(nab *nacv1alpha1.NonAdminBackup, phase nacv1alpha1.NonAdminBackupPhase) bool { // Ensure phase is valid if phase == constant.EmptyString { @@ -357,3 +372,21 @@ func updateNonAdminPhase(nab *nacv1alpha1.NonAdminBackup, phase nacv1alpha1.NonA nab.Status.Phase = phase return true } + +func updateNonAdminBackupVeleroBackupStatus(nab *nacv1alpha1.NonAdminBackup, veleroBackup *velerov1api.Backup) bool { + if !reflect.DeepEqual(nab.Status.VeleroBackupStatus, &veleroBackup.Status) || nab.Status.VeleroBackupName != veleroBackup.Name || nab.Status.VeleroBackupNamespace != veleroBackup.Namespace { + nab.Status.VeleroBackupStatus = veleroBackup.Status.DeepCopy() + nab.Status.VeleroBackupName = veleroBackup.Name + nab.Status.VeleroBackupNamespace = veleroBackup.Namespace + return true + } + return false +} + +func updateNonAdminBackupVeleroBackupSpec(nab *nacv1alpha1.NonAdminBackup, veleroBackup *velerov1api.Backup) bool { + if !reflect.DeepEqual(nab.Spec.BackupSpec, &veleroBackup.Spec) { + nab.Spec.BackupSpec = veleroBackup.Spec.DeepCopy() + return true + } + return false +} diff --git a/internal/controller/nonadminbackup_controller_test.go b/internal/controller/nonadminbackup_controller_test.go index 74a949f..7fd6409 100644 --- a/internal/controller/nonadminbackup_controller_test.go +++ b/internal/controller/nonadminbackup_controller_test.go @@ -38,18 +38,24 @@ import ( const testNonAdminBackupName = "test-non-admin-backup" -type nonAdminBackupReconcileScenario struct { +type nonAdminBackupSingleReconcileScenario struct { namespace string oadpNamespace string - spec nacv1alpha1.NonAdminBackupSpec + resultError error priorStatus *nacv1alpha1.NonAdminBackupStatus + spec nacv1alpha1.NonAdminBackupSpec status nacv1alpha1.NonAdminBackupStatus result reconcile.Result - resultError error createVeleroBackup bool - // TODO create a struct for each test case! 
- ctx context.Context - cancel context.CancelFunc +} + +type nonAdminBackupFullReconcileScenario struct { + ctx context.Context + cancel context.CancelFunc + namespace string + oadpNamespace string + spec nacv1alpha1.NonAdminBackupSpec + status nacv1alpha1.NonAdminBackupStatus } func createTestNonAdminBackup(namespace string, spec nacv1alpha1.NonAdminBackupSpec) *nacv1alpha1.NonAdminBackup { @@ -65,8 +71,8 @@ func createTestNonAdminBackup(namespace string, spec nacv1alpha1.NonAdminBackupS var _ = ginkgo.Describe("Test single reconciles of NonAdminBackup Reconcile function", func() { var ( ctx = context.Background() - currentTestScenario nonAdminBackupReconcileScenario - updateTestScenario = func(scenario nonAdminBackupReconcileScenario) { + currentTestScenario nonAdminBackupSingleReconcileScenario + updateTestScenario = func(scenario nonAdminBackupSingleReconcileScenario) { currentTestScenario = scenario } ) @@ -101,8 +107,8 @@ var _ = ginkgo.Describe("Test single reconciles of NonAdminBackup Reconcile func gomega.Expect(k8sClient.Delete(ctx, namespace)).To(gomega.Succeed()) }) - ginkgo.DescribeTable("Reconcile should NOT return an error on Delete event", - func(scenario nonAdminBackupReconcileScenario) { + ginkgo.DescribeTable("should Reconcile on Delete event", + func(scenario nonAdminBackupSingleReconcileScenario) { updateTestScenario(scenario) namespace := &corev1.Namespace{ @@ -126,14 +132,14 @@ var _ = ginkgo.Describe("Test single reconciles of NonAdminBackup Reconcile func gomega.Expect(result).To(gomega.Equal(scenario.result)) gomega.Expect(err).To(gomega.Not(gomega.HaveOccurred())) }, - ginkgo.Entry("Should accept deletion of NonAdminBackup", nonAdminBackupReconcileScenario{ + ginkgo.Entry("Should accept deletion of NonAdminBackup", nonAdminBackupSingleReconcileScenario{ namespace: "test-nonadminbackup-reconcile-0", result: reconcile.Result{}, }), ) - ginkgo.DescribeTable("Reconcile should NOT return an error on Create and Update events", - func(scenario nonAdminBackupReconcileScenario) { + ginkgo.DescribeTable("should Reconcile on Create and Update events and on Requeue", + func(scenario nonAdminBackupSingleReconcileScenario) { updateTestScenario(scenario) namespace := &corev1.Namespace{ @@ -169,7 +175,7 @@ var _ = ginkgo.Describe("Test single reconciles of NonAdminBackup Reconcile func nonAdminBackup.Status = *scenario.priorStatus gomega.Expect(k8sClient.Status().Update(ctx, nonAdminBackup)).To(gomega.Succeed()) } - // easy hack to test that only update call happens per reconcile + // easy hack to test that only one update call happens per reconcile // priorResourceVersion, err := strconv.Atoi(nonAdminBackup.ResourceVersion) // gomega.Expect(err).To(gomega.Not(gomega.HaveOccurred())) @@ -214,18 +220,19 @@ var _ = ginkgo.Describe("Test single reconciles of NonAdminBackup Reconcile func gomega.Expect(nonAdminBackup.Status.Conditions[index].Message).To(gomega.Equal(scenario.status.Conditions[index].Message)) } + // easy hack to test that only one update call happens per reconcile // currentResourceVersion, err := strconv.Atoi(nonAdminBackup.ResourceVersion) // gomega.Expect(err).To(gomega.Not(gomega.HaveOccurred())) // gomega.Expect(currentResourceVersion - priorResourceVersion).To(gomega.Equal(1)) }, - ginkgo.Entry("Should accept creation of NonAdminBackup", nonAdminBackupReconcileScenario{ + ginkgo.Entry("Should accept creation of NonAdminBackup", nonAdminBackupSingleReconcileScenario{ namespace: "test-nonadminbackup-reconcile-1", result: reconcile.Result{Requeue: true}, status: 
nacv1alpha1.NonAdminBackupStatus{ Phase: nacv1alpha1.NonAdminBackupPhaseNew, }, }), - ginkgo.Entry("Should accept update of NonAdminBackup phase to new", nonAdminBackupReconcileScenario{ + ginkgo.Entry("Should accept update of NonAdminBackup phase to new", nonAdminBackupSingleReconcileScenario{ namespace: "test-nonadminbackup-reconcile-2", spec: nacv1alpha1.NonAdminBackupSpec{ BackupSpec: &v1.BackupSpec{}, @@ -246,7 +253,7 @@ var _ = ginkgo.Describe("Test single reconciles of NonAdminBackup Reconcile func }, }, }), - ginkgo.Entry("Should accept update of NonAdminBackup Condition to Accepted True", nonAdminBackupReconcileScenario{ + ginkgo.Entry("Should accept update of NonAdminBackup Condition to Accepted True", nonAdminBackupSingleReconcileScenario{ namespace: "test-nonadminbackup-reconcile-3", oadpNamespace: "test-nonadminbackup-reconcile-3-oadp", spec: nacv1alpha1.NonAdminBackupSpec{ @@ -278,7 +285,7 @@ var _ = ginkgo.Describe("Test single reconciles of NonAdminBackup Reconcile func }, }, }), - ginkgo.Entry("Should accept update of NonAdminBackup phase to created", nonAdminBackupReconcileScenario{ + ginkgo.Entry("Should accept update of NonAdminBackup phase to created", nonAdminBackupSingleReconcileScenario{ namespace: "test-nonadminbackup-reconcile-4", oadpNamespace: "test-nonadminbackup-reconcile-4-oadp", spec: nacv1alpha1.NonAdminBackupSpec{ @@ -317,7 +324,7 @@ var _ = ginkgo.Describe("Test single reconciles of NonAdminBackup Reconcile func }, }, }), - ginkgo.Entry("Should accept update of NonAdminBackup Condition to Queued True", nonAdminBackupReconcileScenario{ + ginkgo.Entry("Should accept update of NonAdminBackup Condition to Queued True", nonAdminBackupSingleReconcileScenario{ namespace: "test-nonadminbackup-reconcile-5", oadpNamespace: "test-nonadminbackup-reconcile-5-oadp", spec: nacv1alpha1.NonAdminBackupSpec{ @@ -365,7 +372,7 @@ var _ = ginkgo.Describe("Test single reconciles of NonAdminBackup Reconcile func }, }, }), - ginkgo.Entry("Should accept update of NonAdminBackup phase to new - invalid spec", nonAdminBackupReconcileScenario{ + ginkgo.Entry("Should accept update of NonAdminBackup phase to new - invalid spec", nonAdminBackupSingleReconcileScenario{ namespace: "test-nonadminbackup-reconcile-6", spec: nacv1alpha1.NonAdminBackupSpec{ BackupSpec: &v1.BackupSpec{ @@ -380,7 +387,7 @@ var _ = ginkgo.Describe("Test single reconciles of NonAdminBackup Reconcile func Phase: nacv1alpha1.NonAdminBackupPhaseBackingOff, }, }), - ginkgo.Entry("Should accept update of NonAdminBackup phase to BackingOff", nonAdminBackupReconcileScenario{ + ginkgo.Entry("Should accept update of NonAdminBackup phase to BackingOff", nonAdminBackupSingleReconcileScenario{ // this validates spec again... 
namespace: "test-nonadminbackup-reconcile-7", spec: nacv1alpha1.NonAdminBackupSpec{ @@ -399,7 +406,7 @@ var _ = ginkgo.Describe("Test single reconciles of NonAdminBackup Reconcile func Type: "Accepted", Status: metav1.ConditionFalse, Reason: "InvalidBackupSpec", - Message: "NonAdminBackup does not contain valid BackupSpec", + Message: "NonAdminBackup does not contain valid Spec", }, }, }, @@ -409,8 +416,8 @@ var _ = ginkgo.Describe("Test single reconciles of NonAdminBackup Reconcile func var _ = ginkgo.Describe("Test full reconcile loop of NonAdminBackup Controller", func() { var ( - currentTestScenario nonAdminBackupReconcileScenario - updateTestScenario = func(scenario nonAdminBackupReconcileScenario) { + currentTestScenario nonAdminBackupFullReconcileScenario + updateTestScenario = func(scenario nonAdminBackupFullReconcileScenario) { ctx, cancel := context.WithCancel(context.Background()) scenario.ctx = ctx scenario.cancel = cancel @@ -434,24 +441,12 @@ var _ = ginkgo.Describe("Test full reconcile loop of NonAdminBackup Controller", gomega.Expect(k8sClient.Delete(currentTestScenario.ctx, namespace)).To(gomega.Succeed()) currentTestScenario.cancel() - // https://github.com/kubernetes-sigs/controller-runtime/issues/1280 - // clientTransport := &http.Transport{} - // clientTransport.CloseIdleConnections() - // gomega.Eventually(func() error { - // ret := ctx.Done() - // if ret != nil { - // return fmt.Errorf("not ready :(") - // } - // close(ret) - // return nil - // }, 5*time.Second, 1*time.Millisecond).Should(gomega.BeNil()) - // TODO HOW to wait process finish? - // this is still being finished in next step + // wait cancel time.Sleep(1 * time.Second) }) - ginkgo.DescribeTable("Reconcile loop should succeed", - func(scenario nonAdminBackupReconcileScenario) { + ginkgo.DescribeTable("full reconcile loop", + func(scenario nonAdminBackupFullReconcileScenario) { updateTestScenario(scenario) namespace := &corev1.Namespace{ @@ -486,6 +481,8 @@ var _ = ginkgo.Describe("Test full reconcile loop of NonAdminBackup Controller", gomega.Expect(err).ToNot(gomega.HaveOccurred(), "failed to run manager") }() + // wait manager start + time.Sleep(1 * time.Second) nonAdminBackup := createTestNonAdminBackup(scenario.namespace, scenario.spec) gomega.Expect(k8sClient.Create(currentTestScenario.ctx, nonAdminBackup)).To(gomega.Succeed()) @@ -504,7 +501,9 @@ var _ = ginkgo.Describe("Test full reconcile loop of NonAdminBackup Controller", gomega.Expect(nonAdminBackup.Status.Phase).To(gomega.Equal(scenario.status.Phase)) gomega.Expect(nonAdminBackup.Status.VeleroBackupName).To(gomega.Equal(scenario.status.VeleroBackupName)) gomega.Expect(nonAdminBackup.Status.VeleroBackupNamespace).To(gomega.Equal(scenario.status.VeleroBackupNamespace)) - gomega.Expect(nonAdminBackup.Status.VeleroBackupStatus.Phase).To(gomega.Equal(v1.BackupPhase(constant.EmptyString))) + if len(scenario.status.VeleroBackupName) > 0 { + gomega.Expect(nonAdminBackup.Status.VeleroBackupStatus.Phase).To(gomega.Equal(v1.BackupPhase(constant.EmptyString))) + } gomega.Expect(nonAdminBackup.Status.Conditions).To(gomega.HaveLen(len(scenario.status.Conditions))) for index := range nonAdminBackup.Status.Conditions { @@ -515,40 +514,42 @@ var _ = ginkgo.Describe("Test full reconcile loop of NonAdminBackup Controller", } log.Println("Validation of NonAdminBackup Status completed successfully") - log.Println("Mocking VeleroBackup update to finished state") - veleroBackup := &v1.Backup{} - gomega.Expect(k8sClient.Get( - currentTestScenario.ctx, - 
types.NamespacedName{
-				Name:      scenario.status.VeleroBackupName,
-				Namespace: scenario.oadpNamespace,
-			},
-			veleroBackup,
-		)).To(gomega.Succeed())
-		veleroBackup.Status.Phase = v1.BackupPhaseCompleted
-		// TODO I can not call .Status().Update() for veleroBackup object: backups.velero.io "name..." not found
-		gomega.Expect(k8sClient.Update(currentTestScenario.ctx, veleroBackup)).To(gomega.Succeed())
-
-		gomega.Eventually(func() (bool, error) {
-			err := k8sClient.Get(
+		if len(scenario.status.VeleroBackupName) > 0 {
+			log.Println("Mocking VeleroBackup update to finished state")
+			veleroBackup := &v1.Backup{}
+			gomega.Expect(k8sClient.Get(
 				currentTestScenario.ctx,
 				types.NamespacedName{
-					Name:      testNonAdminBackupName,
-					Namespace: scenario.namespace,
+					Name:      scenario.status.VeleroBackupName,
+					Namespace: scenario.oadpNamespace,
 				},
-				nonAdminBackup,
-			)
-			if err != nil {
-				return false, err
-			}
-			return nonAdminBackup.Status.VeleroBackupStatus.Phase == v1.BackupPhaseCompleted, nil
-		}, 5*time.Second, 1*time.Second).Should(gomega.BeTrue())
+				veleroBackup,
+			)).To(gomega.Succeed())
+			veleroBackup.Status.Phase = v1.BackupPhaseCompleted
+			// TODO I can not call .Status().Update() for veleroBackup object: backups.velero.io "name..." not found
+			gomega.Expect(k8sClient.Update(currentTestScenario.ctx, veleroBackup)).To(gomega.Succeed())
+
+			gomega.Eventually(func() (bool, error) {
+				err := k8sClient.Get(
+					currentTestScenario.ctx,
+					types.NamespacedName{
+						Name:      testNonAdminBackupName,
+						Namespace: scenario.namespace,
+					},
+					nonAdminBackup,
+				)
+				if err != nil {
+					return false, err
+				}
+				return nonAdminBackup.Status.VeleroBackupStatus.Phase == v1.BackupPhaseCompleted, nil
+			}, 5*time.Second, 1*time.Second).Should(gomega.BeTrue())
+		}
 
 		gomega.Expect(k8sClient.Delete(currentTestScenario.ctx, nonAdminBackup)).To(gomega.Succeed())
 		// wait reconcile of delete event
 		time.Sleep(1 * time.Second)
 	},
-	ginkgo.Entry("Should create, update and delete NonAdminBackup", nonAdminBackupReconcileScenario{
+	ginkgo.Entry("Should update NonAdminBackup until VeleroBackup completes and then delete it", nonAdminBackupFullReconcileScenario{
 		namespace:     "test-nonadminbackup-reconcile-full-1",
 		oadpNamespace: "test-nonadminbackup-reconcile-full-1-oadp",
 		spec: nacv1alpha1.NonAdminBackupSpec{
@@ -574,20 +575,25 @@ var _ = ginkgo.Describe("Test full reconcile loop of NonAdminBackup Controller",
 			},
 		},
 	}),
-
-	// ginkgo.Entry("Should DO FULL sad path", nonAdminBackupReconcileScenario{
-	// 	namespace: "test-nonadminbackup-reconcile-full-2",
-	// 	oadpNamespace: "test-nonadminbackup-reconcile-full-2-oadp",
-	// 	spec: nacv1alpha1.NonAdminBackupSpec{},
-	// 	priorStatus: &nacv1alpha1.NonAdminBackupStatus{
-	// 		Phase: nacv1alpha1.NonAdminBackupPhaseNew,
-	// 	},
-	// 	status: nacv1alpha1.NonAdminBackupStatus{
-	// 		Phase: nacv1alpha1.NonAdminBackupPhaseBackingOff,
-	// 	},
-	// 	numberOfResourceVersionChanges: 2,
-	// }),
-	// events 3: 1 create, 2 update (1 rejected)
-	// 2 reconcile starts
+	ginkgo.Entry("Should update NonAdminBackup until it invalidates and then delete it", nonAdminBackupFullReconcileScenario{
+		namespace:     "test-nonadminbackup-reconcile-full-2",
+		oadpNamespace: "test-nonadminbackup-reconcile-full-2-oadp",
+		spec: nacv1alpha1.NonAdminBackupSpec{
+			BackupSpec: &v1.BackupSpec{
+				IncludedNamespaces: []string{"not-valid"},
+			},
+		},
+		status: nacv1alpha1.NonAdminBackupStatus{
+			Phase: nacv1alpha1.NonAdminBackupPhaseBackingOff,
+			Conditions: []metav1.Condition{
+				{
+					Type:    "Accepted",
+					Status:  metav1.ConditionFalse,
+					Reason: 
"InvalidBackupSpec", + Message: "NonAdminBackup does not contain valid Spec", + }, + }, + }, + }), ) }) From cbcffa44247768e153c40b1dbfafbfbc8f1e2470 Mon Sep 17 00:00:00 2001 From: Mateus Oliveira Date: Fri, 6 Sep 2024 12:48:09 -0300 Subject: [PATCH 11/17] fixup! fix: add integration tests for NAB Signed-off-by: Mateus Oliveira --- internal/common/function/function.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/common/function/function.go b/internal/common/function/function.go index 5ff7034..98a4add 100644 --- a/internal/common/function/function.go +++ b/internal/common/function/function.go @@ -91,8 +91,8 @@ func containsOnlyNamespace(namespaces []string, namespace string) bool { // GetBackupSpecFromNonAdminBackup return BackupSpec object from NonAdminBackup spec, if no error occurs func GetBackupSpecFromNonAdminBackup(nonAdminBackup *nacv1alpha1.NonAdminBackup) (*velerov1api.BackupSpec, error) { // TODO https://github.com/migtools/oadp-non-admin/issues/60 + // this should be Kubernetes API validation if nonAdminBackup.Spec.BackupSpec == nil { - // this should be Kubernetes API validation return nil, fmt.Errorf("BackupSpec is not defined") } From 6b1b5702551ae55853d9467928ecc79757510f16 Mon Sep 17 00:00:00 2001 From: Mateus Oliveira Date: Sun, 8 Sep 2024 14:18:03 -0300 Subject: [PATCH 12/17] fixup! fix: add integration tests for NAB Signed-off-by: Mateus Oliveira --- cmd/main.go | 3 +- hack/extra-crds/velero.io_backups.yaml | 428 ++++++++---------- internal/common/function/function.go | 6 - .../controller/nonadminbackup_controller.go | 71 ++- .../nonadminbackup_controller_test.go | 406 ++++++++++------- .../predicate/nonadminbackup_predicate.go | 2 +- 6 files changed, 454 insertions(+), 462 deletions(-) diff --git a/cmd/main.go b/cmd/main.go index 0d81d67..a363de5 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -38,7 +38,6 @@ import ( nacv1alpha1 "github.com/migtools/oadp-non-admin/api/v1alpha1" "github.com/migtools/oadp-non-admin/internal/common/constant" - "github.com/migtools/oadp-non-admin/internal/common/function" "github.com/migtools/oadp-non-admin/internal/controller" ) @@ -99,7 +98,7 @@ func main() { TLSOpts: tlsOpts, }) - oadpNamespace := function.GetOADPNamespace() + oadpNamespace := os.Getenv(constant.NamespaceEnvVar) if len(oadpNamespace) == 0 { setupLog.Error(fmt.Errorf("%v environment variable is empty", constant.NamespaceEnvVar), "environment variable must be set") os.Exit(1) diff --git a/hack/extra-crds/velero.io_backups.yaml b/hack/extra-crds/velero.io_backups.yaml index d14b13d..dd51184 100644 --- a/hack/extra-crds/velero.io_backups.yaml +++ b/hack/extra-crds/velero.io_backups.yaml @@ -1,10 +1,11 @@ -# from https://github.com/openshift/oadp-operator/blob/master/config/crd/bases/velero.io_backups.yaml +# from https://github.com/openshift/velero/blob/d8101a298016/config/crd/v1/bases/velero.io_backups.yaml +# from go.mod replace github.com/vmware-tanzu/velero => github.com/openshift/velero v0.10.2-0.20231024175012-d8101a298016 --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.14.0 + controller-gen.kubebuilder.io/version: v0.12.0 name: backups.velero.io spec: group: velero.io @@ -18,24 +19,18 @@ spec: - name: v1 schema: openAPIV3Schema: - description: |- - Backup is a Velero resource that represents the capture of Kubernetes + description: Backup is a Velero resource that represents the capture of Kubernetes cluster state at a point in time (API objects and 
associated volume state). properties: apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object @@ -43,63 +38,55 @@ spec: description: BackupSpec defines the specification for a Velero backup. properties: csiSnapshotTimeout: - description: |- - CSISnapshotTimeout specifies the time used to wait for CSI VolumeSnapshot status turns to - ReadyToUse during creation, before returning error as timeout. - The default value is 10 minute. + description: CSISnapshotTimeout specifies the time used to wait for + CSI VolumeSnapshot status turns to ReadyToUse during creation, before + returning error as timeout. The default value is 10 minute. type: string datamover: - description: |- - DataMover specifies the data mover to be used by the backup. - If DataMover is "" or "velero", the built-in data mover will be used. + description: DataMover specifies the data mover to be used by the + backup. If DataMover is "" or "velero", the built-in data mover + will be used. type: string defaultVolumesToFsBackup: - description: |- - DefaultVolumesToFsBackup specifies whether pod volume file system backup should be used - for all volumes by default. + description: DefaultVolumesToFsBackup specifies whether pod volume + file system backup should be used for all volumes by default. nullable: true type: boolean defaultVolumesToRestic: - description: |- - DefaultVolumesToRestic specifies whether restic should be used to take a - backup of all pod volumes by default. - - - Deprecated: this field is no longer used and will be removed entirely in future. Use DefaultVolumesToFsBackup instead. + description: "DefaultVolumesToRestic specifies whether restic should + be used to take a backup of all pod volumes by default. \n Deprecated: + this field is no longer used and will be removed entirely in future. + Use DefaultVolumesToFsBackup instead." nullable: true type: boolean excludedClusterScopedResources: - description: |- - ExcludedClusterScopedResources is a slice of cluster-scoped - resource type names to exclude from the backup. - If set to "*", all cluster-scoped resource types are excluded. - The default value is empty. + description: ExcludedClusterScopedResources is a slice of cluster-scoped + resource type names to exclude from the backup. 
If set to "*", all + cluster-scoped resource types are excluded. The default value is + empty. items: type: string nullable: true type: array excludedNamespaceScopedResources: - description: |- - ExcludedNamespaceScopedResources is a slice of namespace-scoped - resource type names to exclude from the backup. - If set to "*", all namespace-scoped resource types are excluded. - The default value is empty. + description: ExcludedNamespaceScopedResources is a slice of namespace-scoped + resource type names to exclude from the backup. If set to "*", all + namespace-scoped resource types are excluded. The default value + is empty. items: type: string nullable: true type: array excludedNamespaces: - description: |- - ExcludedNamespaces contains a list of namespaces that are not - included in the backup. + description: ExcludedNamespaces contains a list of namespaces that + are not included in the backup. items: type: string nullable: true type: array excludedResources: - description: |- - ExcludedResources is a slice of resource names that are not - included in the backup. + description: ExcludedResources is a slice of resource names that are + not included in the backup. items: type: string nullable: true @@ -112,9 +99,9 @@ spec: description: Resources are hooks that should be executed when backing up individual instances of a resource. items: - description: |- - BackupResourceHookSpec defines one or more BackupResourceHooks that should be executed based on - the rules defined for namespaces, resources, and label selector. + description: BackupResourceHookSpec defines one or more BackupResourceHooks + that should be executed based on the rules defined for namespaces, + resources, and label selector. properties: excludedNamespaces: description: ExcludedNamespaces specifies the namespaces @@ -131,17 +118,17 @@ spec: nullable: true type: array includedNamespaces: - description: |- - IncludedNamespaces specifies the namespaces to which this hook spec applies. If empty, it applies + description: IncludedNamespaces specifies the namespaces + to which this hook spec applies. If empty, it applies to all namespaces. items: type: string nullable: true type: array includedResources: - description: |- - IncludedResources specifies the resources to which this hook spec applies. If empty, it applies - to all resources. + description: IncludedResources specifies the resources to + which this hook spec applies. If empty, it applies to + all resources. items: type: string nullable: true @@ -155,8 +142,8 @@ spec: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that + description: A label selector requirement is a selector + that contains values, a key, and an operator that relates the key and values. properties: key: @@ -164,16 +151,17 @@ spec: applies to. type: string operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. + description: operator represents a key's relationship + to a set of values. Valid operators are In, + NotIn, Exists and DoesNotExist. type: string values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. 
+ description: values is an array of string values. + If the operator is In or NotIn, the values array + must be non-empty. If the operator is Exists + or DoesNotExist, the values array must be empty. + This array is replaced during a strategic merge + patch. items: type: string type: array @@ -185,10 +173,11 @@ spec: matchLabels: additionalProperties: type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. + description: matchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field + is "key", the operator is "In", and the values array + contains only "value". The requirements are ANDed. type: object type: object x-kubernetes-map-type: atomic @@ -196,9 +185,10 @@ spec: description: Name is the name of this hook. type: string post: - description: |- - PostHooks is a list of BackupResourceHooks to execute after storing the item in the backup. - These are executed after all "additional items" from item actions are processed. + description: PostHooks is a list of BackupResourceHooks + to execute after storing the item in the backup. These + are executed after all "additional items" from item actions + are processed. items: description: BackupResourceHook defines a hook for a resource. properties: @@ -213,9 +203,10 @@ spec: minItems: 1 type: array container: - description: |- - Container is the container in the pod where the command should be executed. If not specified, - the pod's first container is used. + description: Container is the container in the + pod where the command should be executed. If + not specified, the pod's first container is + used. type: string onError: description: OnError specifies how Velero should @@ -226,9 +217,9 @@ spec: - Fail type: string timeout: - description: |- - Timeout defines the maximum amount of time Velero should wait for the hook to complete before - considering the execution a failure. + description: Timeout defines the maximum amount + of time Velero should wait for the hook to complete + before considering the execution a failure. type: string required: - command @@ -238,9 +229,10 @@ spec: type: object type: array pre: - description: |- - PreHooks is a list of BackupResourceHooks to execute prior to storing the item in the backup. - These are executed before any "additional items" from item actions are processed. + description: PreHooks is a list of BackupResourceHooks to + execute prior to storing the item in the backup. These + are executed before any "additional items" from item actions + are processed. items: description: BackupResourceHook defines a hook for a resource. properties: @@ -255,9 +247,10 @@ spec: minItems: 1 type: array container: - description: |- - Container is the container in the pod where the command should be executed. If not specified, - the pod's first container is used. + description: Container is the container in the + pod where the command should be executed. If + not specified, the pod's first container is + used. type: string onError: description: OnError specifies how Velero should @@ -268,9 +261,9 @@ spec: - Fail type: string timeout: - description: |- - Timeout defines the maximum amount of time Velero should wait for the hook to complete before - considering the execution a failure. 
+ description: Timeout defines the maximum amount + of time Velero should wait for the hook to complete + before considering the execution a failure. type: string required: - command @@ -286,81 +279,74 @@ spec: type: array type: object includeClusterResources: - description: |- - IncludeClusterResources specifies whether cluster-scoped resources - should be included for consideration in the backup. + description: IncludeClusterResources specifies whether cluster-scoped + resources should be included for consideration in the backup. nullable: true type: boolean includedClusterScopedResources: - description: |- - IncludedClusterScopedResources is a slice of cluster-scoped - resource type names to include in the backup. - If set to "*", all cluster-scoped resource types are included. - The default value is empty, which means only related - cluster-scoped resources are included. + description: IncludedClusterScopedResources is a slice of cluster-scoped + resource type names to include in the backup. If set to "*", all + cluster-scoped resource types are included. The default value is + empty, which means only related cluster-scoped resources are included. items: type: string nullable: true type: array includedNamespaceScopedResources: - description: |- - IncludedNamespaceScopedResources is a slice of namespace-scoped - resource type names to include in the backup. - The default value is "*". + description: IncludedNamespaceScopedResources is a slice of namespace-scoped + resource type names to include in the backup. The default value + is "*". items: type: string nullable: true type: array includedNamespaces: - description: |- - IncludedNamespaces is a slice of namespace names to include objects - from. If empty, all namespaces are included. + description: IncludedNamespaces is a slice of namespace names to include + objects from. If empty, all namespaces are included. items: type: string nullable: true type: array includedResources: - description: |- - IncludedResources is a slice of resource names to include + description: IncludedResources is a slice of resource names to include in the backup. If empty, all resources are included. items: type: string nullable: true type: array itemOperationTimeout: - description: |- - ItemOperationTimeout specifies the time used to wait for asynchronous BackupItemAction operations - The default value is 4 hour. + description: ItemOperationTimeout specifies the time used to wait + for asynchronous BackupItemAction operations The default value is + 1 hour. type: string labelSelector: - description: |- - LabelSelector is a metav1.LabelSelector to filter with - when adding individual objects to the backup. If empty - or nil, all objects are included. Optional. + description: LabelSelector is a metav1.LabelSelector to filter with + when adding individual objects to the backup. If empty or nil, all + objects are included. Optional. nullable: true properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. + description: A label selector requirement is a selector that + contains values, a key, and an operator that relates the key + and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: |- - operator represents a key's relationship to a set of values. 
- Valid operators are In, NotIn, Exists and DoesNotExist. + description: operator represents a key's relationship to + a set of values. Valid operators are In, NotIn, Exists + and DoesNotExist. type: string values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic + description: values is an array of string values. If the + operator is In or NotIn, the values array must be non-empty. + If the operator is Exists or DoesNotExist, the values + array must be empty. This array is replaced during a strategic merge patch. items: type: string @@ -373,10 +359,11 @@ spec: matchLabels: additionalProperties: type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. + description: matchLabels is a map of {key,value} pairs. A single + {key,value} in the matchLabels map is equivalent to an element + of matchExpressions, whose key field is "key", the operator + is "In", and the values array contains only "value". The requirements + are ANDed. type: object type: object x-kubernetes-map-type: atomic @@ -388,41 +375,40 @@ spec: type: object type: object orLabelSelectors: - description: |- - OrLabelSelectors is list of metav1.LabelSelector to filter with - when adding individual objects to the backup. If multiple provided + description: OrLabelSelectors is list of metav1.LabelSelector to filter + with when adding individual objects to the backup. If multiple provided they will be joined by the OR operator. LabelSelector as well as - OrLabelSelectors cannot co-exist in backup request, only one of them - can be used. + OrLabelSelectors cannot co-exist in backup request, only one of + them can be used. items: - description: |- - A label selector is a label query over a set of resources. The result of matchLabels and - matchExpressions are ANDed. An empty label selector matches all objects. A null - label selector matches no objects. + description: A label selector is a label query over a set of resources. + The result of matchLabels and matchExpressions are ANDed. An empty + label selector matches all objects. A null label selector matches + no objects. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. + description: A label selector requirement is a selector that + contains values, a key, and an operator that relates the + key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. + description: operator represents a key's relationship + to a set of values. Valid operators are In, NotIn, Exists + and DoesNotExist. type: string values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. 
This array is replaced during a strategic - merge patch. + description: values is an array of string values. If the + operator is In or NotIn, the values array must be non-empty. + If the operator is Exists or DoesNotExist, the values + array must be empty. This array is replaced during a + strategic merge patch. items: type: string type: array @@ -434,10 +420,11 @@ spec: matchLabels: additionalProperties: type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. + description: matchLabels is a map of {key,value} pairs. A single + {key,value} in the matchLabels map is equivalent to an element + of matchExpressions, whose key field is "key", the operator + is "In", and the values array contains only "value". The requirements + are ANDed. type: object type: object x-kubernetes-map-type: atomic @@ -446,10 +433,11 @@ spec: orderedResources: additionalProperties: type: string - description: |- - OrderedResources specifies the backup order of resources of specific Kind. - The map key is the resource name and value is a list of object names separated by commas. - Each resource name has format "namespace/objectname". For cluster resources, simply use "objectname". + description: OrderedResources specifies the backup order of resources + of specific Kind. The map key is the resource name and value is + a list of object names separated by commas. Each resource name has + format "namespace/objectname". For cluster resources, simply use + "objectname". nullable: true type: object resourcePolicy: @@ -457,10 +445,10 @@ spec: that backup should follow properties: apiGroup: - description: |- - APIGroup is the group for the resource being referenced. - If APIGroup is not specified, the specified Kind must be in the core API group. - For any other third-party types, APIGroup is required. + description: APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in + the core API group. For any other third-party types, APIGroup + is required. type: string kind: description: Kind is the type of resource being referenced @@ -479,10 +467,8 @@ spec: nullable: true type: boolean snapshotVolumes: - description: |- - SnapshotVolumes specifies whether to take snapshots - of any PV's referenced in the set of objects included - in the Backup. + description: SnapshotVolumes specifies whether to take snapshots of + any PV's referenced in the set of objects included in the Backup. nullable: true type: boolean storageLocation: @@ -490,19 +476,9 @@ spec: BackupStorageLocation where the backup should be stored. type: string ttl: - description: |- - TTL is a time.Duration-parseable string describing how long - the Backup should be retained for. + description: TTL is a time.Duration-parseable string describing how + long the Backup should be retained for. type: string - uploaderConfig: - description: UploaderConfig specifies the configuration for the uploader. - nullable: true - properties: - parallelFilesUpload: - description: ParallelFilesUpload is the number of files parallel - uploads to perform when using the uploader. - type: integer - type: object volumeSnapshotLocations: description: VolumeSnapshotLocations is a list containing names of VolumeSnapshotLocations associated with this backup. 
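The hunks above only reflow the generated description text for the Backup spec fields; the schema itself is unchanged. For orientation, a minimal, hypothetical sketch of a spec that exercises a few of the documented fields, using the velerov1 API types this repository already vendors (the namespace, label, and retention values are illustrative only):

package main

import (
	"fmt"
	"time"

	velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	snapshotVolumes := false
	spec := velerov1api.BackupSpec{
		// Only objects from these namespaces are considered for the backup.
		IncludedNamespaces: []string{"team-a"},
		// LabelSelector filters the individual objects added to the backup.
		LabelSelector: &metav1.LabelSelector{
			MatchLabels: map[string]string{"app": "example"},
		},
		// SnapshotVolumes toggles snapshots of PVs referenced by the backup.
		SnapshotVolumes: &snapshotVolumes,
		// TTL is the time.Duration-parseable retention period described above.
		TTL: metav1.Duration{Duration: 720 * time.Hour},
	}
	fmt.Println("retention:", spec.TTL.Duration)
}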
@@ -514,44 +490,39 @@ spec: description: BackupStatus captures the current status of a Velero backup. properties: backupItemOperationsAttempted: - description: |- - BackupItemOperationsAttempted is the total number of attempted - async BackupItemAction operations for this backup. + description: BackupItemOperationsAttempted is the total number of + attempted async BackupItemAction operations for this backup. type: integer backupItemOperationsCompleted: - description: |- - BackupItemOperationsCompleted is the total number of successfully completed - async BackupItemAction operations for this backup. + description: BackupItemOperationsCompleted is the total number of + successfully completed async BackupItemAction operations for this + backup. type: integer backupItemOperationsFailed: - description: |- - BackupItemOperationsFailed is the total number of async - BackupItemAction operations for this backup which ended with an error. + description: BackupItemOperationsFailed is the total number of async + BackupItemAction operations for this backup which ended with an + error. type: integer completionTimestamp: - description: |- - CompletionTimestamp records the time a backup was completed. - Completion time is recorded even on failed backups. - Completion time is recorded before uploading the backup object. - The server's time is used for CompletionTimestamps + description: CompletionTimestamp records the time a backup was completed. + Completion time is recorded even on failed backups. Completion time + is recorded before uploading the backup object. The server's time + is used for CompletionTimestamps format: date-time nullable: true type: string csiVolumeSnapshotsAttempted: - description: |- - CSIVolumeSnapshotsAttempted is the total number of attempted + description: CSIVolumeSnapshotsAttempted is the total number of attempted CSI VolumeSnapshots for this backup. type: integer csiVolumeSnapshotsCompleted: - description: |- - CSIVolumeSnapshotsCompleted is the total number of successfully + description: CSIVolumeSnapshotsCompleted is the total number of successfully completed CSI VolumeSnapshots for this backup. type: integer errors: - description: |- - Errors is a count of all error messages that were generated during - execution of the backup. The actual errors are in the backup's log - file in object storage. + description: Errors is a count of all error messages that were generated + during execution of the backup. The actual errors are in the backup's + log file in object storage. type: integer expiration: description: Expiration is when this Backup is eligible for garbage-collection. @@ -566,22 +537,6 @@ spec: description: FormatVersion is the backup format version, including major, minor, and patch version. type: string - hookStatus: - description: HookStatus contains information about the status of the - hooks. - nullable: true - properties: - hooksAttempted: - description: |- - HooksAttempted is the total number of attempted hooks - Specifically, HooksAttempted represents the number of hooks that failed to execute - and the number of hooks that executed successfully. - type: integer - hooksFailed: - description: HooksFailed is the total number of hooks which ended - with an error - type: integer - type: object phase: description: Phase is the current state of the Backup. enum: @@ -598,62 +553,53 @@ spec: - Deleting type: string progress: - description: |- - Progress contains information about the backup's execution progress. 
Note - that this information is best-effort only -- if Velero fails to update it - during a backup for any reason, it may be inaccurate/stale. + description: Progress contains information about the backup's execution + progress. Note that this information is best-effort only -- if Velero + fails to update it during a backup for any reason, it may be inaccurate/stale. nullable: true properties: itemsBackedUp: - description: |- - ItemsBackedUp is the number of items that have actually been written to the - backup tarball so far. + description: ItemsBackedUp is the number of items that have actually + been written to the backup tarball so far. type: integer totalItems: - description: |- - TotalItems is the total number of items to be backed up. This number may change - throughout the execution of the backup due to plugins that return additional related - items to back up, the velero.io/exclude-from-backup label, and various other + description: TotalItems is the total number of items to be backed + up. This number may change throughout the execution of the backup + due to plugins that return additional related items to back + up, the velero.io/exclude-from-backup label, and various other filters that happen as items are processed. type: integer type: object startTimestamp: - description: |- - StartTimestamp records the time a backup was started. - Separate from CreationTimestamp, since that value changes - on restores. + description: StartTimestamp records the time a backup was started. + Separate from CreationTimestamp, since that value changes on restores. The server's time is used for StartTimestamps format: date-time nullable: true type: string validationErrors: - description: |- - ValidationErrors is a slice of all validation errors (if - applicable). + description: ValidationErrors is a slice of all validation errors + (if applicable). items: type: string nullable: true type: array version: - description: |- - Version is the backup format major version. - Deprecated: Please see FormatVersion + description: 'Version is the backup format major version. Deprecated: + Please see FormatVersion' type: integer volumeSnapshotsAttempted: - description: |- - VolumeSnapshotsAttempted is the total number of attempted + description: VolumeSnapshotsAttempted is the total number of attempted volume snapshots for this backup. type: integer volumeSnapshotsCompleted: - description: |- - VolumeSnapshotsCompleted is the total number of successfully + description: VolumeSnapshotsCompleted is the total number of successfully completed volume snapshots for this backup. type: integer warnings: - description: |- - Warnings is a count of all warning messages that were generated during - execution of the backup. The actual warnings are in the backup's log - file in object storage. + description: Warnings is a count of all warning messages that were + generated during execution of the backup. The actual warnings are + in the backup's log file in object storage. 
type: integer type: object type: object diff --git a/internal/common/function/function.go b/internal/common/function/function.go index 98a4add..040fdb9 100644 --- a/internal/common/function/function.go +++ b/internal/common/function/function.go @@ -22,7 +22,6 @@ import ( "crypto/sha256" "encoding/hex" "fmt" - "os" velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" "k8s.io/apimachinery/pkg/types" @@ -35,11 +34,6 @@ import ( const requiredAnnotationError = "backup does not have the required annotation '%s'" -// GetOADPNamespace get the namespace OADP operator is installed -func GetOADPNamespace() string { - return os.Getenv(constant.NamespaceEnvVar) -} - // AddNonAdminLabels return a map with both the object labels and with the default Non Admin labels. // If error occurs, a map with only the default Non Admin labels is returned func AddNonAdminLabels(labels map[string]string) map[string]string { diff --git a/internal/controller/nonadminbackup_controller.go b/internal/controller/nonadminbackup_controller.go index 78ff3d0..a4c59a6 100644 --- a/internal/controller/nonadminbackup_controller.go +++ b/internal/controller/nonadminbackup_controller.go @@ -31,7 +31,6 @@ import ( "k8s.io/apimachinery/pkg/types" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -128,7 +127,7 @@ func (r *NonAdminBackupReconciler) Init(ctx context.Context, logrLogger logr.Log logger := logrLogger if nab.Status.Phase == constant.EmptyString { - updated := updateNonAdminPhase(nab, nacv1alpha1.NonAdminBackupPhaseNew) + updated := updateNonAdminPhase(&nab.Status.Phase, nacv1alpha1.NonAdminBackupPhaseNew) if updated { if err := r.Status().Update(ctx, nab); err != nil { logger.Error(err, phaseUpdateError) @@ -140,7 +139,7 @@ func (r *NonAdminBackupReconciler) Init(ctx context.Context, logrLogger logr.Log } } - logger.V(1).Info("NonAdminBackup Status.Phase already initialized") + logger.V(1).Info("NonAdminBackup Phase already initialized") return false, false, nil } @@ -162,9 +161,7 @@ func (r *NonAdminBackupReconciler) ValidateSpec(ctx context.Context, logrLogger // Main Validation point for the VeleroBackup included in NonAdminBackup spec _, err := function.GetBackupSpecFromNonAdminBackup(nab) if err != nil { - logger.Error(err, "NonAdminBackup Spec is not valid") - - updated := updateNonAdminPhase(nab, nacv1alpha1.NonAdminBackupPhaseBackingOff) + updated := updateNonAdminPhase(&nab.Status.Phase, nacv1alpha1.NonAdminBackupPhaseBackingOff) if updated { if updateErr := r.Status().Update(ctx, nab); updateErr != nil { logger.Error(updateErr, phaseUpdateError) @@ -190,6 +187,7 @@ func (r *NonAdminBackupReconciler) ValidateSpec(ctx context.Context, logrLogger } } + logger.Error(err, "NonAdminBackup Spec is not valid") return true, false, reconcile.TerminalError(err) } @@ -244,8 +242,6 @@ func (r *NonAdminBackupReconciler) UpdateSpecStatus(ctx context.Context, logrLog return true, false, err } // Create VeleroBackup - // Don't update phase nor conditions yet. - // Those will be updated when then Reconcile loop is triggered by the VeleroBackup object veleroBackupLogger.Info("VeleroBackup not found") // We don't validate error here. 
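The controller hunks above and below now thread &nab.Status.Phase through updateNonAdminPhase instead of passing the whole NonAdminBackup, which makes the pure transition logic unit-testable without envtest. A hypothetical table-driven test (not part of this patch) that the refactored helper would support, assuming NonAdminBackupPhase is a string-backed type, as its comparison against constant.EmptyString suggests:

package controller

import (
	"testing"

	nacv1alpha1 "github.com/migtools/oadp-non-admin/api/v1alpha1"
)

// TestUpdateNonAdminPhase exercises the helper's transition rules: an empty
// target phase is rejected, setting the current phase again is a no-op, and
// a real change mutates the pointee and reports true.
func TestUpdateNonAdminPhase(t *testing.T) {
	cases := []struct {
		name    string
		current nacv1alpha1.NonAdminBackupPhase
		target  nacv1alpha1.NonAdminBackupPhase
		want    bool
	}{
		{"set initial phase", "", nacv1alpha1.NonAdminBackupPhaseNew, true},
		{"same phase is a no-op", nacv1alpha1.NonAdminBackupPhaseNew, nacv1alpha1.NonAdminBackupPhaseNew, false},
		{"empty target is rejected", nacv1alpha1.NonAdminBackupPhaseNew, "", false},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			phase := tc.current
			if got := updateNonAdminPhase(&phase, tc.target); got != tc.want {
				t.Errorf("updateNonAdminPhase() = %v, want %v", got, tc.want)
			}
			if tc.want && phase != tc.target {
				t.Errorf("phase = %v, want %v", phase, tc.target)
			}
		})
	}
}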
@@ -275,28 +271,26 @@ func (r *NonAdminBackupReconciler) UpdateSpecStatus(ctx context.Context, logrLog nabManagedAnnotations := function.AddNonAdminBackupAnnotations(nab.Namespace, nab.Name, ownerUUID, existingAnnotations) veleroBackup.Annotations = nabManagedAnnotations - _, err = controllerutil.CreateOrPatch(ctx, r.Client, &veleroBackup, nil) + err = r.Create(ctx, &veleroBackup) if err != nil { veleroBackupLogger.Error(err, "Failed to create VeleroBackup") return true, false, err } veleroBackupLogger.Info("VeleroBackup successfully created") + } - updated := updateNonAdminPhase(nab, nacv1alpha1.NonAdminBackupPhaseCreated) - if updated { - if err := r.Status().Update(ctx, nab); err != nil { - logger.Error(err, phaseUpdateError) - return true, false, err - } - - logger.V(1).Info(phaseUpdateRequeue) - return false, true, nil + updated := updateNonAdminPhase(&nab.Status.Phase, nacv1alpha1.NonAdminBackupPhaseCreated) + if updated { + if err := r.Status().Update(ctx, nab); err != nil { + logger.Error(err, phaseUpdateError) + return true, false, err } - return false, false, nil + logger.V(1).Info(phaseUpdateRequeue) + return false, true, nil } - updated := meta.SetStatusCondition(&nab.Status.Conditions, + updated = meta.SetStatusCondition(&nab.Status.Conditions, metav1.Condition{ Type: string(nacv1alpha1.NonAdminConditionQueued), Status: metav1.ConditionTrue, @@ -319,7 +313,7 @@ func (r *NonAdminBackupReconciler) UpdateSpecStatus(ctx context.Context, logrLog // be reverted back to the previous state - the state which created VeleroBackup // in a first place, so they will be in sync. veleroBackupLogger.Info("VeleroBackup already exists, checking if NonAdminBackup VeleroBackupSpec and VeleroBackupStatus needs update") - updated = updateNonAdminBackupVeleroBackupStatus(nab, &veleroBackup) + updated = updateNonAdminBackupVeleroBackupStatus(&nab.Status, &veleroBackup) if updated { if err := r.Status().Update(ctx, nab); err != nil { veleroBackupLogger.Error(err, "NonAdminBackup BackupStatus - Failed to update") @@ -329,7 +323,7 @@ func (r *NonAdminBackupReconciler) UpdateSpecStatus(ctx context.Context, logrLog logger.V(1).Info("NonAdminBackup - Requeue after Status Update") return false, true, nil } - updated = updateNonAdminBackupVeleroBackupSpec(nab, &veleroBackup) + updated = updateNonAdminBackupVeleroBackupSpec(&nab.Spec, &veleroBackup) if updated { if err := r.Update(ctx, nab); err != nil { veleroBackupLogger.Error(err, "NonAdminBackup BackupSpec - Failed to update") @@ -341,7 +335,7 @@ func (r *NonAdminBackupReconciler) UpdateSpecStatus(ctx context.Context, logrLog } logger.V(1).Info("NonAdminBackup VeleroBackupSpec and VeleroBackupStatus already up to date") - return true, false, nil + return false, false, nil } // SetupWithManager sets up the controller with the Manager. @@ -358,34 +352,39 @@ func (r *NonAdminBackupReconciler) SetupWithManager(mgr ctrl.Manager) error { Complete(r) } -// updateNonAdminPhase updates the phase of a NonAdminBackup object with the provided phase. -func updateNonAdminPhase(nab *nacv1alpha1.NonAdminBackup, phase nacv1alpha1.NonAdminBackupPhase) bool { +// updateNonAdminPhase sets the phase in NonAdminBackup object status and returns true +// if the phase is changed by this call. 
+func updateNonAdminPhase(phase *nacv1alpha1.NonAdminBackupPhase, newPhase nacv1alpha1.NonAdminBackupPhase) bool {
 	// Ensure phase is valid
-	if phase == constant.EmptyString {
+	if newPhase == constant.EmptyString {
 		return false
 	}

-	if nab.Status.Phase == phase {
+	if *phase == newPhase {
 		return false
 	}

-	nab.Status.Phase = phase
+	*phase = newPhase
 	return true
 }

-func updateNonAdminBackupVeleroBackupStatus(nab *nacv1alpha1.NonAdminBackup, veleroBackup *velerov1api.Backup) bool {
-	if !reflect.DeepEqual(nab.Status.VeleroBackupStatus, &veleroBackup.Status) || nab.Status.VeleroBackupName != veleroBackup.Name || nab.Status.VeleroBackupNamespace != veleroBackup.Namespace {
-		nab.Status.VeleroBackupStatus = veleroBackup.Status.DeepCopy()
-		nab.Status.VeleroBackupName = veleroBackup.Name
-		nab.Status.VeleroBackupNamespace = veleroBackup.Namespace
+// updateNonAdminBackupVeleroBackupStatus sets the VeleroBackup fields in NonAdminBackup object status and returns true
+// if the VeleroBackup fields are changed by this call.
+func updateNonAdminBackupVeleroBackupStatus(status *nacv1alpha1.NonAdminBackupStatus, veleroBackup *velerov1api.Backup) bool {
+	if !reflect.DeepEqual(status.VeleroBackupStatus, &veleroBackup.Status) || status.VeleroBackupName != veleroBackup.Name || status.VeleroBackupNamespace != veleroBackup.Namespace {
+		status.VeleroBackupStatus = veleroBackup.Status.DeepCopy()
+		status.VeleroBackupName = veleroBackup.Name
+		status.VeleroBackupNamespace = veleroBackup.Namespace
 		return true
 	}
 	return false
 }

-func updateNonAdminBackupVeleroBackupSpec(nab *nacv1alpha1.NonAdminBackup, veleroBackup *velerov1api.Backup) bool {
-	if !reflect.DeepEqual(nab.Spec.BackupSpec, &veleroBackup.Spec) {
-		nab.Spec.BackupSpec = veleroBackup.Spec.DeepCopy()
+// updateNonAdminBackupVeleroBackupSpec sets the BackupSpec in NonAdminBackup object spec and returns true
+// if the BackupSpec is changed by this call.
+func updateNonAdminBackupVeleroBackupSpec(spec *nacv1alpha1.NonAdminBackupSpec, veleroBackup *velerov1api.Backup) bool { + if !reflect.DeepEqual(spec.BackupSpec, &veleroBackup.Spec) { + spec.BackupSpec = veleroBackup.Spec.DeepCopy() return true } return false diff --git a/internal/controller/nonadminbackup_controller_test.go b/internal/controller/nonadminbackup_controller_test.go index 7fd6409..3c24e86 100644 --- a/internal/controller/nonadminbackup_controller_test.go +++ b/internal/controller/nonadminbackup_controller_test.go @@ -19,7 +19,7 @@ package controller import ( "context" "fmt" - "log" + "reflect" "time" "github.com/onsi/ginkgo/v2" @@ -32,15 +32,12 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" nacv1alpha1 "github.com/migtools/oadp-non-admin/api/v1alpha1" - "github.com/migtools/oadp-non-admin/internal/common/constant" "github.com/migtools/oadp-non-admin/internal/common/function" ) const testNonAdminBackupName = "test-non-admin-backup" type nonAdminBackupSingleReconcileScenario struct { - namespace string - oadpNamespace string resultError error priorStatus *nacv1alpha1.NonAdminBackupStatus spec nacv1alpha1.NonAdminBackupSpec @@ -50,12 +47,8 @@ type nonAdminBackupSingleReconcileScenario struct { } type nonAdminBackupFullReconcileScenario struct { - ctx context.Context - cancel context.CancelFunc - namespace string - oadpNamespace string - spec nacv1alpha1.NonAdminBackupSpec - status nacv1alpha1.NonAdminBackupStatus + spec nacv1alpha1.NonAdminBackupSpec + status nacv1alpha1.NonAdminBackupStatus } func createTestNonAdminBackup(namespace string, spec nacv1alpha1.NonAdminBackupSpec) *nacv1alpha1.NonAdminBackup { @@ -68,12 +61,88 @@ func createTestNonAdminBackup(namespace string, spec nacv1alpha1.NonAdminBackupS } } +func checkTestNonAdminBackupStatus(nonAdminBackup *nacv1alpha1.NonAdminBackup, expectedStatus nacv1alpha1.NonAdminBackupStatus) error { + if nonAdminBackup.Status.Phase != expectedStatus.Phase { + return fmt.Errorf("NonAdminBackup Status Phase %v is not equal to expected %v", nonAdminBackup.Status.Phase, expectedStatus.Phase) + } + if nonAdminBackup.Status.VeleroBackupName != expectedStatus.VeleroBackupName { + return fmt.Errorf("NonAdminBackup Status VeleroBackupName %v is not equal to expected %v", nonAdminBackup.Status.VeleroBackupName, expectedStatus.VeleroBackupName) + } + if nonAdminBackup.Status.VeleroBackupNamespace != expectedStatus.VeleroBackupNamespace { + return fmt.Errorf("NonAdminBackup Status VeleroBackupNamespace %v is not equal to expected %v", nonAdminBackup.Status.VeleroBackupNamespace, expectedStatus.VeleroBackupNamespace) + } + if !reflect.DeepEqual(nonAdminBackup.Status.VeleroBackupStatus, expectedStatus.VeleroBackupStatus) { + return fmt.Errorf("NonAdminBackup Status VeleroBackupStatus %v is not equal to expected %v", nonAdminBackup.Status.VeleroBackupStatus, expectedStatus.VeleroBackupStatus) + } + + if len(nonAdminBackup.Status.Conditions) != len(expectedStatus.Conditions) { + return fmt.Errorf("NonAdminBackup Status has %v Condition(s), expected to have %v", len(nonAdminBackup.Status.Conditions), len(expectedStatus.Conditions)) + } + for index := range nonAdminBackup.Status.Conditions { + if nonAdminBackup.Status.Conditions[index].Type != expectedStatus.Conditions[index].Type { + return fmt.Errorf("NonAdminBackup Status Conditions [%v] Type %v is not equal to expected %v", index, nonAdminBackup.Status.Conditions[index].Type, expectedStatus.Conditions[index].Type) + } + if nonAdminBackup.Status.Conditions[index].Status != 
expectedStatus.Conditions[index].Status { + return fmt.Errorf("NonAdminBackup Status Conditions [%v] Status %v is not equal to expected %v", index, nonAdminBackup.Status.Conditions[index].Status, expectedStatus.Conditions[index].Status) + } + if nonAdminBackup.Status.Conditions[index].Reason != expectedStatus.Conditions[index].Reason { + return fmt.Errorf("NonAdminBackup Status Conditions [%v] Reason %v is not equal to expected %v", index, nonAdminBackup.Status.Conditions[index].Reason, expectedStatus.Conditions[index].Reason) + } + if nonAdminBackup.Status.Conditions[index].Message != expectedStatus.Conditions[index].Message { + return fmt.Errorf("NonAdminBackup Status Conditions [%v] Message %v is not equal to expected %v", index, nonAdminBackup.Status.Conditions[index].Message, expectedStatus.Conditions[index].Message) + } + } + return nil +} + +func createTestNamespaces(ctx context.Context, nonAdminNamespaceName string, oadpNamespaceName string) error { + nonAdminNamespace := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: nonAdminNamespaceName, + }, + } + err := k8sClient.Create(ctx, nonAdminNamespace) + if err != nil { + return err + } + + oadpNamespace := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: oadpNamespaceName, + }, + } + return k8sClient.Create(ctx, oadpNamespace) +} + +func deleteTestNamespaces(ctx context.Context, nonAdminNamespaceName string, oadpNamespaceName string) error { + oadpNamespace := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: oadpNamespaceName, + }, + } + err := k8sClient.Delete(ctx, oadpNamespace) + if err != nil { + return err + } + + nonAdminNamespace := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: nonAdminNamespaceName, + }, + } + return k8sClient.Delete(ctx, nonAdminNamespace) +} + var _ = ginkgo.Describe("Test single reconciles of NonAdminBackup Reconcile function", func() { var ( - ctx = context.Background() - currentTestScenario nonAdminBackupSingleReconcileScenario - updateTestScenario = func(scenario nonAdminBackupSingleReconcileScenario) { - currentTestScenario = scenario + ctx = context.Background() + nonAdminNamespaceName = "" + oadpNamespaceName = "" + counter = 0 + updateTestScenario = func() { + counter++ + nonAdminNamespaceName = fmt.Sprintf("test-nonadminbackup-reconcile-%v", counter) + oadpNamespaceName = nonAdminNamespaceName + "-oadp" } ) @@ -83,40 +152,21 @@ var _ = ginkgo.Describe("Test single reconciles of NonAdminBackup Reconcile func ctx, types.NamespacedName{ Name: testNonAdminBackupName, - Namespace: currentTestScenario.namespace, + Namespace: nonAdminNamespaceName, }, nonAdminBackup, ) == nil { gomega.Expect(k8sClient.Delete(ctx, nonAdminBackup)).To(gomega.Succeed()) } - if len(currentTestScenario.oadpNamespace) > 0 { - oadpNamespace := &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: currentTestScenario.oadpNamespace, - }, - } - gomega.Expect(k8sClient.Delete(ctx, oadpNamespace)).To(gomega.Succeed()) - } - - namespace := &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: currentTestScenario.namespace, - }, - } - gomega.Expect(k8sClient.Delete(ctx, namespace)).To(gomega.Succeed()) + gomega.Expect(deleteTestNamespaces(ctx, nonAdminNamespaceName, oadpNamespaceName)).To(gomega.Succeed()) }) - ginkgo.DescribeTable("should Reconcile on Delete event", + ginkgo.DescribeTable("Reconcile called by NonAdminBackup Delete event", func(scenario nonAdminBackupSingleReconcileScenario) { - updateTestScenario(scenario) + updateTestScenario() - namespace := 
&corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: scenario.namespace, - }, - } - gomega.Expect(k8sClient.Create(ctx, namespace)).To(gomega.Succeed()) + gomega.Expect(createTestNamespaces(ctx, nonAdminNamespaceName, oadpNamespaceName)).To(gomega.Succeed()) result, err := (&NonAdminBackupReconciler{ Client: k8sClient, @@ -124,7 +174,7 @@ var _ = ginkgo.Describe("Test single reconciles of NonAdminBackup Reconcile func }).Reconcile( context.Background(), reconcile.Request{NamespacedName: types.NamespacedName{ - Namespace: scenario.namespace, + Namespace: nonAdminNamespaceName, Name: testNonAdminBackupName, }}, ) @@ -132,40 +182,28 @@ var _ = ginkgo.Describe("Test single reconciles of NonAdminBackup Reconcile func gomega.Expect(result).To(gomega.Equal(scenario.result)) gomega.Expect(err).To(gomega.Not(gomega.HaveOccurred())) }, - ginkgo.Entry("Should accept deletion of NonAdminBackup", nonAdminBackupSingleReconcileScenario{ - namespace: "test-nonadminbackup-reconcile-0", - result: reconcile.Result{}, + ginkgo.Entry("Should exit", nonAdminBackupSingleReconcileScenario{ + result: reconcile.Result{}, }), ) - ginkgo.DescribeTable("should Reconcile on Create and Update events and on Requeue", + ginkgo.DescribeTable("Reconcile called by NonAdminBackup Create/Update events and by Requeue", func(scenario nonAdminBackupSingleReconcileScenario) { - updateTestScenario(scenario) + updateTestScenario() - namespace := &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: scenario.namespace, - }, - } - gomega.Expect(k8sClient.Create(ctx, namespace)).To(gomega.Succeed()) - - if len(scenario.oadpNamespace) > 0 { - oadpNamespace := &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: scenario.oadpNamespace, - }, - } - gomega.Expect(k8sClient.Create(ctx, oadpNamespace)).To(gomega.Succeed()) - } + gomega.Expect(createTestNamespaces(ctx, nonAdminNamespaceName, oadpNamespaceName)).To(gomega.Succeed()) - nonAdminBackup := createTestNonAdminBackup(scenario.namespace, scenario.spec) + nonAdminBackup := createTestNonAdminBackup(nonAdminNamespaceName, scenario.spec) gomega.Expect(k8sClient.Create(ctx, nonAdminBackup)).To(gomega.Succeed()) if scenario.createVeleroBackup { veleroBackup := &v1.Backup{ ObjectMeta: metav1.ObjectMeta{ - Name: function.GenerateVeleroBackupName(scenario.namespace, testNonAdminBackupName), - Namespace: scenario.oadpNamespace, + Name: function.GenerateVeleroBackupName(nonAdminNamespaceName, testNonAdminBackupName), + Namespace: oadpNamespaceName, + }, + Spec: v1.BackupSpec{ + IncludedNamespaces: []string{nonAdminNamespaceName}, }, } gomega.Expect(k8sClient.Create(ctx, veleroBackup)).To(gomega.Succeed()) @@ -182,11 +220,11 @@ var _ = ginkgo.Describe("Test single reconciles of NonAdminBackup Reconcile func result, err := (&NonAdminBackupReconciler{ Client: k8sClient, Scheme: testEnv.Scheme, - OADPNamespace: scenario.oadpNamespace, + OADPNamespace: oadpNamespaceName, }).Reconcile( context.Background(), reconcile.Request{NamespacedName: types.NamespacedName{ - Namespace: scenario.namespace, + Namespace: nonAdminNamespaceName, Name: testNonAdminBackupName, }}, ) @@ -202,22 +240,23 @@ var _ = ginkgo.Describe("Test single reconciles of NonAdminBackup Reconcile func ctx, types.NamespacedName{ Name: testNonAdminBackupName, - Namespace: currentTestScenario.namespace, + Namespace: nonAdminNamespaceName, }, nonAdminBackup, )).To(gomega.Succeed()) - gomega.Expect(nonAdminBackup.Status.Phase).To(gomega.Equal(scenario.status.Phase)) - 
gomega.Expect(nonAdminBackup.Status.VeleroBackupName).To(gomega.Equal(scenario.status.VeleroBackupName)) - gomega.Expect(nonAdminBackup.Status.VeleroBackupNamespace).To(gomega.Equal(scenario.status.VeleroBackupNamespace)) - gomega.Expect(nonAdminBackup.Status.VeleroBackupStatus).To(gomega.Equal(scenario.status.VeleroBackupStatus)) - - gomega.Expect(nonAdminBackup.Status.Conditions).To(gomega.HaveLen(len(scenario.status.Conditions))) - for index := range nonAdminBackup.Status.Conditions { - gomega.Expect(nonAdminBackup.Status.Conditions[index].Type).To(gomega.Equal(scenario.status.Conditions[index].Type)) - gomega.Expect(nonAdminBackup.Status.Conditions[index].Status).To(gomega.Equal(scenario.status.Conditions[index].Status)) - gomega.Expect(nonAdminBackup.Status.Conditions[index].Reason).To(gomega.Equal(scenario.status.Conditions[index].Reason)) - gomega.Expect(nonAdminBackup.Status.Conditions[index].Message).To(gomega.Equal(scenario.status.Conditions[index].Message)) + gomega.Expect(checkTestNonAdminBackupStatus(nonAdminBackup, scenario.status)).To(gomega.Succeed()) + if scenario.priorStatus != nil { + if len(scenario.priorStatus.VeleroBackupName) > 0 { + gomega.Expect(reflect.DeepEqual( + nonAdminBackup.Spec.BackupSpec, + &v1.BackupSpec{ + IncludedNamespaces: []string{ + nonAdminNamespaceName, + }, + }, + )).To(gomega.BeTrue()) + } } // easy hack to test that only one update call happens per reconcile @@ -225,22 +264,19 @@ var _ = ginkgo.Describe("Test single reconciles of NonAdminBackup Reconcile func // gomega.Expect(err).To(gomega.Not(gomega.HaveOccurred())) // gomega.Expect(currentResourceVersion - priorResourceVersion).To(gomega.Equal(1)) }, - ginkgo.Entry("Should accept creation of NonAdminBackup", nonAdminBackupSingleReconcileScenario{ - namespace: "test-nonadminbackup-reconcile-1", - result: reconcile.Result{Requeue: true}, + ginkgo.Entry("When called by NonAdminBackup Create event, should update NonAdminBackup phase to new and Requeue", nonAdminBackupSingleReconcileScenario{ status: nacv1alpha1.NonAdminBackupStatus{ Phase: nacv1alpha1.NonAdminBackupPhaseNew, }, + result: reconcile.Result{Requeue: true}, }), - ginkgo.Entry("Should accept update of NonAdminBackup phase to new", nonAdminBackupSingleReconcileScenario{ - namespace: "test-nonadminbackup-reconcile-2", + ginkgo.Entry("When called by Requeue(update NonAdminBackup phase to new), should update NonAdminBackup Condition to Accepted True and Requeue", nonAdminBackupSingleReconcileScenario{ spec: nacv1alpha1.NonAdminBackupSpec{ BackupSpec: &v1.BackupSpec{}, }, priorStatus: &nacv1alpha1.NonAdminBackupStatus{ Phase: nacv1alpha1.NonAdminBackupPhaseNew, }, - result: reconcile.Result{Requeue: true}, status: nacv1alpha1.NonAdminBackupStatus{ Phase: nacv1alpha1.NonAdminBackupPhaseNew, Conditions: []metav1.Condition{ @@ -252,10 +288,9 @@ var _ = ginkgo.Describe("Test single reconciles of NonAdminBackup Reconcile func }, }, }, + result: reconcile.Result{Requeue: true}, }), - ginkgo.Entry("Should accept update of NonAdminBackup Condition to Accepted True", nonAdminBackupSingleReconcileScenario{ - namespace: "test-nonadminbackup-reconcile-3", - oadpNamespace: "test-nonadminbackup-reconcile-3-oadp", + ginkgo.Entry("When called by Requeue(update NonAdminBackup Condition to Accepted True), should update NonAdminBackup phase to created and Requeue", nonAdminBackupSingleReconcileScenario{ spec: nacv1alpha1.NonAdminBackupSpec{ BackupSpec: &v1.BackupSpec{}, }, @@ -271,7 +306,6 @@ var _ = ginkgo.Describe("Test single reconciles of NonAdminBackup 
Reconcile func }, }, }, - result: reconcile.Result{Requeue: true}, status: nacv1alpha1.NonAdminBackupStatus{ // TODO should not have VeleroBackupName and VeleroBackupNamespace? Phase: nacv1alpha1.NonAdminBackupPhaseCreated, @@ -284,10 +318,10 @@ var _ = ginkgo.Describe("Test single reconciles of NonAdminBackup Reconcile func }, }, }, + result: reconcile.Result{Requeue: true}, }), - ginkgo.Entry("Should accept update of NonAdminBackup phase to created", nonAdminBackupSingleReconcileScenario{ - namespace: "test-nonadminbackup-reconcile-4", - oadpNamespace: "test-nonadminbackup-reconcile-4-oadp", + ginkgo.Entry("When called by Requeue(update NonAdminBackup phase to created), should update NonAdminBackup Condition to Queued True and Requeue", nonAdminBackupSingleReconcileScenario{ + createVeleroBackup: true, spec: nacv1alpha1.NonAdminBackupSpec{ BackupSpec: &v1.BackupSpec{}, }, @@ -303,8 +337,6 @@ var _ = ginkgo.Describe("Test single reconciles of NonAdminBackup Reconcile func }, }, }, - createVeleroBackup: true, - result: reconcile.Result{Requeue: true}, status: nacv1alpha1.NonAdminBackupStatus{ // TODO should not have VeleroBackupName and VeleroBackupNamespace? Phase: nacv1alpha1.NonAdminBackupPhaseCreated, @@ -323,10 +355,10 @@ var _ = ginkgo.Describe("Test single reconciles of NonAdminBackup Reconcile func }, }, }, + result: reconcile.Result{Requeue: true}, }), - ginkgo.Entry("Should accept update of NonAdminBackup Condition to Queued True", nonAdminBackupSingleReconcileScenario{ - namespace: "test-nonadminbackup-reconcile-5", - oadpNamespace: "test-nonadminbackup-reconcile-5-oadp", + ginkgo.Entry("When called by Requeue(update NonAdminBackup Condition to Queued True), should update NonAdminBackup VeleroBackupStatus and Requeue", nonAdminBackupSingleReconcileScenario{ + createVeleroBackup: true, spec: nacv1alpha1.NonAdminBackupSpec{ BackupSpec: &v1.BackupSpec{}, }, @@ -349,12 +381,59 @@ var _ = ginkgo.Describe("Test single reconciles of NonAdminBackup Reconcile func }, }, }, + status: nacv1alpha1.NonAdminBackupStatus{ + Phase: nacv1alpha1.NonAdminBackupPhaseCreated, + VeleroBackupName: "nab-test-nonadminbackup-reconcile-6-c9dd6af01e2e2a", + VeleroBackupNamespace: "test-nonadminbackup-reconcile-6-oadp", + VeleroBackupStatus: &v1.BackupStatus{}, + Conditions: []metav1.Condition{ + { + Type: "Accepted", + Status: metav1.ConditionTrue, + Reason: "BackupAccepted", + Message: "Backup accepted", + }, + { + Type: "Queued", + Status: metav1.ConditionTrue, + Reason: "BackupScheduled", + Message: "Created Velero Backup object", + }, + }, + }, + result: reconcile.Result{Requeue: true}, + }), + ginkgo.Entry("When called by Requeue(update NonAdminBackup VeleroBackupStatus), should update NonAdminBackup spec BackupSpec and Requeue", nonAdminBackupSingleReconcileScenario{ createVeleroBackup: true, - result: reconcile.Result{Requeue: true}, + spec: nacv1alpha1.NonAdminBackupSpec{ + BackupSpec: &v1.BackupSpec{}, + }, + priorStatus: &nacv1alpha1.NonAdminBackupStatus{ + Phase: nacv1alpha1.NonAdminBackupPhaseCreated, + VeleroBackupName: "nab-test-nonadminbackup-reconcile-7-c9dd6af01e2e2a", + VeleroBackupNamespace: "test-nonadminbackup-reconcile-7-oadp", + VeleroBackupStatus: &v1.BackupStatus{}, + Conditions: []metav1.Condition{ + { + Type: "Accepted", + Status: metav1.ConditionTrue, + Reason: "BackupAccepted", + Message: "Backup accepted", + LastTransitionTime: metav1.NewTime(time.Now()), + }, + { + Type: "Queued", + Status: metav1.ConditionTrue, + Reason: "BackupScheduled", + Message: "Created Velero Backup 
object", + LastTransitionTime: metav1.NewTime(time.Now()), + }, + }, + }, status: nacv1alpha1.NonAdminBackupStatus{ Phase: nacv1alpha1.NonAdminBackupPhaseCreated, - VeleroBackupName: "nab-test-nonadminbackup-reconcile-5-c9dd6af01e2e2a", - VeleroBackupNamespace: "test-nonadminbackup-reconcile-5-oadp", + VeleroBackupName: "nab-test-nonadminbackup-reconcile-7-c9dd6af01e2e2a", + VeleroBackupNamespace: "test-nonadminbackup-reconcile-7-oadp", VeleroBackupStatus: &v1.BackupStatus{}, Conditions: []metav1.Condition{ { @@ -371,9 +450,10 @@ var _ = ginkgo.Describe("Test single reconciles of NonAdminBackup Reconcile func }, }, }, + // TODO should not exit? + result: reconcile.Result{Requeue: true}, }), - ginkgo.Entry("Should accept update of NonAdminBackup phase to new - invalid spec", nonAdminBackupSingleReconcileScenario{ - namespace: "test-nonadminbackup-reconcile-6", + ginkgo.Entry("When called by Requeue(update NonAdminBackup phase to new - invalid spec), should update NonAdminBackup phase to BackingOff and Requeue", nonAdminBackupSingleReconcileScenario{ spec: nacv1alpha1.NonAdminBackupSpec{ BackupSpec: &v1.BackupSpec{ IncludedNamespaces: []string{"not-valid"}, @@ -382,14 +462,13 @@ var _ = ginkgo.Describe("Test single reconciles of NonAdminBackup Reconcile func priorStatus: &nacv1alpha1.NonAdminBackupStatus{ Phase: nacv1alpha1.NonAdminBackupPhaseNew, }, - result: reconcile.Result{Requeue: true}, status: nacv1alpha1.NonAdminBackupStatus{ Phase: nacv1alpha1.NonAdminBackupPhaseBackingOff, }, + result: reconcile.Result{Requeue: true}, }), - ginkgo.Entry("Should accept update of NonAdminBackup phase to BackingOff", nonAdminBackupSingleReconcileScenario{ - // this validates spec again... - namespace: "test-nonadminbackup-reconcile-7", + ginkgo.Entry("When called by Requeue(update NonAdminBackup phase to BackingOff), should update NonAdminBackup Condition to Accepted False and stop with terminal error", nonAdminBackupSingleReconcileScenario{ + // TODO this validates spec again... 
spec: nacv1alpha1.NonAdminBackupSpec{ BackupSpec: &v1.BackupSpec{ IncludedNamespaces: []string{"not-valid"}, @@ -398,7 +477,6 @@ var _ = ginkgo.Describe("Test single reconciles of NonAdminBackup Reconcile func priorStatus: &nacv1alpha1.NonAdminBackupStatus{ Phase: nacv1alpha1.NonAdminBackupPhaseBackingOff, }, - resultError: reconcile.TerminalError(fmt.Errorf("spec.backupSpec.IncludedNamespaces can not contain namespaces other than: test-nonadminbackup-reconcile-7")), status: nacv1alpha1.NonAdminBackupStatus{ Phase: nacv1alpha1.NonAdminBackupPhaseBackingOff, Conditions: []metav1.Condition{ @@ -410,58 +488,39 @@ var _ = ginkgo.Describe("Test single reconciles of NonAdminBackup Reconcile func }, }, }, + resultError: reconcile.TerminalError(fmt.Errorf("spec.backupSpec.IncludedNamespaces can not contain namespaces other than: test-nonadminbackup-reconcile-9")), }), ) }) var _ = ginkgo.Describe("Test full reconcile loop of NonAdminBackup Controller", func() { var ( - currentTestScenario nonAdminBackupFullReconcileScenario - updateTestScenario = func(scenario nonAdminBackupFullReconcileScenario) { - ctx, cancel := context.WithCancel(context.Background()) - scenario.ctx = ctx - scenario.cancel = cancel - currentTestScenario = scenario + ctx context.Context + cancel context.CancelFunc + nonAdminNamespaceName = "" + oadpNamespaceName = "" + counter = 0 + updateTestScenario = func() { + ctx, cancel = context.WithCancel(context.Background()) + counter++ + nonAdminNamespaceName = fmt.Sprintf("test-nonadminbackup-reconcile-full-%v", counter) + oadpNamespaceName = nonAdminNamespaceName + "-oadp" } ) ginkgo.AfterEach(func() { - oadpNamespace := &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: currentTestScenario.oadpNamespace, - }, - } - gomega.Expect(k8sClient.Delete(currentTestScenario.ctx, oadpNamespace)).To(gomega.Succeed()) - - namespace := &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: currentTestScenario.namespace, - }, - } - gomega.Expect(k8sClient.Delete(currentTestScenario.ctx, namespace)).To(gomega.Succeed()) + gomega.Expect(deleteTestNamespaces(ctx, nonAdminNamespaceName, oadpNamespaceName)).To(gomega.Succeed()) - currentTestScenario.cancel() + cancel() // wait cancel time.Sleep(1 * time.Second) }) ginkgo.DescribeTable("full reconcile loop", func(scenario nonAdminBackupFullReconcileScenario) { - updateTestScenario(scenario) - - namespace := &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: scenario.namespace, - }, - } - gomega.Expect(k8sClient.Create(currentTestScenario.ctx, namespace)).To(gomega.Succeed()) + updateTestScenario() - oadpNamespace := &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: scenario.oadpNamespace, - }, - } - gomega.Expect(k8sClient.Create(currentTestScenario.ctx, oadpNamespace)).To(gomega.Succeed()) + gomega.Expect(createTestNamespaces(ctx, nonAdminNamespaceName, oadpNamespaceName)).To(gomega.Succeed()) k8sManager, err := ctrl.NewManager(cfg, ctrl.Options{ Scheme: k8sClient.Scheme(), @@ -471,70 +530,68 @@ var _ = ginkgo.Describe("Test full reconcile loop of NonAdminBackup Controller", err = (&NonAdminBackupReconciler{ Client: k8sManager.GetClient(), Scheme: k8sManager.GetScheme(), - OADPNamespace: scenario.oadpNamespace, + OADPNamespace: oadpNamespaceName, }).SetupWithManager(k8sManager) gomega.Expect(err).ToNot(gomega.HaveOccurred()) go func() { defer ginkgo.GinkgoRecover() - err = k8sManager.Start(currentTestScenario.ctx) + err = k8sManager.Start(ctx) gomega.Expect(err).ToNot(gomega.HaveOccurred(), "failed to run manager") 
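// (The fixed one-second sleeps that follow are a pragmatic stand-in for a
// readiness signal; waiting on k8sManager.Elected() or polling the API with
// gomega.Eventually would be less timing-sensitive.)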
}() - // wait manager start time.Sleep(1 * time.Second) - nonAdminBackup := createTestNonAdminBackup(scenario.namespace, scenario.spec) - gomega.Expect(k8sClient.Create(currentTestScenario.ctx, nonAdminBackup)).To(gomega.Succeed()) + ginkgo.By("Waiting Reconcile of create event") + nonAdminBackup := createTestNonAdminBackup(nonAdminNamespaceName, scenario.spec) + gomega.Expect(k8sClient.Create(ctx, nonAdminBackup)).To(gomega.Succeed()) // wait NAB reconcile time.Sleep(1 * time.Second) + + ginkgo.By("Fetching NonAdminBackup after Reconcile") gomega.Expect(k8sClient.Get( - currentTestScenario.ctx, + ctx, types.NamespacedName{ Name: testNonAdminBackupName, - Namespace: scenario.namespace, + Namespace: nonAdminNamespaceName, }, nonAdminBackup, )).To(gomega.Succeed()) - log.Println("Validating NonAdminBackup Status") - gomega.Expect(nonAdminBackup.Status.Phase).To(gomega.Equal(scenario.status.Phase)) - gomega.Expect(nonAdminBackup.Status.VeleroBackupName).To(gomega.Equal(scenario.status.VeleroBackupName)) - gomega.Expect(nonAdminBackup.Status.VeleroBackupNamespace).To(gomega.Equal(scenario.status.VeleroBackupNamespace)) - if len(scenario.status.VeleroBackupName) > 0 { - gomega.Expect(nonAdminBackup.Status.VeleroBackupStatus.Phase).To(gomega.Equal(v1.BackupPhase(constant.EmptyString))) - } - - gomega.Expect(nonAdminBackup.Status.Conditions).To(gomega.HaveLen(len(scenario.status.Conditions))) - for index := range nonAdminBackup.Status.Conditions { - gomega.Expect(nonAdminBackup.Status.Conditions[index].Type).To(gomega.Equal(scenario.status.Conditions[index].Type)) - gomega.Expect(nonAdminBackup.Status.Conditions[index].Status).To(gomega.Equal(scenario.status.Conditions[index].Status)) - gomega.Expect(nonAdminBackup.Status.Conditions[index].Reason).To(gomega.Equal(scenario.status.Conditions[index].Reason)) - gomega.Expect(nonAdminBackup.Status.Conditions[index].Message).To(gomega.Equal(scenario.status.Conditions[index].Message)) - } - log.Println("Validation of NonAdminBackup Status completed successfully") + ginkgo.By("Validating NonAdminBackup Status") + gomega.Expect(checkTestNonAdminBackupStatus(nonAdminBackup, scenario.status)).To(gomega.Succeed()) if len(scenario.status.VeleroBackupName) > 0 { - log.Println("Mocking VeleroBackup update to finished state") + ginkgo.By("Validating NonAdminBackup Spec") + gomega.Expect(reflect.DeepEqual( + nonAdminBackup.Spec.BackupSpec, + &v1.BackupSpec{ + IncludedNamespaces: []string{ + nonAdminNamespaceName, + }, + }, + )).To(gomega.BeTrue()) + + ginkgo.By("Simulating VeleroBackup update to finished state") veleroBackup := &v1.Backup{} gomega.Expect(k8sClient.Get( - currentTestScenario.ctx, + ctx, types.NamespacedName{ Name: scenario.status.VeleroBackupName, - Namespace: scenario.oadpNamespace, + Namespace: oadpNamespaceName, }, veleroBackup, )).To(gomega.Succeed()) veleroBackup.Status.Phase = v1.BackupPhaseCompleted - // TODO I can not call .Status().Update() for veleroBackup object: backups.velero.io "name..." not found - gomega.Expect(k8sClient.Update(currentTestScenario.ctx, veleroBackup)).To(gomega.Succeed()) + // TODO can not call .Status().Update() for veleroBackup object: backups.velero.io "name..." 
not found error + gomega.Expect(k8sClient.Update(ctx, veleroBackup)).To(gomega.Succeed()) gomega.Eventually(func() (bool, error) { err := k8sClient.Get( - currentTestScenario.ctx, + ctx, types.NamespacedName{ Name: testNonAdminBackupName, - Namespace: scenario.namespace, + Namespace: nonAdminNamespaceName, }, nonAdminBackup, ) @@ -545,13 +602,11 @@ var _ = ginkgo.Describe("Test full reconcile loop of NonAdminBackup Controller", }, 5*time.Second, 1*time.Second).Should(gomega.BeTrue()) } - gomega.Expect(k8sClient.Delete(currentTestScenario.ctx, nonAdminBackup)).To(gomega.Succeed()) - // wait reconcile of delete event + ginkgo.By("Waiting Reconcile of delete event") + gomega.Expect(k8sClient.Delete(ctx, nonAdminBackup)).To(gomega.Succeed()) time.Sleep(1 * time.Second) }, - ginkgo.Entry("Should update NonAdminBackup until VeleroBackup completes and than delete it", nonAdminBackupFullReconcileScenario{ - namespace: "test-nonadminbackup-reconcile-full-1", - oadpNamespace: "test-nonadminbackup-reconcile-full-1-oadp", + ginkgo.Entry("Should update NonAdminBackup until VeleroBackup completes and then delete it", nonAdminBackupFullReconcileScenario{ spec: nacv1alpha1.NonAdminBackupSpec{ BackupSpec: &v1.BackupSpec{}, }, @@ -559,6 +614,7 @@ var _ = ginkgo.Describe("Test full reconcile loop of NonAdminBackup Controller", Phase: nacv1alpha1.NonAdminBackupPhaseCreated, VeleroBackupName: "nab-test-nonadminbackup-reconcile-full-1-c9dd6af01e2e2a", VeleroBackupNamespace: "test-nonadminbackup-reconcile-full-1-oadp", + VeleroBackupStatus: &v1.BackupStatus{}, Conditions: []metav1.Condition{ { Type: "Accepted", @@ -575,9 +631,7 @@ var _ = ginkgo.Describe("Test full reconcile loop of NonAdminBackup Controller", }, }, }), - ginkgo.Entry("Should update NonAdminBackup until it invalidates and than delete it", nonAdminBackupFullReconcileScenario{ - namespace: "test-nonadminbackup-reconcile-full-2", - oadpNamespace: "test-nonadminbackup-reconcile-full-2-oadp", + ginkgo.Entry("Should update NonAdminBackup until it invalidates and then delete it", nonAdminBackupFullReconcileScenario{ spec: nacv1alpha1.NonAdminBackupSpec{ BackupSpec: &v1.BackupSpec{ IncludedNamespaces: []string{"not-valid"}, diff --git a/internal/predicate/nonadminbackup_predicate.go b/internal/predicate/nonadminbackup_predicate.go index 6bce7f4..f211a7e 100644 --- a/internal/predicate/nonadminbackup_predicate.go +++ b/internal/predicate/nonadminbackup_predicate.go @@ -72,7 +72,7 @@ func (NonAdminBackupPredicate) Update(ctx context.Context, evt event.UpdateEvent // New phase set, reconcile if oldPhase == constant.EmptyString && newPhase != constant.EmptyString { - logger.V(1).Info("NonAdminBsackupPredicate: Accepted Update event - phase change") + logger.V(1).Info("NonAdminBackupPredicate: Accepted Update event - phase change") return true } else if oldPhase == nacv1alpha1.NonAdminBackupPhaseNew && newPhase == nacv1alpha1.NonAdminBackupPhaseCreated { logger.V(1).Info("NonAdminBackupPredicate: Accepted Update event - phase created") From aa02b5f3bb6e0aa12b9f3c41620c453560f020e6 Mon Sep 17 00:00:00 2001 From: Mateus Oliveira Date: Sun, 8 Sep 2024 15:20:11 -0300 Subject: [PATCH 13/17] fixup! 
fix: add integration tests for NAB Signed-off-by: Mateus Oliveira --- .../controller/nonadminbackup_controller.go | 4 ++-- .../nonadminbackup_controller_test.go | 24 +++++++++---------- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/internal/controller/nonadminbackup_controller.go b/internal/controller/nonadminbackup_controller.go index a4c59a6..cf80fa3 100644 --- a/internal/controller/nonadminbackup_controller.go +++ b/internal/controller/nonadminbackup_controller.go @@ -177,7 +177,7 @@ func (r *NonAdminBackupReconciler) ValidateSpec(ctx context.Context, logrLogger Type: string(nacv1alpha1.NonAdminConditionAccepted), Status: metav1.ConditionFalse, Reason: "InvalidBackupSpec", - Message: "NonAdminBackup does not contain valid Spec", + Message: err.Error(), }, ) if updated { @@ -196,7 +196,7 @@ func (r *NonAdminBackupReconciler) ValidateSpec(ctx context.Context, logrLogger Type: string(nacv1alpha1.NonAdminConditionAccepted), Status: metav1.ConditionTrue, Reason: "BackupAccepted", - Message: "Backup accepted", + Message: "backup accepted", }, ) if updated { diff --git a/internal/controller/nonadminbackup_controller_test.go b/internal/controller/nonadminbackup_controller_test.go index 3c24e86..d396c52 100644 --- a/internal/controller/nonadminbackup_controller_test.go +++ b/internal/controller/nonadminbackup_controller_test.go @@ -284,7 +284,7 @@ var _ = ginkgo.Describe("Test single reconciles of NonAdminBackup Reconcile func Type: "Accepted", Status: metav1.ConditionTrue, Reason: "BackupAccepted", - Message: "Backup accepted", + Message: "backup accepted", }, }, }, @@ -301,7 +301,7 @@ var _ = ginkgo.Describe("Test single reconciles of NonAdminBackup Reconcile func Type: "Accepted", Status: metav1.ConditionTrue, Reason: "BackupAccepted", - Message: "Backup accepted", + Message: "backup accepted", LastTransitionTime: metav1.NewTime(time.Now()), }, }, @@ -314,7 +314,7 @@ var _ = ginkgo.Describe("Test single reconciles of NonAdminBackup Reconcile func Type: "Accepted", Status: metav1.ConditionTrue, Reason: "BackupAccepted", - Message: "Backup accepted", + Message: "backup accepted", }, }, }, @@ -332,7 +332,7 @@ var _ = ginkgo.Describe("Test single reconciles of NonAdminBackup Reconcile func Type: "Accepted", Status: metav1.ConditionTrue, Reason: "BackupAccepted", - Message: "Backup accepted", + Message: "backup accepted", LastTransitionTime: metav1.NewTime(time.Now()), }, }, @@ -345,7 +345,7 @@ var _ = ginkgo.Describe("Test single reconciles of NonAdminBackup Reconcile func Type: "Accepted", Status: metav1.ConditionTrue, Reason: "BackupAccepted", - Message: "Backup accepted", + Message: "backup accepted", }, { Type: "Queued", @@ -369,7 +369,7 @@ var _ = ginkgo.Describe("Test single reconciles of NonAdminBackup Reconcile func Type: "Accepted", Status: metav1.ConditionTrue, Reason: "BackupAccepted", - Message: "Backup accepted", + Message: "backup accepted", LastTransitionTime: metav1.NewTime(time.Now()), }, { @@ -391,7 +391,7 @@ var _ = ginkgo.Describe("Test single reconciles of NonAdminBackup Reconcile func Type: "Accepted", Status: metav1.ConditionTrue, Reason: "BackupAccepted", - Message: "Backup accepted", + Message: "backup accepted", }, { Type: "Queued", @@ -418,7 +418,7 @@ var _ = ginkgo.Describe("Test single reconciles of NonAdminBackup Reconcile func Type: "Accepted", Status: metav1.ConditionTrue, Reason: "BackupAccepted", - Message: "Backup accepted", + Message: "backup accepted", LastTransitionTime: metav1.NewTime(time.Now()), }, { @@ -440,7 +440,7 @@ var _ = 
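With the Accepted condition now carrying err.Error(), a failed validation tells the user exactly which field was rejected instead of a generic sentence. The write path follows the usual apimachinery pattern, persisting status only when the condition actually changed. A sketch, with the api import path assumed:

package controller

import (
	"context"

	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"

	nacv1alpha1 "github.com/migtools/oadp-non-admin/api/v1alpha1"
)

// setAcceptedFalse records the concrete validation error on the Accepted
// condition; meta.SetStatusCondition reports whether anything changed, so
// the status update is skipped on repeated reconciles of the same error.
func setAcceptedFalse(ctx context.Context, c client.Client, nab *nacv1alpha1.NonAdminBackup, validationErr error) error {
	updated := meta.SetStatusCondition(&nab.Status.Conditions, metav1.Condition{
		Type:    string(nacv1alpha1.NonAdminConditionAccepted),
		Status:  metav1.ConditionFalse,
		Reason:  "InvalidBackupSpec",
		Message: validationErr.Error(),
	})
	if updated {
		return c.Status().Update(ctx, nab)
	}
	return nil
}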
ginkgo.Describe("Test single reconciles of NonAdminBackup Reconcile func Type: "Accepted", Status: metav1.ConditionTrue, Reason: "BackupAccepted", - Message: "Backup accepted", + Message: "backup accepted", }, { Type: "Queued", @@ -484,7 +484,7 @@ var _ = ginkgo.Describe("Test single reconciles of NonAdminBackup Reconcile func Type: "Accepted", Status: metav1.ConditionFalse, Reason: "InvalidBackupSpec", - Message: "NonAdminBackup does not contain valid Spec", + Message: "spec.backupSpec.IncludedNamespaces can not contain namespaces other than: test-nonadminbackup-reconcile-9", }, }, }, @@ -620,7 +620,7 @@ var _ = ginkgo.Describe("Test full reconcile loop of NonAdminBackup Controller", Type: "Accepted", Status: metav1.ConditionTrue, Reason: "BackupAccepted", - Message: "Backup accepted", + Message: "backup accepted", }, { Type: "Queued", @@ -644,7 +644,7 @@ var _ = ginkgo.Describe("Test full reconcile loop of NonAdminBackup Controller", Type: "Accepted", Status: metav1.ConditionFalse, Reason: "InvalidBackupSpec", - Message: "NonAdminBackup does not contain valid Spec", + Message: "spec.backupSpec.IncludedNamespaces can not contain namespaces other than: test-nonadminbackup-reconcile-full-2", }, }, }, From 1a4f4ce2667b343ef0fce4268c4420fe63643bcd Mon Sep 17 00:00:00 2001 From: Mateus Oliveira Date: Wed, 11 Sep 2024 09:42:39 -0300 Subject: [PATCH 14/17] fixup! fix: add integration tests for NAB Signed-off-by: Mateus Oliveira --- internal/handler/velerobackup_handler.go | 8 ++++---- internal/predicate/nonadminbackup_predicate.go | 16 ++++++++-------- internal/predicate/velerobackup_predicate.go | 12 ++++++------ 3 files changed, 18 insertions(+), 18 deletions(-) diff --git a/internal/handler/velerobackup_handler.go b/internal/handler/velerobackup_handler.go index d1fe4c1..ce484b9 100644 --- a/internal/handler/velerobackup_handler.go +++ b/internal/handler/velerobackup_handler.go @@ -41,17 +41,17 @@ func getVeleroBackupHandlerLogger(ctx context.Context, name, namespace string) l // Create event handler func (*VeleroBackupHandler) Create(ctx context.Context, evt event.CreateEvent, _ workqueue.RateLimitingInterface) { - nameSpace := evt.Object.GetNamespace() + namespace := evt.Object.GetNamespace() name := evt.Object.GetName() - logger := getVeleroBackupHandlerLogger(ctx, name, nameSpace) + logger := getVeleroBackupHandlerLogger(ctx, name, namespace) logger.V(1).Info("Received Create VeleroBackupHandler") } // Update event handler func (*VeleroBackupHandler) Update(ctx context.Context, evt event.UpdateEvent, q workqueue.RateLimitingInterface) { - nameSpace := evt.ObjectNew.GetNamespace() + namespace := evt.ObjectNew.GetNamespace() name := evt.ObjectNew.GetName() - logger := getVeleroBackupHandlerLogger(ctx, name, nameSpace) + logger := getVeleroBackupHandlerLogger(ctx, name, namespace) logger.V(1).Info("Received Update VeleroBackupHandler") annotations := evt.ObjectNew.GetAnnotations() diff --git a/internal/predicate/nonadminbackup_predicate.go b/internal/predicate/nonadminbackup_predicate.go index f211a7e..6597de5 100644 --- a/internal/predicate/nonadminbackup_predicate.go +++ b/internal/predicate/nonadminbackup_predicate.go @@ -39,9 +39,9 @@ func getNonAdminBackupPredicateLogger(ctx context.Context, name, namespace strin // Create event filter func (NonAdminBackupPredicate) Create(ctx context.Context, evt event.CreateEvent) bool { - nameSpace := evt.Object.GetNamespace() + namespace := evt.Object.GetNamespace() name := evt.Object.GetName() - logger := getNonAdminBackupPredicateLogger(ctx, name, 
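The nameSpace to namespace renames in this patch are mechanical, but the predicates they touch are what keep the controller from reconciling on its own status writes: an update is only accepted on a generation (spec) change or on a meaningful phase transition. The generation check on its own, as a stock controller-runtime predicate (the project wires custom predicate structs with contextual logging instead):

package predicate

import (
	"sigs.k8s.io/controller-runtime/pkg/event"
	"sigs.k8s.io/controller-runtime/pkg/predicate"
)

// specChangeOnly accepts an update only when the object generation moved,
// i.e. the spec was edited; pure status writes keep the same generation.
func specChangeOnly() predicate.Funcs {
	return predicate.Funcs{
		UpdateFunc: func(e event.UpdateEvent) bool {
			return e.ObjectNew.GetGeneration() != e.ObjectOld.GetGeneration()
		},
	}
}

controller-runtime also ships predicate.GenerationChangedPredicate for exactly this; the custom variant in the repository exists so selected phase transitions can be let through as well.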
nameSpace) + logger := getNonAdminBackupPredicateLogger(ctx, name, namespace) logger.V(1).Info("NonAdminBackupPredicate: Received Create event") if nonAdminBackup, ok := evt.Object.(*nacv1alpha1.NonAdminBackup); ok { if nonAdminBackup.Status.Phase == constant.EmptyString || nonAdminBackup.Status.Phase == nacv1alpha1.NonAdminBackupPhaseNew { @@ -55,9 +55,9 @@ func (NonAdminBackupPredicate) Create(ctx context.Context, evt event.CreateEvent // Update event filter func (NonAdminBackupPredicate) Update(ctx context.Context, evt event.UpdateEvent) bool { - nameSpace := evt.ObjectNew.GetNamespace() + namespace := evt.ObjectNew.GetNamespace() name := evt.ObjectNew.GetName() - logger := getNonAdminBackupPredicateLogger(ctx, name, nameSpace) + logger := getNonAdminBackupPredicateLogger(ctx, name, namespace) logger.V(1).Info("NonAdminBackupPredicate: Received Update event") if evt.ObjectNew.GetGeneration() != evt.ObjectOld.GetGeneration() { @@ -87,18 +87,18 @@ func (NonAdminBackupPredicate) Update(ctx context.Context, evt event.UpdateEvent // Delete event filter func (NonAdminBackupPredicate) Delete(ctx context.Context, evt event.DeleteEvent) bool { - nameSpace := evt.Object.GetNamespace() + namespace := evt.Object.GetNamespace() name := evt.Object.GetName() - logger := getNonAdminBackupPredicateLogger(ctx, name, nameSpace) + logger := getNonAdminBackupPredicateLogger(ctx, name, namespace) logger.V(1).Info("NonAdminBackupPredicate: Accepted Delete event") return true } // Generic event filter func (NonAdminBackupPredicate) Generic(ctx context.Context, evt event.GenericEvent) bool { - nameSpace := evt.Object.GetNamespace() + namespace := evt.Object.GetNamespace() name := evt.Object.GetName() - logger := getNonAdminBackupPredicateLogger(ctx, name, nameSpace) + logger := getNonAdminBackupPredicateLogger(ctx, name, namespace) logger.V(1).Info("NonAdminBackupPredicate: Accepted Generic event") return true } diff --git a/internal/predicate/velerobackup_predicate.go b/internal/predicate/velerobackup_predicate.go index 7879937..22da1d3 100644 --- a/internal/predicate/velerobackup_predicate.go +++ b/internal/predicate/velerobackup_predicate.go @@ -42,13 +42,13 @@ func getBackupPredicateLogger(ctx context.Context, name, namespace string) logr. 
// Create event filter func (veleroBackupPredicate VeleroBackupPredicate) Create(ctx context.Context, evt event.CreateEvent) bool { - nameSpace := evt.Object.GetNamespace() - if nameSpace != veleroBackupPredicate.OadpVeleroNamespace { + namespace := evt.Object.GetNamespace() + if namespace != veleroBackupPredicate.OadpVeleroNamespace { return false } name := evt.Object.GetName() - logger := getBackupPredicateLogger(ctx, name, nameSpace) + logger := getBackupPredicateLogger(ctx, name, namespace) logger.V(1).Info("VeleroBackupPredicate: Received Create event") return function.CheckVeleroBackupLabels(evt.Object.GetLabels()) @@ -56,11 +56,11 @@ func (veleroBackupPredicate VeleroBackupPredicate) Create(ctx context.Context, e // Update event filter func (veleroBackupPredicate VeleroBackupPredicate) Update(ctx context.Context, evt event.UpdateEvent) bool { - nameSpace := evt.ObjectNew.GetNamespace() + namespace := evt.ObjectNew.GetNamespace() name := evt.ObjectNew.GetName() - logger := getBackupPredicateLogger(ctx, name, nameSpace) + logger := getBackupPredicateLogger(ctx, name, namespace) logger.V(1).Info("VeleroBackupPredicate: Received Update event") - return nameSpace == veleroBackupPredicate.OadpVeleroNamespace + return namespace == veleroBackupPredicate.OadpVeleroNamespace } // Delete event filter From 55a34e622588bbf23ecc4dcf87bfe7b23f14943b Mon Sep 17 00:00:00 2001 From: Mateus Oliveira Date: Wed, 11 Sep 2024 16:28:19 -0300 Subject: [PATCH 15/17] fixup! fix: add integration tests for NAB Signed-off-by: Mateus Oliveira --- internal/controller/nonadminbackup_controller.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/controller/nonadminbackup_controller.go b/internal/controller/nonadminbackup_controller.go index cf80fa3..e20d0c2 100644 --- a/internal/controller/nonadminbackup_controller.go +++ b/internal/controller/nonadminbackup_controller.go @@ -73,7 +73,7 @@ func (r *NonAdminBackupReconciler) Reconcile(ctx context.Context, req ctrl.Reque err := r.Get(ctx, req.NamespacedName, &nab) if err != nil { if apierrors.IsNotFound(err) { - logger.V(1).Info("NonAdminBackup was deleted") + logger.V(1).Info(err.Error()) return ctrl.Result{}, nil } logger.Error(err, "Unable to fetch NonAdminBackup") From 03e4b9fcc9c841eb1439a2ec2ba52a999b47b3f9 Mon Sep 17 00:00:00 2001 From: Mateus Oliveira Date: Fri, 20 Sep 2024 15:18:08 -0300 Subject: [PATCH 16/17] fixup! 
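Patch 15 above replaces the fixed "NonAdminBackup was deleted" message with err.Error(), which stays accurate for whatever reason the Get came back NotFound. The surrounding control flow is the standard reconciler idiom: NotFound means the object is gone and reconciliation simply ends. Sketch, with the api import path assumed:

package controller

import (
	"context"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/log"

	nacv1alpha1 "github.com/migtools/oadp-non-admin/api/v1alpha1"
)

// fetchNonAdminBackup returns (nil, nil) when the object no longer exists,
// so the caller can stop reconciling without treating deletion as failure.
func fetchNonAdminBackup(ctx context.Context, c client.Client, key types.NamespacedName) (*nacv1alpha1.NonAdminBackup, error) {
	logger := log.FromContext(ctx)
	nab := &nacv1alpha1.NonAdminBackup{}
	if err := c.Get(ctx, key, nab); err != nil {
		if apierrors.IsNotFound(err) {
			logger.V(1).Info(err.Error())
			return nil, nil
		}
		return nil, err
	}
	return nab, nil
}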
fix: add integration tests for NAB Signed-off-by: Mateus Oliveira --- .../nonadminbackup_controller_test.go | 88 +++++++++++-------- 1 file changed, 53 insertions(+), 35 deletions(-) diff --git a/internal/controller/nonadminbackup_controller_test.go b/internal/controller/nonadminbackup_controller_test.go index d396c52..8e79457 100644 --- a/internal/controller/nonadminbackup_controller_test.go +++ b/internal/controller/nonadminbackup_controller_test.go @@ -20,6 +20,7 @@ import ( "context" "fmt" "reflect" + "strings" "time" "github.com/onsi/ginkgo/v2" @@ -35,13 +36,16 @@ import ( "github.com/migtools/oadp-non-admin/internal/common/function" ) -const testNonAdminBackupName = "test-non-admin-backup" +const ( + testNonAdminBackupName = "test-non-admin-backup" + placeholder = "PLACEHOLDER" +) type nonAdminBackupSingleReconcileScenario struct { resultError error priorStatus *nacv1alpha1.NonAdminBackupStatus spec nacv1alpha1.NonAdminBackupSpec - status nacv1alpha1.NonAdminBackupStatus + ExpectedStatus nacv1alpha1.NonAdminBackupStatus result reconcile.Result createVeleroBackup bool } @@ -51,7 +55,7 @@ type nonAdminBackupFullReconcileScenario struct { status nacv1alpha1.NonAdminBackupStatus } -func createTestNonAdminBackup(namespace string, spec nacv1alpha1.NonAdminBackupSpec) *nacv1alpha1.NonAdminBackup { +func buildTestNonAdminBackup(namespace string, spec nacv1alpha1.NonAdminBackupSpec) *nacv1alpha1.NonAdminBackup { return &nacv1alpha1.NonAdminBackup{ ObjectMeta: metav1.ObjectMeta{ Name: testNonAdminBackupName, @@ -61,15 +65,23 @@ func createTestNonAdminBackup(namespace string, spec nacv1alpha1.NonAdminBackupS } } -func checkTestNonAdminBackupStatus(nonAdminBackup *nacv1alpha1.NonAdminBackup, expectedStatus nacv1alpha1.NonAdminBackupStatus) error { +func checkTestNonAdminBackupStatus(nonAdminBackup *nacv1alpha1.NonAdminBackup, expectedStatus nacv1alpha1.NonAdminBackupStatus, nonAdminNamespaceName string, oadpNamespaceName string) error { if nonAdminBackup.Status.Phase != expectedStatus.Phase { return fmt.Errorf("NonAdminBackup Status Phase %v is not equal to expected %v", nonAdminBackup.Status.Phase, expectedStatus.Phase) } - if nonAdminBackup.Status.VeleroBackupName != expectedStatus.VeleroBackupName { - return fmt.Errorf("NonAdminBackup Status VeleroBackupName %v is not equal to expected %v", nonAdminBackup.Status.VeleroBackupName, expectedStatus.VeleroBackupName) + veleroBackupName := expectedStatus.VeleroBackupName + if expectedStatus.VeleroBackupName == placeholder { + veleroBackupName = function.GenerateVeleroBackupName(nonAdminNamespaceName, testNonAdminBackupName) + } + if nonAdminBackup.Status.VeleroBackupName != veleroBackupName { + return fmt.Errorf("NonAdminBackup Status VeleroBackupName %v is not equal to expected %v", nonAdminBackup.Status.VeleroBackupName, veleroBackupName) } - if nonAdminBackup.Status.VeleroBackupNamespace != expectedStatus.VeleroBackupNamespace { - return fmt.Errorf("NonAdminBackup Status VeleroBackupNamespace %v is not equal to expected %v", nonAdminBackup.Status.VeleroBackupNamespace, expectedStatus.VeleroBackupNamespace) + veleroBackupNamespace := expectedStatus.VeleroBackupNamespace + if expectedStatus.VeleroBackupNamespace == placeholder { + veleroBackupNamespace = oadpNamespaceName + } + if nonAdminBackup.Status.VeleroBackupNamespace != veleroBackupNamespace { + return fmt.Errorf("NonAdminBackup Status VeleroBackupNamespace %v is not equal to expected %v", nonAdminBackup.Status.VeleroBackupNamespace, veleroBackupNamespace) } if 
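The PLACEHOLDER sentinel introduced here lets table entries stay agnostic of generated namespace names: the expectation is declared once, and the concrete value is substituted at assertion time from function.GenerateVeleroBackupName and the OADP namespace. The substitution itself reduces to a small helper:

package controller

const placeholder = "PLACEHOLDER"

// resolvePlaceholder swaps the sentinel for the value that is only known
// at run time, keeping DescribeTable entries reusable across namespaces.
func resolvePlaceholder(expected, runtimeValue string) string {
	if expected == placeholder {
		return runtimeValue
	}
	return expected
}

Called as, for example, resolvePlaceholder(expectedStatus.VeleroBackupName, function.GenerateVeleroBackupName(nonAdminNamespaceName, testNonAdminBackupName)).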
!reflect.DeepEqual(nonAdminBackup.Status.VeleroBackupStatus, expectedStatus.VeleroBackupStatus) { return fmt.Errorf("NonAdminBackup Status VeleroBackupStatus %v is not equal to expected %v", nonAdminBackup.Status.VeleroBackupStatus, expectedStatus.VeleroBackupStatus) @@ -88,8 +100,8 @@ func checkTestNonAdminBackupStatus(nonAdminBackup *nacv1alpha1.NonAdminBackup, e if nonAdminBackup.Status.Conditions[index].Reason != expectedStatus.Conditions[index].Reason { return fmt.Errorf("NonAdminBackup Status Conditions [%v] Reason %v is not equal to expected %v", index, nonAdminBackup.Status.Conditions[index].Reason, expectedStatus.Conditions[index].Reason) } - if nonAdminBackup.Status.Conditions[index].Message != expectedStatus.Conditions[index].Message { - return fmt.Errorf("NonAdminBackup Status Conditions [%v] Message %v is not equal to expected %v", index, nonAdminBackup.Status.Conditions[index].Message, expectedStatus.Conditions[index].Message) + if !strings.Contains(nonAdminBackup.Status.Conditions[index].Message, expectedStatus.Conditions[index].Message) { + return fmt.Errorf("NonAdminBackup Status Conditions [%v] Message %v does not contain expected message %v", index, nonAdminBackup.Status.Conditions[index].Message, expectedStatus.Conditions[index].Message) } } return nil @@ -193,7 +205,7 @@ var _ = ginkgo.Describe("Test single reconciles of NonAdminBackup Reconcile func gomega.Expect(createTestNamespaces(ctx, nonAdminNamespaceName, oadpNamespaceName)).To(gomega.Succeed()) - nonAdminBackup := createTestNonAdminBackup(nonAdminNamespaceName, scenario.spec) + nonAdminBackup := buildTestNonAdminBackup(nonAdminNamespaceName, scenario.spec) gomega.Expect(k8sClient.Create(ctx, nonAdminBackup)).To(gomega.Succeed()) if scenario.createVeleroBackup { @@ -211,6 +223,12 @@ var _ = ginkgo.Describe("Test single reconciles of NonAdminBackup Reconcile func if scenario.priorStatus != nil { nonAdminBackup.Status = *scenario.priorStatus + if nonAdminBackup.Status.VeleroBackupName == placeholder { + nonAdminBackup.Status.VeleroBackupName = function.GenerateVeleroBackupName(nonAdminNamespaceName, testNonAdminBackupName) + } + if nonAdminBackup.Status.VeleroBackupNamespace == placeholder { + nonAdminBackup.Status.VeleroBackupNamespace = oadpNamespaceName + } gomega.Expect(k8sClient.Status().Update(ctx, nonAdminBackup)).To(gomega.Succeed()) } // easy hack to test that only one update call happens per reconcile @@ -233,7 +251,7 @@ var _ = ginkgo.Describe("Test single reconciles of NonAdminBackup Reconcile func gomega.Expect(err).To(gomega.Not(gomega.HaveOccurred())) } else { gomega.Expect(err).To(gomega.HaveOccurred()) - gomega.Expect(err.Error()).To(gomega.Equal(scenario.resultError.Error())) + gomega.Expect(err.Error()).To(gomega.ContainSubstring(scenario.resultError.Error())) } gomega.Expect(k8sClient.Get( @@ -245,7 +263,7 @@ var _ = ginkgo.Describe("Test single reconciles of NonAdminBackup Reconcile func nonAdminBackup, )).To(gomega.Succeed()) - gomega.Expect(checkTestNonAdminBackupStatus(nonAdminBackup, scenario.status)).To(gomega.Succeed()) + gomega.Expect(checkTestNonAdminBackupStatus(nonAdminBackup, scenario.ExpectedStatus, nonAdminNamespaceName, oadpNamespaceName)).To(gomega.Succeed()) if scenario.priorStatus != nil { if len(scenario.priorStatus.VeleroBackupName) > 0 { gomega.Expect(reflect.DeepEqual( @@ -265,7 +283,7 @@ var _ = ginkgo.Describe("Test single reconciles of NonAdminBackup Reconcile func // gomega.Expect(currentResourceVersion - priorResourceVersion).To(gomega.Equal(1)) }, ginkgo.Entry("When 
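Loosening the message assertions from equality to strings.Contains (and the reconcile error check to gomega.ContainSubstring) is what allows the expected messages to omit the namespace suffix that differs per test run. The same check, isolated:

package controller

import (
	"fmt"
	"strings"
)

// containsMessage verifies the actual condition message embeds the expected
// fragment; the full text carries runtime namespace names that the table
// entries cannot know in advance.
func containsMessage(actual, fragment string) error {
	if !strings.Contains(actual, fragment) {
		return fmt.Errorf("message %q does not contain expected fragment %q", actual, fragment)
	}
	return nil
}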
called by NonAdminBackup Create event, should update NonAdminBackup phase to new and Requeue", nonAdminBackupSingleReconcileScenario{ - status: nacv1alpha1.NonAdminBackupStatus{ + ExpectedStatus: nacv1alpha1.NonAdminBackupStatus{ Phase: nacv1alpha1.NonAdminBackupPhaseNew, }, result: reconcile.Result{Requeue: true}, @@ -277,7 +295,7 @@ var _ = ginkgo.Describe("Test single reconciles of NonAdminBackup Reconcile func priorStatus: &nacv1alpha1.NonAdminBackupStatus{ Phase: nacv1alpha1.NonAdminBackupPhaseNew, }, - status: nacv1alpha1.NonAdminBackupStatus{ + ExpectedStatus: nacv1alpha1.NonAdminBackupStatus{ Phase: nacv1alpha1.NonAdminBackupPhaseNew, Conditions: []metav1.Condition{ { @@ -306,7 +324,7 @@ var _ = ginkgo.Describe("Test single reconciles of NonAdminBackup Reconcile func }, }, }, - status: nacv1alpha1.NonAdminBackupStatus{ + ExpectedStatus: nacv1alpha1.NonAdminBackupStatus{ // TODO should not have VeleroBackupName and VeleroBackupNamespace? Phase: nacv1alpha1.NonAdminBackupPhaseCreated, Conditions: []metav1.Condition{ @@ -337,7 +355,7 @@ var _ = ginkgo.Describe("Test single reconciles of NonAdminBackup Reconcile func }, }, }, - status: nacv1alpha1.NonAdminBackupStatus{ + ExpectedStatus: nacv1alpha1.NonAdminBackupStatus{ // TODO should not have VeleroBackupName and VeleroBackupNamespace? Phase: nacv1alpha1.NonAdminBackupPhaseCreated, Conditions: []metav1.Condition{ @@ -381,10 +399,10 @@ var _ = ginkgo.Describe("Test single reconciles of NonAdminBackup Reconcile func }, }, }, - status: nacv1alpha1.NonAdminBackupStatus{ + ExpectedStatus: nacv1alpha1.NonAdminBackupStatus{ Phase: nacv1alpha1.NonAdminBackupPhaseCreated, - VeleroBackupName: "nab-test-nonadminbackup-reconcile-6-c9dd6af01e2e2a", - VeleroBackupNamespace: "test-nonadminbackup-reconcile-6-oadp", + VeleroBackupName: placeholder, + VeleroBackupNamespace: placeholder, VeleroBackupStatus: &v1.BackupStatus{}, Conditions: []metav1.Condition{ { @@ -410,8 +428,8 @@ var _ = ginkgo.Describe("Test single reconciles of NonAdminBackup Reconcile func }, priorStatus: &nacv1alpha1.NonAdminBackupStatus{ Phase: nacv1alpha1.NonAdminBackupPhaseCreated, - VeleroBackupName: "nab-test-nonadminbackup-reconcile-7-c9dd6af01e2e2a", - VeleroBackupNamespace: "test-nonadminbackup-reconcile-7-oadp", + VeleroBackupName: placeholder, + VeleroBackupNamespace: placeholder, VeleroBackupStatus: &v1.BackupStatus{}, Conditions: []metav1.Condition{ { @@ -430,10 +448,10 @@ var _ = ginkgo.Describe("Test single reconciles of NonAdminBackup Reconcile func }, }, }, - status: nacv1alpha1.NonAdminBackupStatus{ + ExpectedStatus: nacv1alpha1.NonAdminBackupStatus{ Phase: nacv1alpha1.NonAdminBackupPhaseCreated, - VeleroBackupName: "nab-test-nonadminbackup-reconcile-7-c9dd6af01e2e2a", - VeleroBackupNamespace: "test-nonadminbackup-reconcile-7-oadp", + VeleroBackupName: placeholder, + VeleroBackupNamespace: placeholder, VeleroBackupStatus: &v1.BackupStatus{}, Conditions: []metav1.Condition{ { @@ -462,7 +480,7 @@ var _ = ginkgo.Describe("Test single reconciles of NonAdminBackup Reconcile func priorStatus: &nacv1alpha1.NonAdminBackupStatus{ Phase: nacv1alpha1.NonAdminBackupPhaseNew, }, - status: nacv1alpha1.NonAdminBackupStatus{ + ExpectedStatus: nacv1alpha1.NonAdminBackupStatus{ Phase: nacv1alpha1.NonAdminBackupPhaseBackingOff, }, result: reconcile.Result{Requeue: true}, @@ -477,18 +495,18 @@ var _ = ginkgo.Describe("Test single reconciles of NonAdminBackup Reconcile func priorStatus: &nacv1alpha1.NonAdminBackupStatus{ Phase: nacv1alpha1.NonAdminBackupPhaseBackingOff, }, - status: 
nacv1alpha1.NonAdminBackupStatus{ + ExpectedStatus: nacv1alpha1.NonAdminBackupStatus{ Phase: nacv1alpha1.NonAdminBackupPhaseBackingOff, Conditions: []metav1.Condition{ { Type: "Accepted", Status: metav1.ConditionFalse, Reason: "InvalidBackupSpec", - Message: "spec.backupSpec.IncludedNamespaces can not contain namespaces other than: test-nonadminbackup-reconcile-9", + Message: "spec.backupSpec.IncludedNamespaces can not contain namespaces other than:", }, }, }, - resultError: reconcile.TerminalError(fmt.Errorf("spec.backupSpec.IncludedNamespaces can not contain namespaces other than: test-nonadminbackup-reconcile-9")), + resultError: reconcile.TerminalError(fmt.Errorf("spec.backupSpec.IncludedNamespaces can not contain namespaces other than: ")), }), ) }) @@ -543,7 +561,7 @@ var _ = ginkgo.Describe("Test full reconcile loop of NonAdminBackup Controller", time.Sleep(1 * time.Second) ginkgo.By("Waiting Reconcile of create event") - nonAdminBackup := createTestNonAdminBackup(nonAdminNamespaceName, scenario.spec) + nonAdminBackup := buildTestNonAdminBackup(nonAdminNamespaceName, scenario.spec) gomega.Expect(k8sClient.Create(ctx, nonAdminBackup)).To(gomega.Succeed()) // wait NAB reconcile time.Sleep(1 * time.Second) @@ -559,7 +577,7 @@ var _ = ginkgo.Describe("Test full reconcile loop of NonAdminBackup Controller", )).To(gomega.Succeed()) ginkgo.By("Validating NonAdminBackup Status") - gomega.Expect(checkTestNonAdminBackupStatus(nonAdminBackup, scenario.status)).To(gomega.Succeed()) + gomega.Expect(checkTestNonAdminBackupStatus(nonAdminBackup, scenario.status, nonAdminNamespaceName, oadpNamespaceName)).To(gomega.Succeed()) if len(scenario.status.VeleroBackupName) > 0 { ginkgo.By("Validating NonAdminBackup Spec") @@ -577,7 +595,7 @@ var _ = ginkgo.Describe("Test full reconcile loop of NonAdminBackup Controller", gomega.Expect(k8sClient.Get( ctx, types.NamespacedName{ - Name: scenario.status.VeleroBackupName, + Name: function.GenerateVeleroBackupName(nonAdminNamespaceName, testNonAdminBackupName), Namespace: oadpNamespaceName, }, veleroBackup, @@ -612,8 +630,8 @@ var _ = ginkgo.Describe("Test full reconcile loop of NonAdminBackup Controller", }, status: nacv1alpha1.NonAdminBackupStatus{ Phase: nacv1alpha1.NonAdminBackupPhaseCreated, - VeleroBackupName: "nab-test-nonadminbackup-reconcile-full-1-c9dd6af01e2e2a", - VeleroBackupNamespace: "test-nonadminbackup-reconcile-full-1-oadp", + VeleroBackupName: placeholder, + VeleroBackupNamespace: placeholder, VeleroBackupStatus: &v1.BackupStatus{}, Conditions: []metav1.Condition{ { @@ -644,7 +662,7 @@ var _ = ginkgo.Describe("Test full reconcile loop of NonAdminBackup Controller", Type: "Accepted", Status: metav1.ConditionFalse, Reason: "InvalidBackupSpec", - Message: "spec.backupSpec.IncludedNamespaces can not contain namespaces other than: test-nonadminbackup-reconcile-full-2", + Message: "spec.backupSpec.IncludedNamespaces can not contain namespaces other than:", }, }, }, From 94dfd83907c6175c1ebcf26499d38342eda526f9 Mon Sep 17 00:00:00 2001 From: Mateus Oliveira Date: Tue, 24 Sep 2024 10:36:36 -0300 Subject: [PATCH 17/17] fixup! 
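Replacing the hard-coded scenario.status.VeleroBackupName with function.GenerateVeleroBackupName(...) removes the last name literal from the full-loop test. Judging by the names previously asserted (nab-test-nonadminbackup-reconcile-full-1-c9dd6af01e2e2a), the derived name has the shape "nab-" + namespace + a 14-hex-character digest of the backup name; the sketch below only mirrors that shape, the hash choice is an assumption, and the real helper in internal/common/function remains the source of truth:

package function

import (
	"crypto/sha256"
	"fmt"
)

// generateVeleroBackupNameSketch mimics the observed "nab-<ns>-<digest>"
// shape; digest[:7] yields the 14 hex characters seen in the test names.
func generateVeleroBackupNameSketch(namespace, nabName string) string {
	digest := sha256.Sum256([]byte(nabName))
	return fmt.Sprintf("nab-%s-%x", namespace, digest[:7])
}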
fix: add integration tests for NAB Signed-off-by: Mateus Oliveira --- .../nonadminbackup_controller_test.go | 20 +++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/internal/controller/nonadminbackup_controller_test.go b/internal/controller/nonadminbackup_controller_test.go index 8e79457..5cfeabf 100644 --- a/internal/controller/nonadminbackup_controller_test.go +++ b/internal/controller/nonadminbackup_controller_test.go @@ -174,7 +174,7 @@ var _ = ginkgo.Describe("Test single reconciles of NonAdminBackup Reconcile func gomega.Expect(deleteTestNamespaces(ctx, nonAdminNamespaceName, oadpNamespaceName)).To(gomega.Succeed()) }) - ginkgo.DescribeTable("Reconcile called by NonAdminBackup Delete event", + ginkgo.DescribeTable("Reconcile triggered by NonAdminBackup Delete event", func(scenario nonAdminBackupSingleReconcileScenario) { updateTestScenario() @@ -199,7 +199,7 @@ var _ = ginkgo.Describe("Test single reconciles of NonAdminBackup Reconcile func }), ) - ginkgo.DescribeTable("Reconcile called by NonAdminBackup Create/Update events and by Requeue", + ginkgo.DescribeTable("Reconcile triggered by NonAdminBackup Create/Update events and by Requeue", func(scenario nonAdminBackupSingleReconcileScenario) { updateTestScenario() @@ -282,13 +282,13 @@ var _ = ginkgo.Describe("Test single reconciles of NonAdminBackup Reconcile func // gomega.Expect(err).To(gomega.Not(gomega.HaveOccurred())) // gomega.Expect(currentResourceVersion - priorResourceVersion).To(gomega.Equal(1)) }, - ginkgo.Entry("When called by NonAdminBackup Create event, should update NonAdminBackup phase to new and Requeue", nonAdminBackupSingleReconcileScenario{ + ginkgo.Entry("When triggered by NonAdminBackup Create event, should update NonAdminBackup phase to new and Requeue", nonAdminBackupSingleReconcileScenario{ ExpectedStatus: nacv1alpha1.NonAdminBackupStatus{ Phase: nacv1alpha1.NonAdminBackupPhaseNew, }, result: reconcile.Result{Requeue: true}, }), - ginkgo.Entry("When called by Requeue(update NonAdminBackup phase to new), should update NonAdminBackup Condition to Accepted True and Requeue", nonAdminBackupSingleReconcileScenario{ + ginkgo.Entry("When triggered by Requeue(NonAdminBackup phase new), should update NonAdminBackup Condition to Accepted True and Requeue", nonAdminBackupSingleReconcileScenario{ spec: nacv1alpha1.NonAdminBackupSpec{ BackupSpec: &v1.BackupSpec{}, }, @@ -308,7 +308,7 @@ var _ = ginkgo.Describe("Test single reconciles of NonAdminBackup Reconcile func }, result: reconcile.Result{Requeue: true}, }), - ginkgo.Entry("When called by Requeue(update NonAdminBackup Condition to Accepted True), should update NonAdminBackup phase to created and Requeue", nonAdminBackupSingleReconcileScenario{ + ginkgo.Entry("When triggered by Requeue(NonAdminBackup phase new; Conditions Accepted True), should update NonAdminBackup phase to created and Requeue", nonAdminBackupSingleReconcileScenario{ spec: nacv1alpha1.NonAdminBackupSpec{ BackupSpec: &v1.BackupSpec{}, }, @@ -338,7 +338,7 @@ var _ = ginkgo.Describe("Test single reconciles of NonAdminBackup Reconcile func }, result: reconcile.Result{Requeue: true}, }), - ginkgo.Entry("When called by Requeue(update NonAdminBackup phase to created), should update NonAdminBackup Condition to Queued True and Requeue", nonAdminBackupSingleReconcileScenario{ + ginkgo.Entry("When triggered by Requeue(NonAdminBackup phase created; Conditions Accepted True), should update NonAdminBackup Condition to Queued True and Requeue", nonAdminBackupSingleReconcileScenario{ 
createVeleroBackup: true, spec: nacv1alpha1.NonAdminBackupSpec{ BackupSpec: &v1.BackupSpec{}, @@ -375,7 +375,7 @@ var _ = ginkgo.Describe("Test single reconciles of NonAdminBackup Reconcile func }, result: reconcile.Result{Requeue: true}, }), - ginkgo.Entry("When called by Requeue(update NonAdminBackup Condition to Queued True), should update NonAdminBackup VeleroBackupStatus and Requeue", nonAdminBackupSingleReconcileScenario{ + ginkgo.Entry("When triggered by Requeue(NonAdminBackup phase created; Conditions Accepted True, Queued True), should update NonAdminBackup VeleroBackupStatus and Requeue", nonAdminBackupSingleReconcileScenario{ createVeleroBackup: true, spec: nacv1alpha1.NonAdminBackupSpec{ BackupSpec: &v1.BackupSpec{}, @@ -421,7 +421,7 @@ var _ = ginkgo.Describe("Test single reconciles of NonAdminBackup Reconcile func }, result: reconcile.Result{Requeue: true}, }), - ginkgo.Entry("When called by Requeue(update NonAdminBackup VeleroBackupStatus), should update NonAdminBackup spec BackupSpec and Requeue", nonAdminBackupSingleReconcileScenario{ + ginkgo.Entry("When triggered by Requeue(NonAdminBackup phase created; Conditions Accepted True, Queued True; VeleroBackupStatus), should update NonAdminBackup spec BackupSpec and Requeue", nonAdminBackupSingleReconcileScenario{ createVeleroBackup: true, spec: nacv1alpha1.NonAdminBackupSpec{ BackupSpec: &v1.BackupSpec{}, @@ -471,7 +471,7 @@ var _ = ginkgo.Describe("Test single reconciles of NonAdminBackup Reconcile func // TODO should not exit? result: reconcile.Result{Requeue: true}, }), - ginkgo.Entry("When called by Requeue(update NonAdminBackup phase to new - invalid spec), should update NonAdminBackup phase to BackingOff and Requeue", nonAdminBackupSingleReconcileScenario{ + ginkgo.Entry("When triggered by Requeue(NonAdminBackup phase new) [invalid spec], should update NonAdminBackup phase to BackingOff and Requeue", nonAdminBackupSingleReconcileScenario{ spec: nacv1alpha1.NonAdminBackupSpec{ BackupSpec: &v1.BackupSpec{ IncludedNamespaces: []string{"not-valid"}, @@ -485,7 +485,7 @@ var _ = ginkgo.Describe("Test single reconciles of NonAdminBackup Reconcile func }, result: reconcile.Result{Requeue: true}, }), - ginkgo.Entry("When called by Requeue(update NonAdminBackup phase to BackingOff), should update NonAdminBackup Condition to Accepted False and stop with terminal error", nonAdminBackupSingleReconcileScenario{ + ginkgo.Entry("When triggered by Requeue(NonAdminBackup phase BackingOff), should update NonAdminBackup Condition to Accepted False and stop with terminal error", nonAdminBackupSingleReconcileScenario{ // TODO this validates spec again... spec: nacv1alpha1.NonAdminBackupSpec{ BackupSpec: &v1.BackupSpec{