From bfc9151cacad1258a2c5166064d3afdf59708b38 Mon Sep 17 00:00:00 2001 From: Sebastian Widmer Date: Mon, 30 Sep 2024 13:38:49 +0200 Subject: [PATCH 1/5] Add next possible schedules to status --- api/v1beta1/upgradeconfig_types.go | 9 ++ api/v1beta1/zz_generated.deepcopy.go | 23 ++++ ...nagedupgrade.appuio.io_upgradeconfigs.yaml | 17 +++ controllers/upgradeconfig_controller.go | 126 ++++++++++-------- controllers/upgradeconfig_controller_test.go | 70 ++++++++++ pkg/schedule/schedule.go | 60 +++++++++ .../utils/utils.go} | 0 .../utils/utils_test.go} | 0 8 files changed, 252 insertions(+), 53 deletions(-) create mode 100644 pkg/schedule/schedule.go rename pkg/{scheduleutils/scheduleutils.go => schedule/utils/utils.go} (100%) rename pkg/{scheduleutils/scheduleutils_test.go => schedule/utils/utils_test.go} (100%) diff --git a/api/v1beta1/upgradeconfig_types.go b/api/v1beta1/upgradeconfig_types.go index 708a714..bcdbf79 100644 --- a/api/v1beta1/upgradeconfig_types.go +++ b/api/v1beta1/upgradeconfig_types.go @@ -67,6 +67,15 @@ type UpgradeConfigStatus struct { // Also is increased when a job would have been created, but was not created due to the config being suspended. // +optional LastScheduledUpgrade *metav1.Time `json:"lastScheduledUpgrade,omitempty"` + + // NextPossibleSchedules is a list of the next possible schedules for an upgrade. + NextPossibleSchedules []NextPossibleSchedule `json:"nextPossibleSchedules"` +} + +// NextPossibleSchedule defines the next possible schedule for an upgrade +type NextPossibleSchedule struct { + // Time is the time of the next possible schedule + Time metav1.Time `json:"time"` } //+kubebuilder:object:root=true diff --git a/api/v1beta1/zz_generated.deepcopy.go b/api/v1beta1/zz_generated.deepcopy.go index 30a5ce3..66faf90 100644 --- a/api/v1beta1/zz_generated.deepcopy.go +++ b/api/v1beta1/zz_generated.deepcopy.go @@ -258,6 +258,22 @@ func (in *HookJobTracker) DeepCopy() *HookJobTracker { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NextPossibleSchedule) DeepCopyInto(out *NextPossibleSchedule) { + *out = *in + in.Time.DeepCopyInto(&out.Time) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NextPossibleSchedule. +func (in *NextPossibleSchedule) DeepCopy() *NextPossibleSchedule { + if in == nil { + return nil + } + out := new(NextPossibleSchedule) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *UpgradeConfig) DeepCopyInto(out *UpgradeConfig) { *out = *in @@ -392,6 +408,13 @@ func (in *UpgradeConfigStatus) DeepCopyInto(out *UpgradeConfigStatus) { in, out := &in.LastScheduledUpgrade, &out.LastScheduledUpgrade *out = (*in).DeepCopy() } + if in.NextPossibleSchedules != nil { + in, out := &in.NextPossibleSchedules, &out.NextPossibleSchedules + *out = make([]NextPossibleSchedule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpgradeConfigStatus. 
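Note: a minimal sketch of how the new status.nextPossibleSchedules field added above can be consumed by API clients. The helper below is illustrative only and not part of this patch; it assumes the managedupgradev1beta1 types introduced here.

package example

import (
	"fmt"
	"time"

	managedupgradev1beta1 "github.com/appuio/openshift-upgrade-controller/api/v1beta1"
)

// printNextPossibleSchedules is a hypothetical helper, not part of the patch.
// It iterates over the new status field and prints each upcoming schedule
// as an RFC3339 timestamp.
func printNextPossibleSchedules(uc managedupgradev1beta1.UpgradeConfig) {
	for i, s := range uc.Status.NextPossibleSchedules {
		fmt.Printf("next possible schedule %d: %s\n", i, s.Time.UTC().Format(time.RFC3339))
	}
}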
diff --git a/config/crd/bases/managedupgrade.appuio.io_upgradeconfigs.yaml b/config/crd/bases/managedupgrade.appuio.io_upgradeconfigs.yaml index 02321ba..d87489c 100644 --- a/config/crd/bases/managedupgrade.appuio.io_upgradeconfigs.yaml +++ b/config/crd/bases/managedupgrade.appuio.io_upgradeconfigs.yaml @@ -260,6 +260,23 @@ spec: Also is increased when a job would have been created, but was not created due to the config being suspended. format: date-time type: string + nextPossibleSchedules: + description: NextPossibleSchedules is a list of the next possible + schedules for an upgrade. + items: + description: NextPossibleSchedule defines the next possible schedule + for an upgrade + properties: + time: + description: Time is the time of the next possible schedule + format: date-time + type: string + required: + - time + type: object + type: array + required: + - nextPossibleSchedules type: object required: - spec diff --git a/controllers/upgradeconfig_controller.go b/controllers/upgradeconfig_controller.go index 6df2719..6c69cde 100644 --- a/controllers/upgradeconfig_controller.go +++ b/controllers/upgradeconfig_controller.go @@ -3,12 +3,14 @@ package controllers import ( "context" "fmt" + "math" "strconv" "strings" "time" configv1 "github.com/openshift/api/config/v1" "github.com/robfig/cron/v3" + "go.uber.org/multierr" apimeta "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" @@ -21,7 +23,7 @@ import ( managedupgradev1beta1 "github.com/appuio/openshift-upgrade-controller/api/v1beta1" "github.com/appuio/openshift-upgrade-controller/pkg/clusterversion" - "github.com/appuio/openshift-upgrade-controller/pkg/scheduleutils" + "github.com/appuio/openshift-upgrade-controller/pkg/schedule" ) const ( @@ -56,16 +58,6 @@ type UpgradeConfigReconciler struct { // Reconcile implements the reconcile loop for UpgradeConfig. // It schedules UpgradeJobs based on the UpgradeConfig's schedule - if an update is available. 
func (r *UpgradeConfigReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - ret, err := r.reconcile(ctx, req) - if err != nil || ret.RequeueAfter > 0 || ret.Requeue { - return ret, err - } - - // ensure we always requeue after a minute, if no requeue set, so we don't miss the next run on some corner cases - return ctrl.Result{RequeueAfter: time.Minute}, nil -} - -func (r *UpgradeConfigReconciler) reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { l := log.FromContext(ctx).WithName("UpgradeConfigReconciler.Reconcile") l.Info("Reconciling UpgradeConfig") @@ -86,9 +78,39 @@ func (r *UpgradeConfigReconciler) reconcile(ctx context.Context, req ctrl.Reques location = l } + s, err := cron.ParseStandard(uc.Spec.Schedule.Cron) + if err != nil { + return ctrl.Result{}, err + } + sched := schedule.Schedule{Schedule: s, IsoWeek: uc.Spec.Schedule.IsoWeek} + + jobSchedRequeue, schedErr := r.scheduleJob(ctx, &uc, sched, location) + statusRequeue, stErr := r.updateNextPossibleSchedulesStatus(ctx, &uc, sched, location) + if err := multierr.Append(schedErr, stErr); err != nil { + return ctrl.Result{}, err + } + + nextRequeue := time.Duration(math.MaxInt64) + if jobSchedRequeue > 0 && jobSchedRequeue < nextRequeue { + nextRequeue = jobSchedRequeue + } + if statusRequeue > 0 && statusRequeue < nextRequeue { + nextRequeue = statusRequeue + } + // ensure we always requeue after a minute, if no requeue set, so we don't miss the next run on some corner cases + if nextRequeue == math.MaxInt64 { + nextRequeue = time.Minute + } + + return ctrl.Result{RequeueAfter: nextRequeue}, nil +} + +func (r *UpgradeConfigReconciler) scheduleJob(ctx context.Context, uc *managedupgradev1beta1.UpgradeConfig, sched schedule.Schedule, location *time.Location) (time.Duration, error) { + l := log.FromContext(ctx).WithName("UpgradeConfigReconciler.scheduleJob") + jobs, err := r.getControlledJobs(ctx, uc) if err != nil { - return ctrl.Result{}, fmt.Errorf("could not get controlled jobs: %w", err) + return 0, fmt.Errorf("could not get controlled jobs: %w", err) } latestJob := latestScheduledJob(jobs) now := r.Clock.Now().In(location) @@ -100,37 +122,32 @@ func (r *UpgradeConfigReconciler) reconcile(ctx context.Context, req ctrl.Reques if latestJob != nil { // status might have failed to update, try again if uc.Status.LastScheduledUpgrade == nil || latestJob.Spec.StartAfter.After(uc.Status.LastScheduledUpgrade.Time) { - return ctrl.Result{}, r.setLastScheduledUpgrade(ctx, &uc, latestJob.Spec.StartAfter.Time) + return 0, r.setLastScheduledUpgrade(ctx, uc, latestJob.Spec.StartAfter.Time) } // if there is a future job scheduled, do nothing if latestJob.Spec.StartAfter.After(now) { l.Info("future job already scheduled", "job", latestJob.Name, "startAfter", latestJob.Spec.StartAfter.Time) - return ctrl.Result{}, nil + return 0, nil } // if the latest job is not completed, do nothing isCompleted := apimeta.IsStatusConditionTrue(latestJob.Status.Conditions, managedupgradev1beta1.UpgradeJobConditionSucceeded) || apimeta.IsStatusConditionTrue(latestJob.Status.Conditions, managedupgradev1beta1.UpgradeJobConditionFailed) if !isCompleted { l.Info("latest job not completed", "job", latestJob.Name) - return ctrl.Result{}, nil + return 0, nil } earliestTimestamp = latestJob.Spec.StartAfter.Time } - sched, err := cron.ParseStandard(uc.Spec.Schedule.Cron) - if err != nil { - return ctrl.Result{}, err - } - nextRun := earliestTimestamp.In(location) nextRunAttempts := 0 findNextRun: nextRunAttempts++ - 
nextRun, err = calcNextRun(nextRun, sched, uc.Spec.Schedule.IsoWeek) + nextRun, err = sched.Next(nextRun) if err != nil { - return ctrl.Result{}, fmt.Errorf("could not find next run: %w", err) + return 0, fmt.Errorf("could not find next run: %w", err) } nextCreateJobWindow := nextRun.Add(-uc.Spec.PinVersionWindow.Duration) @@ -138,44 +155,44 @@ findNextRun: // if we are not yet in the scheduling window, requeue until we are if now.Before(nextCreateJobWindow) { l.Info("not yet in scheduling window", "window", nextCreateJobWindow) - return ctrl.Result{RequeueAfter: nextCreateJobWindow.Sub(now)}, nil + return nextCreateJobWindow.Sub(now), nil } // find next scheduling window if we're past the current one if now.After(nextCreateJobWindow.Add(uc.Spec.MaxSchedulingDelay.Duration)) { if nextRunAttempts > 100 { - return ctrl.Result{}, fmt.Errorf("could not find next scheduling window after %d attempts. Most likely missed too many schedules", nextRunAttempts) + return 0, fmt.Errorf("could not find next scheduling window after %d attempts. Most likely missed too many schedules", nextRunAttempts) } goto findNextRun } var cv configv1.ClusterVersion if err := r.Client.Get(ctx, types.NamespacedName{Name: r.ManagedUpstreamClusterVersionName}, &cv); err != nil { - return ctrl.Result{}, fmt.Errorf("could not get cluster version: %w", err) + return 0, fmt.Errorf("could not get cluster version: %w", err) } // Schedule is suspended, do nothing if uc.Spec.Schedule.Suspend { l.Info("would schedule job, but schedule is suspended by .spec.schedule.suspend") - r.Recorder.Event(&uc, "Normal", EventReasonUpgradeConfigSuspended, "Upgrade scheduling is suspended by .spec.schedule.suspend") - return ctrl.Result{}, r.setLastScheduledUpgrade(ctx, &uc, nextRun) + r.Recorder.Event(uc, "Normal", EventReasonUpgradeConfigSuspended, "Upgrade scheduling is suspended by .spec.schedule.suspend") + return 0, r.setLastScheduledUpgrade(ctx, uc, nextRun) } // Check if we are in a suspension window - window, err := r.matchingUpgradeSuspensionWindow(ctx, uc, now) + window, err := r.matchingUpgradeSuspensionWindow(ctx, *uc, now) if err != nil { - return ctrl.Result{}, fmt.Errorf("could not search matching upgrade suspension window: %w", err) + return 0, fmt.Errorf("could not search matching upgrade suspension window: %w", err) } if window != nil { l.Info("would schedule job, but schedule is suspended by UpgradeSuspensionWindow", "window", window.Name, "reason", window.Spec.Reason, "start", window.Spec.Start.Time, "end", window.Spec.End.Time) - r.Recorder.Eventf(&uc, "Normal", EventReasonUpgradeConfigSuspendedBySuspensionWindow, "Upgrade scheduling is suspended by UpgradeSuspensionWindow %s: %s", window.Name, window.Spec.Reason) - return ctrl.Result{}, r.setLastScheduledUpgrade(ctx, &uc, nextRun) + r.Recorder.Eventf(uc, "Normal", EventReasonUpgradeConfigSuspendedBySuspensionWindow, "Upgrade scheduling is suspended by UpgradeSuspensionWindow %s: %s", window.Name, window.Spec.Reason) + return 0, r.setLastScheduledUpgrade(ctx, uc, nextRun) } latestUpdate := clusterversion.LatestAvailableUpdate(cv) - if err := r.createJob(uc, latestUpdate, nextRun, ctx); err != nil { - return ctrl.Result{}, fmt.Errorf("could not create job: %w", err) + if err := r.createJob(*uc, latestUpdate, nextRun, ctx); err != nil { + return 0, fmt.Errorf("could not create job: %w", err) } - return ctrl.Result{}, r.setLastScheduledUpgrade(ctx, &uc, nextRun) + return 0, r.setLastScheduledUpgrade(ctx, uc, nextRun) } func (r *UpgradeConfigReconciler) 
setLastScheduledUpgrade(ctx context.Context, uc *managedupgradev1beta1.UpgradeConfig, t time.Time) error { @@ -230,7 +247,7 @@ func (r *UpgradeConfigReconciler) SetupWithManager(mgr ctrl.Manager) error { Complete(r) } -func (r *UpgradeConfigReconciler) getControlledJobs(ctx context.Context, uc managedupgradev1beta1.UpgradeConfig) ([]managedupgradev1beta1.UpgradeJob, error) { +func (r *UpgradeConfigReconciler) getControlledJobs(ctx context.Context, uc *managedupgradev1beta1.UpgradeConfig) ([]managedupgradev1beta1.UpgradeJob, error) { var jobs managedupgradev1beta1.UpgradeJobList if err := r.List(ctx, &jobs, client.InNamespace(uc.Namespace)); err != nil { return nil, err @@ -264,6 +281,25 @@ func (r *UpgradeConfigReconciler) matchingUpgradeSuspensionWindow(ctx context.Co return nil, nil } +func (r *UpgradeConfigReconciler) updateNextPossibleSchedulesStatus(ctx context.Context, uc *managedupgradev1beta1.UpgradeConfig, sched schedule.Schedule, location *time.Location) (time.Duration, error) { + now := r.Clock.Now().In(location) + np, err := sched.NextN(now, 10) + if err != nil && len(np) > 0 { + log.FromContext(ctx).Error(err, "could not get all possible next schedules", "n_schedules", len(np)) + } else if err != nil { + return 0, err + } + + uc.Status.NextPossibleSchedules = make([]managedupgradev1beta1.NextPossibleSchedule, len(np)) + for i, t := range np { + uc.Status.NextPossibleSchedules[i] = managedupgradev1beta1.NextPossibleSchedule{ + Time: metav1.NewTime(t), + } + } + + return np[0].Sub(now), r.Status().Update(ctx, uc) +} + func latestScheduledJob(jobs []managedupgradev1beta1.UpgradeJob) *managedupgradev1beta1.UpgradeJob { var latest *managedupgradev1beta1.UpgradeJob for _, job := range jobs { @@ -274,28 +310,12 @@ func latestScheduledJob(jobs []managedupgradev1beta1.UpgradeJob) *managedupgrade return latest } -func filterControlledJobs(uc managedupgradev1beta1.UpgradeConfig, jobs []managedupgradev1beta1.UpgradeJob) []managedupgradev1beta1.UpgradeJob { +func filterControlledJobs(uc *managedupgradev1beta1.UpgradeConfig, jobs []managedupgradev1beta1.UpgradeJob) []managedupgradev1beta1.UpgradeJob { ownedJobs := make([]managedupgradev1beta1.UpgradeJob, 0, len(jobs)) for _, job := range jobs { - if metav1.IsControlledBy(&job, &uc) { + if metav1.IsControlledBy(&job, uc) { ownedJobs = append(ownedJobs, job) } } return ownedJobs } - -func calcNextRun(earliest time.Time, sched cron.Schedule, schedISOWeek string) (time.Time, error) { - nextRun := sched.Next(earliest) - // if the next run is more than 1000 runs away, we assume that the cron schedule is invalid as a safe guard - for i := 0; i < 1000; i++ { - isoWeekOK, err := scheduleutils.CheckIsoWeek(nextRun, schedISOWeek) - if err != nil { - return time.Time{}, err - } - if isoWeekOK { - return nextRun, nil - } - nextRun = sched.Next(nextRun) - } - return time.Time{}, fmt.Errorf("could not find next run, max time: %s", nextRun) -} diff --git a/controllers/upgradeconfig_controller_test.go b/controllers/upgradeconfig_controller_test.go index d487eee..0918356 100644 --- a/controllers/upgradeconfig_controller_test.go +++ b/controllers/upgradeconfig_controller_test.go @@ -8,6 +8,7 @@ import ( "time" configv1 "github.com/openshift/api/config/v1" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" @@ -209,6 +210,75 @@ func Test_UpgradeConfigReconciler_Reconcile_E2E(t *testing.T) { }) } +func 
Test_UpgradeConfigReconciler_Reconcile_AddNextWindowsToStatus(t *testing.T) { + ctx := context.Background() + clock := mockClock{now: time.Date(2022, time.April, 4, 8, 0, 0, 0, time.UTC)} + t.Log("Now: ", clock.Now()) + require.Equal(t, 14, func() int { _, isoweek := clock.Now().ISOWeek(); return isoweek }()) + require.Equal(t, time.Monday, clock.Now().Weekday()) + + ucv := &configv1.ClusterVersion{ + ObjectMeta: metav1.ObjectMeta{ + Name: "version", + }, + } + + upgradeConfig := &managedupgradev1beta1.UpgradeConfig{ + ObjectMeta: metav1.ObjectMeta{ + Name: "daily-maintenance", + Namespace: "appuio-openshift-upgrade-controller", + CreationTimestamp: metav1.Time{Time: clock.Now().Add(-time.Hour)}, + }, + Spec: managedupgradev1beta1.UpgradeConfigSpec{ + Schedule: managedupgradev1beta1.UpgradeConfigSchedule{ + Cron: "0 22 * * *", // At 22:00 every day + Location: "UTC", + Suspend: true, + }, + MaxSchedulingDelay: metav1.Duration{Duration: time.Minute}, + JobTemplate: managedupgradev1beta1.UpgradeConfigJobTemplate{ + Metadata: metav1.ObjectMeta{ + Labels: map[string]string{"app": "openshift-upgrade-controller"}, + }, + }, + }, + } + + client := controllerClient(t, ucv, upgradeConfig) + + recorder := record.NewFakeRecorder(5) + subject := &UpgradeConfigReconciler{ + Client: client, + Scheme: client.Scheme(), + Recorder: recorder, + + Clock: &clock, + + ManagedUpstreamClusterVersionName: "version", + } + + res, err := subject.Reconcile(ctx, requestForObject(upgradeConfig)) + require.NoError(t, err) + + var uuc managedupgradev1beta1.UpgradeConfig + require.NoError(t, client.Get(ctx, types.NamespacedName{Name: upgradeConfig.Name, Namespace: upgradeConfig.Namespace}, &uuc)) + nextTime := time.Date(2022, time.April, 4, 22, 0, 0, 0, time.UTC) + assert.Equal(t, res.RequeueAfter, nextTime.Sub(clock.Now())) + + expected := make([]string, 10) + for i := 0; i < 10; i++ { + expected[i] = nextTime.Format(time.RFC3339) + nextTime = nextTime.Add(24 * time.Hour) + } + + got := make([]string, 10) + for i, t := range uuc.Status.NextPossibleSchedules { + got[i] = t.Time.UTC().Format(time.RFC3339) + } + + assert.Equal(t, expected, got) +} + func Test_UpgradeConfigReconciler_Reconcile_SuspendedByWindow(t *testing.T) { ctx := context.Background() clock := mockClock{now: time.Date(2022, time.April, 4, 8, 0, 0, 0, time.UTC)} diff --git a/pkg/schedule/schedule.go b/pkg/schedule/schedule.go new file mode 100644 index 0000000..47133ee --- /dev/null +++ b/pkg/schedule/schedule.go @@ -0,0 +1,60 @@ +package schedule + +import ( + "fmt" + "time" + + "github.com/robfig/cron/v3" + + scheduleutils "github.com/appuio/openshift-upgrade-controller/pkg/schedule/utils" +) + +// ErrNoNextFound is returned when no no next time could be found +var ErrNoNextFound = fmt.Errorf("no next time found") + +// Schedule is a wrapper around cron.Schedule that adds the IsoWeek field +type Schedule struct { + cron.Schedule + + // The week of the year to run the job + // 1-53 or @odd, @even + // Empty matches any week of the year + IsoWeek string +} + +// NextN returns the next n activation times of the schedule after the earliest time. +// If the schedule is invalid, it will return an error. +// If the schedule is valid but no next time could be found (too far in the future), it will return an error with ErrNoNextFound. +// If an error occurs the returned slice will contain len() valid activation times that were found before the error occurred. 
+func (s Schedule) NextN(earliest time.Time, n int) ([]time.Time, error) { + nextTimes := make([]time.Time, 0, n) + + for i := 0; i < n; i++ { + nextTime, err := s.Next(earliest) + if err != nil { + return nextTimes, err + } + nextTimes = append(nextTimes, nextTime) + earliest = nextTime + } + return nextTimes, nil +} + +// Next returns the next activation time of the schedule after the earliest time. +// If the schedule is invalid, it will return an error. +// If the schedule is valid but no next time could be found (too far in the future), it will return an error with ErrNoNextFound. +func (s Schedule) Next(earliest time.Time) (time.Time, error) { + n := s.Schedule.Next(earliest) + // if the next activation time is more than 1000 runs away, we assume that the cron schedule is invalid as a safe guard + for i := 0; i < 1000; i++ { + isoWeekOK, err := scheduleutils.CheckIsoWeek(n, s.IsoWeek) + if err != nil { + return time.Time{}, err + } + if isoWeekOK { + return n, nil + } + n = s.Schedule.Next(n) + } + return time.Time{}, fmt.Errorf("could not find next scheduled time, checked until %q: %w", n, ErrNoNextFound) +} diff --git a/pkg/scheduleutils/scheduleutils.go b/pkg/schedule/utils/utils.go similarity index 100% rename from pkg/scheduleutils/scheduleutils.go rename to pkg/schedule/utils/utils.go diff --git a/pkg/scheduleutils/scheduleutils_test.go b/pkg/schedule/utils/utils_test.go similarity index 100% rename from pkg/scheduleutils/scheduleutils_test.go rename to pkg/schedule/utils/utils_test.go From ec85c44820086597de415ce0d745ae03882a052f Mon Sep 17 00:00:00 2001 From: Sebastian Widmer Date: Wed, 2 Oct 2024 09:35:04 +0200 Subject: [PATCH 2/5] Add schedule tests, simplify packages --- pkg/schedule/schedule.go | 29 +++++- pkg/schedule/schedule_test.go | 147 +++++++++++++++++++++++++++++++ pkg/schedule/utils/utils.go | 31 ------- pkg/schedule/utils/utils_test.go | 77 ---------------- 4 files changed, 173 insertions(+), 111 deletions(-) create mode 100644 pkg/schedule/schedule_test.go delete mode 100644 pkg/schedule/utils/utils.go delete mode 100644 pkg/schedule/utils/utils_test.go diff --git a/pkg/schedule/schedule.go b/pkg/schedule/schedule.go index 47133ee..19b706f 100644 --- a/pkg/schedule/schedule.go +++ b/pkg/schedule/schedule.go @@ -2,11 +2,10 @@ package schedule import ( "fmt" + "strconv" "time" "github.com/robfig/cron/v3" - - scheduleutils "github.com/appuio/openshift-upgrade-controller/pkg/schedule/utils" ) // ErrNoNextFound is returned when no no next time could be found @@ -47,7 +46,7 @@ func (s Schedule) Next(earliest time.Time) (time.Time, error) { n := s.Schedule.Next(earliest) // if the next activation time is more than 1000 runs away, we assume that the cron schedule is invalid as a safe guard for i := 0; i < 1000; i++ { - isoWeekOK, err := scheduleutils.CheckIsoWeek(n, s.IsoWeek) + isoWeekOK, err := checkIsoWeek(n, s.IsoWeek) if err != nil { return time.Time{}, err } @@ -58,3 +57,27 @@ func (s Schedule) Next(earliest time.Time) (time.Time, error) { } return time.Time{}, fmt.Errorf("could not find next scheduled time, checked until %q: %w", n, ErrNoNextFound) } + +// checkIsoWeek checks if the given time is in the given iso week. 
+// The iso week can be one of the following: +// - "": every iso week +// - "@even": every even iso week +// - "@odd": every odd iso week +// - "": every iso week N +func checkIsoWeek(t time.Time, schedISOWeek string) (bool, error) { + _, iw := t.ISOWeek() + switch schedISOWeek { + case "": + return true, nil + case "@even": + return iw%2 == 0, nil + case "@odd": + return iw%2 == 1, nil + } + + nw, err := strconv.ParseInt(schedISOWeek, 10, 64) + if err == nil { + return nw == int64(iw), nil + } + return false, fmt.Errorf("unknown iso week: %s", schedISOWeek) +} diff --git a/pkg/schedule/schedule_test.go b/pkg/schedule/schedule_test.go new file mode 100644 index 0000000..9c6ef24 --- /dev/null +++ b/pkg/schedule/schedule_test.go @@ -0,0 +1,147 @@ +package schedule + +import ( + "fmt" + "testing" + "time" + + "github.com/robfig/cron/v3" + "github.com/stretchr/testify/require" +) + +func Test_Schedule_Next(t *testing.T) { + s, err := cron.ParseStandard("0 10 * * 6") + require.NoError(t, err) + + subject := Schedule{ + Schedule: s, + IsoWeek: "@even", + } + + expected, err := subject.Next(time.Date(2021, 1, 1, 0, 0, 0, 0, time.UTC)) + require.NoError(t, err) + require.Equal(t, expected, time.Date(2021, 1, 16, 10, 0, 0, 0, time.UTC)) +} + +func Test_Schedule_Err(t *testing.T) { + s, err := cron.ParseStandard("0 10 * * 6") + require.NoError(t, err) + + subject := Schedule{ + Schedule: s, + IsoWeek: "@asdasd", + } + + _, err = subject.Next(time.Date(2021, 1, 1, 0, 0, 0, 0, time.UTC)) + require.Error(t, err, "unknown iso week: asdasd") +} + +func Test_Schedule_NextN(t *testing.T) { + s, err := cron.ParseStandard("0 10 * * 6") + require.NoError(t, err) + + subject := Schedule{ + Schedule: s, + IsoWeek: "@even", + } + + tt := time.Date(2021, 1, 1, 0, 0, 0, 0, time.UTC) + next, err := subject.NextN(tt, 10) + require.NoError(t, err) + require.Len(t, next, 10) + + expected := make([]string, 10) + start := time.Date(2021, 1, 16, 10, 0, 0, 0, time.UTC) + for i := range expected { + expected[i] = start.Add(time.Hour * 24 * 7 * 2 * time.Duration(i)).Format(time.RFC3339) + } + got := make([]string, 10) + for i := range next { + got[i] = next[i].Format(time.RFC3339) + } + + require.Equal(t, expected, got) +} + +func Test_Schedule_NextN_Short(t *testing.T) { + s, err := cron.ParseStandard("0 10 3 1 *") + require.NoError(t, err) + + subject := Schedule{ + Schedule: s, + IsoWeek: "4", + } + + tt := time.Date(2021, 1, 1, 0, 0, 0, 0, time.UTC) + next, err := subject.NextN(tt, 10) + require.Error(t, err, "no next time found") + require.Len(t, next, 0) +} + +func Test_checkIsoWeek(t *testing.T) { + tc := []struct { + t time.Time + schedISOWeek string + expected bool + expectedErr error + }{ + { + t: time.Date(2021, 1, 1, 0, 0, 0, 0, time.UTC), + schedISOWeek: "", + expected: true, + }, + { + t: time.Date(2021, 1, 1, 0, 0, 0, 0, time.UTC), + schedISOWeek: "@even", + expected: false, + }, + { + t: time.Date(2021, 1, 13, 0, 0, 0, 0, time.UTC), + schedISOWeek: "@even", + expected: true, + }, + { + t: time.Date(2021, 1, 1, 0, 0, 0, 0, time.UTC), + schedISOWeek: "@odd", + expected: true, + }, + { + t: time.Date(2021, 1, 13, 0, 0, 0, 0, time.UTC), + schedISOWeek: "@odd", + expected: false, + }, + { + t: time.Date(2021, 1, 1, 0, 0, 0, 0, time.UTC), + schedISOWeek: "53", + expected: true, + }, + { + t: time.Date(2021, 1, 1, 0, 0, 0, 0, time.UTC), + schedISOWeek: "1", + expected: false, + }, + { + t: time.Date(2021, 1, 1, 0, 0, 0, 0, time.UTC), + schedISOWeek: "invalid", + expectedErr: fmt.Errorf("unknown iso week: 
invalid"), + }, + } + + for _, c := range tc { + _, iw := c.t.ISOWeek() + t.Run(fmt.Sprintf("sched: %s, iso week from time: %d", c.schedISOWeek, iw), func(t *testing.T) { + got, err := checkIsoWeek(c.t, c.schedISOWeek) + if err != nil { + if c.expectedErr == nil { + t.Fatalf("unexpected error: %v", err) + } + require.Equal(t, c.expectedErr.Error(), err.Error()) + return + } + if c.expectedErr != nil { + t.Fatalf("expected error %q, got nil", c.expectedErr) + } + require.Equal(t, c.expected, got) + }) + } +} diff --git a/pkg/schedule/utils/utils.go b/pkg/schedule/utils/utils.go deleted file mode 100644 index da9acd0..0000000 --- a/pkg/schedule/utils/utils.go +++ /dev/null @@ -1,31 +0,0 @@ -package scheduleutils - -import ( - "fmt" - "strconv" - "time" -) - -// CheckIsoWeek checks if the given time is in the given iso week. -// The iso week can be one of the following: -// - "": every iso week -// - "@even": every even iso week -// - "@odd": every odd iso week -// - "": every iso week N -func CheckIsoWeek(t time.Time, schedISOWeek string) (bool, error) { - _, iw := t.ISOWeek() - switch schedISOWeek { - case "": - return true, nil - case "@even": - return iw%2 == 0, nil - case "@odd": - return iw%2 == 1, nil - } - - nw, err := strconv.ParseInt(schedISOWeek, 10, 64) - if err == nil { - return nw == int64(iw), nil - } - return false, fmt.Errorf("unknown iso week: %s", schedISOWeek) -} diff --git a/pkg/schedule/utils/utils_test.go b/pkg/schedule/utils/utils_test.go deleted file mode 100644 index 3b01da5..0000000 --- a/pkg/schedule/utils/utils_test.go +++ /dev/null @@ -1,77 +0,0 @@ -package scheduleutils - -import ( - "fmt" - "testing" - "time" - - "github.com/stretchr/testify/require" -) - -func TestCheckIsoWeek(t *testing.T) { - tc := []struct { - t time.Time - schedISOWeek string - expected bool - expectedErr error - }{ - { - t: time.Date(2021, 1, 1, 0, 0, 0, 0, time.UTC), - schedISOWeek: "", - expected: true, - }, - { - t: time.Date(2021, 1, 1, 0, 0, 0, 0, time.UTC), - schedISOWeek: "@even", - expected: false, - }, - { - t: time.Date(2021, 1, 13, 0, 0, 0, 0, time.UTC), - schedISOWeek: "@even", - expected: true, - }, - { - t: time.Date(2021, 1, 1, 0, 0, 0, 0, time.UTC), - schedISOWeek: "@odd", - expected: true, - }, - { - t: time.Date(2021, 1, 13, 0, 0, 0, 0, time.UTC), - schedISOWeek: "@odd", - expected: false, - }, - { - t: time.Date(2021, 1, 1, 0, 0, 0, 0, time.UTC), - schedISOWeek: "53", - expected: true, - }, - { - t: time.Date(2021, 1, 1, 0, 0, 0, 0, time.UTC), - schedISOWeek: "1", - expected: false, - }, - { - t: time.Date(2021, 1, 1, 0, 0, 0, 0, time.UTC), - schedISOWeek: "invalid", - expectedErr: fmt.Errorf("unknown iso week: invalid"), - }, - } - - for _, c := range tc { - _, iw := c.t.ISOWeek() - t.Run(fmt.Sprintf("sched: %s, iso week from time: %d", c.schedISOWeek, iw), func(t *testing.T) { - got, err := CheckIsoWeek(c.t, c.schedISOWeek) - if err != nil { - if c.expectedErr == nil { - t.Fatalf("unexpected error: %v", err) - } - require.Equal(t, c.expectedErr.Error(), err.Error()) - return - } - if c.expectedErr != nil { - t.Fatalf("expected error %q, got nil", c.expectedErr) - } - require.Equal(t, c.expected, got) - }) - } -} From b847343f3a807949a4d632f9e862cbdfb0854ec3 Mon Sep 17 00:00:00 2001 From: Sebastian Widmer Date: Wed, 2 Oct 2024 09:37:17 +0200 Subject: [PATCH 3/5] Add metrics and test --- controllers/upgrade_information_collector.go | 76 ++++++++++++++++-- .../upgrade_information_collector_test.go | 77 ++++++++++++++++++- 2 files changed, 144 insertions(+), 9 
deletions(-) diff --git a/controllers/upgrade_information_collector.go b/controllers/upgrade_information_collector.go index 4437e1c..0afc250 100644 --- a/controllers/upgrade_information_collector.go +++ b/controllers/upgrade_information_collector.go @@ -45,7 +45,7 @@ var poolsPausedDesc = prometheus.NewDesc( nil, ) -var jobStates = prometheus.NewDesc( +var jobStateDesc = prometheus.NewDesc( MetricsNamespace+"_upgradejob_state", "Returns the state of jobs in the cluster. 'pending', 'active', 'succeeded', or 'failed' are possible states. Final states may have a reason.", []string{ @@ -62,6 +62,35 @@ var jobStates = prometheus.NewDesc( nil, ) +var jobStartAfterDesc = prometheus.NewDesc( + MetricsNamespace+"_upgradejob_start_after_timestamp_seconds", + "The value of the startAfter field of the job.", + []string{ + "upgradejob", + }, + nil, +) + +var jobStartBeforeDesc = prometheus.NewDesc( + MetricsNamespace+"_upgradejob_start_before_timestamp_seconds", + "The value of the startBefore field of the job.", + []string{ + "upgradejob", + }, + nil, +) + +var upgradeConfigNextPossibleScheduleDesc = prometheus.NewDesc( + MetricsNamespace+"_upgradeconfig_next_possible_schedule_timestamp_seconds", + "The value of the time field of the next possible schedule for an upgrade.", + []string{ + "upgradeconfig", + "n", + "timestamp", + }, + nil, +) + // UpgradeInformationCollector is a Prometheus collector that exposes various metrics about the upgrade process. type UpgradeInformationCollector struct { client.Client @@ -77,7 +106,10 @@ func (*UpgradeInformationCollector) Describe(ch chan<- *prometheus.Desc) { ch <- clusterUpgradingDesc ch <- poolsUpgradingDesc ch <- poolsPausedDesc - ch <- jobStates + ch <- jobStateDesc + ch <- jobStartAfterDesc + ch <- jobStartBeforeDesc + ch <- upgradeConfigNextPossibleScheduleDesc } // Collect implements prometheus.Collector. 
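Note: collectors like this one are typically registered with controller-runtime's shared Prometheus registry so that everything announced in Describe, including the new *_timestamp_seconds series, is served from the manager's metrics endpoint. The wiring lives outside this hunk, so the snippet below is only an illustrative sketch; the controllers import path and the registration site are assumptions, while the struct fields match the ones used by the tests in this series.

package example

import (
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/metrics"

	"github.com/appuio/openshift-upgrade-controller/controllers"
)

// registerUpgradeMetrics is an illustrative sketch, not the project's actual wiring.
// Registering the collector once exposes all metrics it describes alongside the
// default controller-runtime metrics.
func registerUpgradeMetrics(c client.Client, clusterVersionName string) {
	metrics.Registry.MustRegister(&controllers.UpgradeInformationCollector{
		Client:                            c,
		ManagedUpstreamClusterVersionName: clusterVersionName,
	})
}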
@@ -127,15 +159,37 @@ func (m *UpgradeInformationCollector) Collect(ch chan<- prometheus.Metric) { ) } + var configs managedupgradev1beta1.UpgradeConfigList + if err := m.Client.List(ctx, &configs); err != nil { + ch <- prometheus.NewInvalidMetric(upgradeConfigNextPossibleScheduleDesc, fmt.Errorf("failed to list upgrade jobs: %w", err)) + } else { + for _, config := range configs.Items { + for i, nps := range config.Status.NextPossibleSchedules { + ch <- prometheus.MustNewConstMetric( + upgradeConfigNextPossibleScheduleDesc, + prometheus.GaugeValue, + float64(nps.Time.Unix()), + config.Name, + strconv.Itoa(i), + nps.Time.UTC().Format(time.RFC3339), + ) + } + } + } + var jobs managedupgradev1beta1.UpgradeJobList if err := m.Client.List(ctx, &jobs); err != nil { - ch <- prometheus.NewInvalidMetric(jobStates, fmt.Errorf("failed to list upgrade jobs: %w", err)) + ch <- prometheus.NewInvalidMetric(jobStateDesc, fmt.Errorf("failed to list upgrade jobs: %w", err)) + ch <- prometheus.NewInvalidMetric(jobStartAfterDesc, fmt.Errorf("failed to list upgrade jobs: %w", err)) + ch <- prometheus.NewInvalidMetric(jobStartBeforeDesc, fmt.Errorf("failed to list upgrade jobs: %w", err)) return } var jobsHooks managedupgradev1beta1.UpgradeJobHookList if err := m.Client.List(ctx, &jobsHooks); err != nil { - ch <- prometheus.NewInvalidMetric(jobStates, fmt.Errorf("failed to list upgrade job hooks: %w", err)) + ch <- prometheus.NewInvalidMetric(jobStateDesc, fmt.Errorf("failed to list upgrade job hooks: %w", err)) + ch <- prometheus.NewInvalidMetric(jobStartAfterDesc, fmt.Errorf("failed to list upgrade jobs: %w", err)) + ch <- prometheus.NewInvalidMetric(jobStartBeforeDesc, fmt.Errorf("failed to list upgrade jobs: %w", err)) return } @@ -145,7 +199,7 @@ func (m *UpgradeInformationCollector) Collect(ch chan<- prometheus.Metric) { v = &configv1.Update{} } ch <- prometheus.MustNewConstMetric( - jobStates, + jobStateDesc, prometheus.GaugeValue, 1, job.Name, @@ -158,6 +212,18 @@ func (m *UpgradeInformationCollector) Collect(ch chan<- prometheus.Metric) { jobStateReason(job), strconv.FormatBool(jobHasMatchingDisruptiveHook(job, jobsHooks)), ) + ch <- prometheus.MustNewConstMetric( + jobStartAfterDesc, + prometheus.GaugeValue, + float64(job.Spec.StartAfter.Unix()), + job.Name, + ) + ch <- prometheus.MustNewConstMetric( + jobStartBeforeDesc, + prometheus.GaugeValue, + float64(job.Spec.StartBefore.Unix()), + job.Name, + ) } } diff --git a/controllers/upgrade_information_collector_test.go b/controllers/upgrade_information_collector_test.go index a08c1f0..d96e60f 100644 --- a/controllers/upgrade_information_collector_test.go +++ b/controllers/upgrade_information_collector_test.go @@ -19,6 +19,8 @@ import ( func Test_ClusterUpgradingMetric(t *testing.T) { expectedMetricNames := []string{ "openshift_upgrade_controller_upgradejob_state", + "openshift_upgrade_controller_upgradejob_start_after_timestamp_seconds", + "openshift_upgrade_controller_upgradejob_start_before_timestamp_seconds", "openshift_upgrade_controller_cluster_upgrading", "openshift_upgrade_controller_machine_config_pools_upgrading", "openshift_upgrade_controller_machine_config_pools_paused", @@ -233,7 +235,7 @@ func Test_ClusterUpgradingMetric(t *testing.T) { } require.NoError(t, - testutil.CollectAndCompare(subject, expectedMetrics(true, false, false), expectedMetricNames...), + testutil.CollectAndCompare(subject, expectedUpgradingMetrics(true, false, false), expectedMetricNames...), "upgrading should be true if cluster version is progressing", ) @@ -247,7 
+249,7 @@ func Test_ClusterUpgradingMetric(t *testing.T) { require.NoError(t, c.Status().Update(context.Background(), workerPool)) require.NoError(t, - testutil.CollectAndCompare(subject, expectedMetrics(true, false, true), expectedMetricNames...), + testutil.CollectAndCompare(subject, expectedUpgradingMetrics(true, false, true), expectedMetricNames...), "upgrading should be true if cluster version is progressing or a machine config pool is not fully upgraded", ) @@ -255,12 +257,58 @@ func Test_ClusterUpgradingMetric(t *testing.T) { require.NoError(t, c.Status().Update(context.Background(), workerPool)) require.NoError(t, - testutil.CollectAndCompare(subject, expectedMetrics(false, false, false), expectedMetricNames...), + testutil.CollectAndCompare(subject, expectedUpgradingMetrics(false, false, false), expectedMetricNames...), "upgrading should be false if cluster version is not progressing and all machine config pools are fully upgraded", ) } -func expectedMetrics(upgrading, masterUpgrading, workerUpgrading bool) io.Reader { +func Test_UpgradeConfigMetric(t *testing.T) { + expectedMetricNames := []string{ + "openshift_upgrade_controller_upgradeconfig_next_possible_schedule_timestamp_seconds", + } + + version := &configv1.ClusterVersion{ + ObjectMeta: metav1.ObjectMeta{ + Name: "version", + }, + } + uc := &managedupgradev1beta1.UpgradeConfig{ + ObjectMeta: metav1.ObjectMeta{ + Name: "myconfig", + }, + Status: managedupgradev1beta1.UpgradeConfigStatus{ + NextPossibleSchedules: []managedupgradev1beta1.NextPossibleSchedule{ + { + Time: metav1.Time{Time: time.Date(2022, 12, 4, 22, 45, 0, 0, time.UTC)}, + }, { + Time: metav1.Time{Time: time.Date(2022, 12, 24, 22, 45, 0, 0, time.UTC)}, + }, { + Time: metav1.Time{Time: time.Date(2023, 2, 7, 22, 45, 0, 0, time.UTC)}, + }, + }, + }, + } + c := controllerClient(t, version, uc) + subject := &UpgradeInformationCollector{ + Client: c, + + ManagedUpstreamClusterVersionName: "version", + } + + metrics := ` +# HELP openshift_upgrade_controller_upgradeconfig_next_possible_schedule_timestamp_seconds The value of the time field of the next possible schedule for an upgrade. +# TYPE openshift_upgrade_controller_upgradeconfig_next_possible_schedule_timestamp_seconds gauge +openshift_upgrade_controller_upgradeconfig_next_possible_schedule_timestamp_seconds{n="0",timestamp="2022-12-04T22:45:00Z",upgradeconfig="myconfig"} 1.6701939e+09 +openshift_upgrade_controller_upgradeconfig_next_possible_schedule_timestamp_seconds{n="1",timestamp="2022-12-24T22:45:00Z",upgradeconfig="myconfig"} 1.6719219e+09 +openshift_upgrade_controller_upgradeconfig_next_possible_schedule_timestamp_seconds{n="2",timestamp="2023-02-07T22:45:00Z",upgradeconfig="myconfig"} 1.6758099e+09 +` + + require.NoError(t, + testutil.CollectAndCompare(subject, strings.NewReader(metrics), expectedMetricNames...), + ) +} + +func expectedUpgradingMetrics(upgrading, masterUpgrading, workerUpgrading bool) io.Reader { metrics := ` # HELP openshift_upgrade_controller_cluster_upgrading Set to 1 if the cluster is currently upgrading, 0 otherwise. 
# TYPE openshift_upgrade_controller_cluster_upgrading gauge @@ -288,6 +336,27 @@ openshift_upgrade_controller_upgradejob_state{desired_version_force="true",desir openshift_upgrade_controller_upgradejob_state{desired_version_force="false",desired_version_image="",desired_version_version="",matches_disruptive_hooks="true",reason="",start_after="0001-01-01T00:00:00Z",start_before="0001-01-01T00:00:00Z",state="pending",upgradejob="disruptive"} 1 openshift_upgrade_controller_upgradejob_state{desired_version_force="false",desired_version_image="",desired_version_version="",matches_disruptive_hooks="true",reason="",start_after="0001-01-01T00:00:00Z",start_before="0001-01-01T00:00:00Z",state="pending",upgradejob="disruptive-unclaimed-next"} 1 openshift_upgrade_controller_upgradejob_state{desired_version_force="false",desired_version_image="",desired_version_version="",matches_disruptive_hooks="false",reason="",start_after="0001-01-01T00:00:00Z",start_before="0001-01-01T00:00:00Z",state="pending",upgradejob="disruptive-claimed-next"} 1 + +# HELP openshift_upgrade_controller_upgradejob_start_after_timestamp_seconds The value of the startAfter field of the job. +# TYPE openshift_upgrade_controller_upgradejob_start_after_timestamp_seconds gauge +openshift_upgrade_controller_upgradejob_start_after_timestamp_seconds{upgradejob="active"} -6.21355968e+10 +openshift_upgrade_controller_upgradejob_start_after_timestamp_seconds{upgradejob="disruptive"} -6.21355968e+10 +openshift_upgrade_controller_upgradejob_start_after_timestamp_seconds{upgradejob="disruptive-claimed-next"} -6.21355968e+10 +openshift_upgrade_controller_upgradejob_start_after_timestamp_seconds{upgradejob="disruptive-unclaimed-next"} -6.21355968e+10 +openshift_upgrade_controller_upgradejob_start_after_timestamp_seconds{upgradejob="failed"} -6.21355968e+10 +openshift_upgrade_controller_upgradejob_start_after_timestamp_seconds{upgradejob="paused"} -6.21355968e+10 +openshift_upgrade_controller_upgradejob_start_after_timestamp_seconds{upgradejob="pending"} 1.5795504e+09 +openshift_upgrade_controller_upgradejob_start_after_timestamp_seconds{upgradejob="succeeded"} -6.21355968e+10 +# HELP openshift_upgrade_controller_upgradejob_start_before_timestamp_seconds The value of the startBefore field of the job. 
+# TYPE openshift_upgrade_controller_upgradejob_start_before_timestamp_seconds gauge +openshift_upgrade_controller_upgradejob_start_before_timestamp_seconds{upgradejob="active"} -6.21355968e+10 +openshift_upgrade_controller_upgradejob_start_before_timestamp_seconds{upgradejob="disruptive"} -6.21355968e+10 +openshift_upgrade_controller_upgradejob_start_before_timestamp_seconds{upgradejob="disruptive-claimed-next"} -6.21355968e+10 +openshift_upgrade_controller_upgradejob_start_before_timestamp_seconds{upgradejob="disruptive-unclaimed-next"} -6.21355968e+10 +openshift_upgrade_controller_upgradejob_start_before_timestamp_seconds{upgradejob="failed"} -6.21355968e+10 +openshift_upgrade_controller_upgradejob_start_before_timestamp_seconds{upgradejob="paused"} -6.21355968e+10 +openshift_upgrade_controller_upgradejob_start_before_timestamp_seconds{upgradejob="pending"} 1.579554e+09 +openshift_upgrade_controller_upgradejob_start_before_timestamp_seconds{upgradejob="succeeded"} -6.21355968e+10 ` return strings.NewReader( fmt.Sprintf(metrics, b2i(upgrading), b2i(masterUpgrading), b2i(workerUpgrading)), From 988b8f7d7361c63cd0d79000d44feddb0b6b1f5c Mon Sep 17 00:00:00 2001 From: Sebastian Widmer Date: Wed, 2 Oct 2024 13:20:04 +0200 Subject: [PATCH 4/5] Add upgrade config info metric --- controllers/upgrade_information_collector.go | 22 +++++++++++++++++++ .../upgrade_information_collector_test.go | 12 ++++++++++ 2 files changed, 34 insertions(+) diff --git a/controllers/upgrade_information_collector.go b/controllers/upgrade_information_collector.go index 0afc250..1cfbe85 100644 --- a/controllers/upgrade_information_collector.go +++ b/controllers/upgrade_information_collector.go @@ -80,6 +80,18 @@ var jobStartBeforeDesc = prometheus.NewDesc( nil, ) +var upgradeConfigInfoDesc = prometheus.NewDesc( + MetricsNamespace+"_upgradeconfig_info", + "Information about the upgradeconfig object", + []string{ + "upgradeconfig", + "cron", + "location", + "suspended", + }, + nil, +) + var upgradeConfigNextPossibleScheduleDesc = prometheus.NewDesc( MetricsNamespace+"_upgradeconfig_next_possible_schedule_timestamp_seconds", "The value of the time field of the next possible schedule for an upgrade.", @@ -109,6 +121,7 @@ func (*UpgradeInformationCollector) Describe(ch chan<- *prometheus.Desc) { ch <- jobStateDesc ch <- jobStartAfterDesc ch <- jobStartBeforeDesc + ch <- upgradeConfigInfoDesc ch <- upgradeConfigNextPossibleScheduleDesc } @@ -164,6 +177,15 @@ func (m *UpgradeInformationCollector) Collect(ch chan<- prometheus.Metric) { ch <- prometheus.NewInvalidMetric(upgradeConfigNextPossibleScheduleDesc, fmt.Errorf("failed to list upgrade jobs: %w", err)) } else { for _, config := range configs.Items { + ch <- prometheus.MustNewConstMetric( + upgradeConfigInfoDesc, + prometheus.GaugeValue, + 1, + config.Name, + config.Spec.Schedule.Cron, + config.Spec.Schedule.Location, + strconv.FormatBool(config.Spec.Schedule.Suspend), + ) for i, nps := range config.Status.NextPossibleSchedules { ch <- prometheus.MustNewConstMetric( upgradeConfigNextPossibleScheduleDesc, diff --git a/controllers/upgrade_information_collector_test.go b/controllers/upgrade_information_collector_test.go index d96e60f..d71cb6a 100644 --- a/controllers/upgrade_information_collector_test.go +++ b/controllers/upgrade_information_collector_test.go @@ -264,6 +264,7 @@ func Test_ClusterUpgradingMetric(t *testing.T) { func Test_UpgradeConfigMetric(t *testing.T) { expectedMetricNames := []string{ + "openshift_upgrade_controller_upgradeconfig_info", 
"openshift_upgrade_controller_upgradeconfig_next_possible_schedule_timestamp_seconds", } @@ -276,6 +277,13 @@ func Test_UpgradeConfigMetric(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "myconfig", }, + Spec: managedupgradev1beta1.UpgradeConfigSpec{ + Schedule: managedupgradev1beta1.UpgradeConfigSchedule{ + Cron: "0 22 * * *", + IsoWeek: "@odd", + Location: "UTC", + }, + }, Status: managedupgradev1beta1.UpgradeConfigStatus{ NextPossibleSchedules: []managedupgradev1beta1.NextPossibleSchedule{ { @@ -296,6 +304,10 @@ func Test_UpgradeConfigMetric(t *testing.T) { } metrics := ` +# HELP openshift_upgrade_controller_upgradeconfig_info Information about the upgradeconfig object +# TYPE openshift_upgrade_controller_upgradeconfig_info gauge +openshift_upgrade_controller_upgradeconfig_info{cron="0 22 * * *",location="UTC",suspended="false",upgradeconfig="myconfig"} 1 + # HELP openshift_upgrade_controller_upgradeconfig_next_possible_schedule_timestamp_seconds The value of the time field of the next possible schedule for an upgrade. # TYPE openshift_upgrade_controller_upgradeconfig_next_possible_schedule_timestamp_seconds gauge openshift_upgrade_controller_upgradeconfig_next_possible_schedule_timestamp_seconds{n="0",timestamp="2022-12-04T22:45:00Z",upgradeconfig="myconfig"} 1.6701939e+09 From 5991f682c7c53e12c4913f581b1f6c0e6ac84bf5 Mon Sep 17 00:00:00 2001 From: Sebastian Widmer Date: Wed, 2 Oct 2024 20:54:02 +0200 Subject: [PATCH 5/5] UpgradeSuspensionWindow: Add matching config and jobs to status and metric --- api/v1beta1/upgradesuspensionwindow_types.go | 8 ++ api/v1beta1/zz_generated.deepcopy.go | 27 +++- ...de.appuio.io_upgradesuspensionwindows.yaml | 23 ++++ config/rbac/role.yaml | 2 + controllers/upgrade_information_collector.go | 81 ++++++++++++ .../upgrade_information_collector_test.go | 57 ++++++++ .../upgradesuspensionwindow_controller.go | 91 +++++++++++++ ...upgradesuspensionwindow_controller_test.go | 123 ++++++++++++++++++ main.go | 7 + 9 files changed, 418 insertions(+), 1 deletion(-) create mode 100644 controllers/upgradesuspensionwindow_controller.go create mode 100644 controllers/upgradesuspensionwindow_controller_test.go diff --git a/api/v1beta1/upgradesuspensionwindow_types.go b/api/v1beta1/upgradesuspensionwindow_types.go index b08dbbc..d7e605b 100644 --- a/api/v1beta1/upgradesuspensionwindow_types.go +++ b/api/v1beta1/upgradesuspensionwindow_types.go @@ -32,6 +32,14 @@ type UpgradeSuspensionWindowSpec struct { // UpgradeSuspensionWindowStatus defines the observed state of UpgradeSuspensionWindow type UpgradeSuspensionWindowStatus struct { + // MatchingConfigs are the UpgradeConfigs that are matched by the ConfigSelector. + MatchingConfigs []UpgradeSuspensionWindowStatusMatchingObject `json:"matchingConfigs,omitempty"` + // MatchingJobs are the UpgradeJobs that are matched by the JobSelector. 
+ MatchingJobs []UpgradeSuspensionWindowStatusMatchingObject `json:"matchingJobs,omitempty"` +} + +type UpgradeSuspensionWindowStatusMatchingObject struct { + Name string `json:"name"` } //+kubebuilder:object:root=true diff --git a/api/v1beta1/zz_generated.deepcopy.go b/api/v1beta1/zz_generated.deepcopy.go index 66faf90..8f30901 100644 --- a/api/v1beta1/zz_generated.deepcopy.go +++ b/api/v1beta1/zz_generated.deepcopy.go @@ -718,7 +718,7 @@ func (in *UpgradeSuspensionWindow) DeepCopyInto(out *UpgradeSuspensionWindow) { out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) - out.Status = in.Status + in.Status.DeepCopyInto(&out.Status) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpgradeSuspensionWindow. @@ -801,6 +801,16 @@ func (in *UpgradeSuspensionWindowSpec) DeepCopy() *UpgradeSuspensionWindowSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *UpgradeSuspensionWindowStatus) DeepCopyInto(out *UpgradeSuspensionWindowStatus) { *out = *in + if in.MatchingConfigs != nil { + in, out := &in.MatchingConfigs, &out.MatchingConfigs + *out = make([]UpgradeSuspensionWindowStatusMatchingObject, len(*in)) + copy(*out, *in) + } + if in.MatchingJobs != nil { + in, out := &in.MatchingJobs, &out.MatchingJobs + *out = make([]UpgradeSuspensionWindowStatusMatchingObject, len(*in)) + copy(*out, *in) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpgradeSuspensionWindowStatus. @@ -812,3 +822,18 @@ func (in *UpgradeSuspensionWindowStatus) DeepCopy() *UpgradeSuspensionWindowStat in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UpgradeSuspensionWindowStatusMatchingObject) DeepCopyInto(out *UpgradeSuspensionWindowStatusMatchingObject) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpgradeSuspensionWindowStatusMatchingObject. +func (in *UpgradeSuspensionWindowStatusMatchingObject) DeepCopy() *UpgradeSuspensionWindowStatusMatchingObject { + if in == nil { + return nil + } + out := new(UpgradeSuspensionWindowStatusMatchingObject) + in.DeepCopyInto(out) + return out +} diff --git a/config/crd/bases/managedupgrade.appuio.io_upgradesuspensionwindows.yaml b/config/crd/bases/managedupgrade.appuio.io_upgradesuspensionwindows.yaml index 41d5ac0..1ed1eae 100644 --- a/config/crd/bases/managedupgrade.appuio.io_upgradesuspensionwindows.yaml +++ b/config/crd/bases/managedupgrade.appuio.io_upgradesuspensionwindows.yaml @@ -160,6 +160,29 @@ spec: status: description: UpgradeSuspensionWindowStatus defines the observed state of UpgradeSuspensionWindow + properties: + matchingConfigs: + description: MatchingConfigs are the UpgradeConfigs that are matched + by the ConfigSelector. + items: + properties: + name: + type: string + required: + - name + type: object + type: array + matchingJobs: + description: MatchingJobs are the UpgradeJobs that are matched by + the JobSelector. 
+ items: + properties: + name: + type: string + required: + - name + type: object + type: array type: object type: object served: true diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index c9808b8..24811f5 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -156,3 +156,5 @@ rules: - upgradesuspensionwindows/status verbs: - get + - patch + - update diff --git a/controllers/upgrade_information_collector.go b/controllers/upgrade_information_collector.go index 1cfbe85..ed271d3 100644 --- a/controllers/upgrade_information_collector.go +++ b/controllers/upgrade_information_collector.go @@ -92,6 +92,44 @@ var upgradeConfigInfoDesc = prometheus.NewDesc( nil, ) +var upgradeSuspensionWindowInfoDesc = prometheus.NewDesc( + MetricsNamespace+"_upgradesuspensionwindow_info", + "Information about the upgradesuspensionwindow object", + []string{ + "upgradesuspensionwindow", + "reason", + }, + nil, +) + +var upgradeSuspensionWindowStartDesc = prometheus.NewDesc( + MetricsNamespace+"_upgradesuspensionwindow_start_timestamp_seconds", + "The value of the start field of the suspension window.", + []string{ + "upgradesuspensionwindow", + }, + nil, +) + +var upgradeSuspensionWindowEndDesc = prometheus.NewDesc( + MetricsNamespace+"_upgradesuspensionwindow_end_timestamp_seconds", + "The value of the start field of the suspension window.", + []string{ + "upgradesuspensionwindow", + }, + nil, +) + +var upgradeSuspensionWindowMatchingConfigsDesc = prometheus.NewDesc( + MetricsNamespace+"_upgradesuspensionwindow_matching_config", + "Matching UpgradeConfigs for the suspension window", + []string{ + "upgradesuspensionwindow", + "config", + }, + nil, +) + var upgradeConfigNextPossibleScheduleDesc = prometheus.NewDesc( MetricsNamespace+"_upgradeconfig_next_possible_schedule_timestamp_seconds", "The value of the time field of the next possible schedule for an upgrade.", @@ -123,6 +161,10 @@ func (*UpgradeInformationCollector) Describe(ch chan<- *prometheus.Desc) { ch <- jobStartBeforeDesc ch <- upgradeConfigInfoDesc ch <- upgradeConfigNextPossibleScheduleDesc + ch <- upgradeSuspensionWindowInfoDesc + ch <- upgradeSuspensionWindowStartDesc + ch <- upgradeSuspensionWindowEndDesc + ch <- upgradeSuspensionWindowMatchingConfigsDesc } // Collect implements prometheus.Collector. 
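Note: the start and end descriptors above follow the common convention of exposing a timestamp as a gauge holding Unix seconds, with the owning object's name as the label. Below is a self-contained sketch of that conversion; timestampGauge is a hypothetical helper for illustration, not code from this patch.

package example

import (
	"github.com/prometheus/client_golang/prometheus"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// timestampGauge is a hypothetical helper mirroring how the collector emits
// *_timestamp_seconds samples: the gauge value is the Unix timestamp in seconds
// and the owning object's name is the only label value.
func timestampGauge(desc *prometheus.Desc, t metav1.Time, objectName string) prometheus.Metric {
	return prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, float64(t.Unix()), objectName)
}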
@@ -172,6 +214,45 @@ func (m *UpgradeInformationCollector) Collect(ch chan<- prometheus.Metric) { ) } + var windows managedupgradev1beta1.UpgradeSuspensionWindowList + if err := m.Client.List(ctx, &windows); err != nil { + ch <- prometheus.NewInvalidMetric(upgradeSuspensionWindowInfoDesc, fmt.Errorf("failed to list upgrade suspension windows: %w", err)) + ch <- prometheus.NewInvalidMetric(upgradeSuspensionWindowStartDesc, fmt.Errorf("failed to list upgrade suspension windows: %w", err)) + ch <- prometheus.NewInvalidMetric(upgradeSuspensionWindowEndDesc, fmt.Errorf("failed to list upgrade suspension windows: %w", err)) + ch <- prometheus.NewInvalidMetric(upgradeSuspensionWindowMatchingConfigsDesc, fmt.Errorf("failed to list upgrade suspension windows: %w", err)) + } else { + for _, window := range windows.Items { + ch <- prometheus.MustNewConstMetric( + upgradeSuspensionWindowInfoDesc, + prometheus.GaugeValue, + 1, + window.Name, + window.Spec.Reason, + ) + ch <- prometheus.MustNewConstMetric( + upgradeSuspensionWindowStartDesc, + prometheus.GaugeValue, + float64(window.Spec.Start.Time.Unix()), + window.Name, + ) + ch <- prometheus.MustNewConstMetric( + upgradeSuspensionWindowEndDesc, + prometheus.GaugeValue, + float64(window.Spec.End.Time.Unix()), + window.Name, + ) + for _, config := range window.Status.MatchingConfigs { + ch <- prometheus.MustNewConstMetric( + upgradeSuspensionWindowMatchingConfigsDesc, + prometheus.GaugeValue, + 1, + window.Name, + config.Name, + ) + } + } + } + var configs managedupgradev1beta1.UpgradeConfigList if err := m.Client.List(ctx, &configs); err != nil { ch <- prometheus.NewInvalidMetric(upgradeConfigNextPossibleScheduleDesc, fmt.Errorf("failed to list upgrade jobs: %w", err)) diff --git a/controllers/upgrade_information_collector_test.go b/controllers/upgrade_information_collector_test.go index d71cb6a..e8427ad 100644 --- a/controllers/upgrade_information_collector_test.go +++ b/controllers/upgrade_information_collector_test.go @@ -262,6 +262,63 @@ func Test_ClusterUpgradingMetric(t *testing.T) { ) } +func Test_UpgradeSuspensionWindowMetric(t *testing.T) { + expectedMetricNames := []string{ + "openshift_upgrade_controller_upgradesuspensionwindow_info", + "openshift_upgrade_controller_upgradesuspensionwindow_start_timestamp_seconds", + "openshift_upgrade_controller_upgradesuspensionwindow_end_timestamp_seconds", + "openshift_upgrade_controller_upgradesuspensionwindow_matching_config", + } + + version := &configv1.ClusterVersion{ + ObjectMeta: metav1.ObjectMeta{ + Name: "version", + }, + } + usw := &managedupgradev1beta1.UpgradeSuspensionWindow{ + ObjectMeta: metav1.ObjectMeta{ + Name: "mywindow", + }, + Spec: managedupgradev1beta1.UpgradeSuspensionWindowSpec{ + Start: metav1.NewTime(time.Date(2022, 3, 1, 0, 0, 0, 0, time.UTC)), + End: metav1.NewTime(time.Date(2022, 6, 1, 0, 0, 0, 0, time.UTC)), + Reason: "No moar upgrades", + }, + Status: managedupgradev1beta1.UpgradeSuspensionWindowStatus{ + MatchingConfigs: []managedupgradev1beta1.UpgradeSuspensionWindowStatusMatchingObject{ + {Name: "matching1"}, + {Name: "matching2"}, + }, + }, + } + c := controllerClient(t, version, usw) + subject := &UpgradeInformationCollector{ + Client: c, + + ManagedUpstreamClusterVersionName: "version", + } + + metrics := ` +# HELP openshift_upgrade_controller_upgradesuspensionwindow_info Information about the upgradesuspensionwindow object +# TYPE openshift_upgrade_controller_upgradesuspensionwindow_info gauge +openshift_upgrade_controller_upgradesuspensionwindow_info{reason="No 
moar upgrades",upgradesuspensionwindow="mywindow"} 1 +# HELP openshift_upgrade_controller_upgradesuspensionwindow_matching_config Matching UpgradeConfigs for the suspension window +# TYPE openshift_upgrade_controller_upgradesuspensionwindow_matching_config gauge +openshift_upgrade_controller_upgradesuspensionwindow_matching_config{config="matching1",upgradesuspensionwindow="mywindow"} 1 +openshift_upgrade_controller_upgradesuspensionwindow_matching_config{config="matching2",upgradesuspensionwindow="mywindow"} 1 +# HELP openshift_upgrade_controller_upgradesuspensionwindow_start_timestamp_seconds The value of the start field of the suspension window. +# TYPE openshift_upgrade_controller_upgradesuspensionwindow_start_timestamp_seconds gauge +openshift_upgrade_controller_upgradesuspensionwindow_start_timestamp_seconds{upgradesuspensionwindow="mywindow"} 1.6460928e+09 +# HELP openshift_upgrade_controller_upgradesuspensionwindow_end_timestamp_seconds The value of the start field of the suspension window. +# TYPE openshift_upgrade_controller_upgradesuspensionwindow_end_timestamp_seconds gauge +openshift_upgrade_controller_upgradesuspensionwindow_end_timestamp_seconds{upgradesuspensionwindow="mywindow"} 1.6540416e+09 +` + + require.NoError(t, + testutil.CollectAndCompare(subject, strings.NewReader(metrics), expectedMetricNames...), + ) +} + func Test_UpgradeConfigMetric(t *testing.T) { expectedMetricNames := []string{ "openshift_upgrade_controller_upgradeconfig_info", diff --git a/controllers/upgradesuspensionwindow_controller.go b/controllers/upgradesuspensionwindow_controller.go new file mode 100644 index 0000000..472ef83 --- /dev/null +++ b/controllers/upgradesuspensionwindow_controller.go @@ -0,0 +1,91 @@ +package controllers + +import ( + "context" + "fmt" + "slices" + "strings" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" + + managedupgradev1beta1 "github.com/appuio/openshift-upgrade-controller/api/v1beta1" +) + +// UpgradeSuspensionWindowReconciler reconciles a UpgradeConfig object +type UpgradeSuspensionWindowReconciler struct { + client.Client + Scheme *runtime.Scheme +} + +//+kubebuilder:rbac:groups=managedupgrade.appuio.io,resources=upgradesuspensionwindows,verbs=get;list;watch +//+kubebuilder:rbac:groups=managedupgrade.appuio.io,resources=upgradesuspensionwindows/status,verbs=get;update;patch + +// Reconcile implements the reconcile loop for UpgradeSuspensionWindow. +// It writes the list of matching UpgradeConfigs and UpgradeJobs to the status. 
+func (r *UpgradeSuspensionWindowReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + l := log.FromContext(ctx).WithName("UpgradeSuspensionWindowReconciler.Reconcile") + l.Info("Reconciling UpgradeSuspensionWindow") + + var usw managedupgradev1beta1.UpgradeSuspensionWindow + if err := r.Get(ctx, req.NamespacedName, &usw); err != nil { + return ctrl.Result{}, client.IgnoreNotFound(err) + } + if !usw.DeletionTimestamp.IsZero() { + return ctrl.Result{}, nil + } + + cSel, err := metav1.LabelSelectorAsSelector(usw.Spec.ConfigSelector) + if err != nil { + return ctrl.Result{}, fmt.Errorf("failed to parse config selector: %w", err) + } + var configs managedupgradev1beta1.UpgradeConfigList + if err := r.List(ctx, &configs, client.MatchingLabelsSelector{Selector: cSel}); err != nil { + return ctrl.Result{}, fmt.Errorf("failed to list matching UpgradeConfigs: %w", err) + } + + jSel, err := metav1.LabelSelectorAsSelector(usw.Spec.JobSelector) + if err != nil { + return ctrl.Result{}, fmt.Errorf("failed to parse job selector: %w", err) + } + var jobs managedupgradev1beta1.UpgradeJobList + if err := r.List(ctx, &jobs, client.MatchingLabelsSelector{Selector: jSel}); err != nil { + return ctrl.Result{}, fmt.Errorf("failed to list matching UpgradeJobs: %w", err) + } + + configNames := make([]managedupgradev1beta1.UpgradeSuspensionWindowStatusMatchingObject, len(configs.Items)) + for i, config := range configs.Items { + configNames[i] = managedupgradev1beta1.UpgradeSuspensionWindowStatusMatchingObject{Name: config.Name} + } + slices.SortFunc(configNames, func(a, b managedupgradev1beta1.UpgradeSuspensionWindowStatusMatchingObject) int { + return strings.Compare(a.Name, b.Name) + }) + usw.Status.MatchingConfigs = configNames + + jobNames := make([]managedupgradev1beta1.UpgradeSuspensionWindowStatusMatchingObject, 0, len(jobs.Items)) + for _, job := range jobs.Items { + if !job.Spec.StartAfter.Time.Before(usw.Spec.Start.Time) && !job.Spec.StartAfter.Time.After(usw.Spec.End.Time) { + jobNames = append(jobNames, managedupgradev1beta1.UpgradeSuspensionWindowStatusMatchingObject{Name: job.Name}) + } + } + slices.SortFunc(jobNames, func(a, b managedupgradev1beta1.UpgradeSuspensionWindowStatusMatchingObject) int { + return strings.Compare(a.Name, b.Name) + }) + usw.Status.MatchingJobs = jobNames + + if err := r.Status().Update(ctx, &usw); err != nil { + return ctrl.Result{}, fmt.Errorf("failed to update status: %w", err) + } + + return ctrl.Result{}, nil +} + +// SetupWithManager sets up the controller with the Manager. +func (r *UpgradeSuspensionWindowReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&managedupgradev1beta1.UpgradeSuspensionWindow{}). 
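+		// Note: only UpgradeSuspensionWindow events trigger this reconciler, so
+		// the matching status is refreshed only when the window itself changes.
+		// Also watching UpgradeConfig and UpgradeJob objects would be a possible
+		// extension; it is not part of this change.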
+ Complete(r) +} diff --git a/controllers/upgradesuspensionwindow_controller_test.go b/controllers/upgradesuspensionwindow_controller_test.go new file mode 100644 index 0000000..c156fb0 --- /dev/null +++ b/controllers/upgradesuspensionwindow_controller_test.go @@ -0,0 +1,123 @@ +package controllers + +import ( + "context" + "testing" + "time" + + managedupgradev1beta1 "github.com/appuio/openshift-upgrade-controller/api/v1beta1" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +func Test_UpgradeSuspensionWindowReconciler_Reconcile(t *testing.T) { + ctx := context.Background() + + j1 := &managedupgradev1beta1.UpgradeJob{ + ObjectMeta: metav1.ObjectMeta{ + Name: "before", + Namespace: "testns", + Labels: map[string]string{ + "test": "test", + }, + }, + Spec: managedupgradev1beta1.UpgradeJobSpec{ + StartAfter: metav1.NewTime(time.Date(2022, 2, 1, 0, 0, 0, 0, time.UTC)), + }, + } + j2 := &managedupgradev1beta1.UpgradeJob{ + ObjectMeta: metav1.ObjectMeta{ + Name: "during", + Namespace: "testns", + Labels: map[string]string{ + "test": "test", + }, + }, + Spec: managedupgradev1beta1.UpgradeJobSpec{ + StartAfter: metav1.NewTime(time.Date(2022, 4, 1, 0, 0, 0, 0, time.UTC)), + }, + } + j3 := &managedupgradev1beta1.UpgradeJob{ + ObjectMeta: metav1.ObjectMeta{ + Name: "after", + Namespace: "testns", + Labels: map[string]string{ + "test": "test", + }, + }, + Spec: managedupgradev1beta1.UpgradeJobSpec{ + StartAfter: metav1.NewTime(time.Date(2022, 7, 1, 0, 0, 0, 0, time.UTC)), + }, + } + j4 := &managedupgradev1beta1.UpgradeJob{ + ObjectMeta: metav1.ObjectMeta{ + Name: "during-wrong-label", + Namespace: "testns", + Labels: map[string]string{ + "other": "other", + }, + }, + Spec: managedupgradev1beta1.UpgradeJobSpec{ + StartAfter: metav1.NewTime(time.Date(2022, 4, 1, 0, 0, 0, 0, time.UTC)), + }, + } + + cnf1 := &managedupgradev1beta1.UpgradeConfig{ + ObjectMeta: metav1.ObjectMeta{ + Name: "matching", + Namespace: "testns", + Labels: map[string]string{ + "test": "test", + }, + }, + } + cnf2 := &managedupgradev1beta1.UpgradeConfig{ + ObjectMeta: metav1.ObjectMeta{ + Name: "not-matching", + Namespace: "testns", + Labels: map[string]string{ + "other": "other", + }, + }, + } + + usw := &managedupgradev1beta1.UpgradeSuspensionWindow{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "testns", + }, + Spec: managedupgradev1beta1.UpgradeSuspensionWindowSpec{ + Start: metav1.NewTime(time.Date(2022, 3, 1, 0, 0, 0, 0, time.UTC)), + End: metav1.NewTime(time.Date(2022, 6, 1, 0, 0, 0, 0, time.UTC)), + + ConfigSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "test": "test", + }, + }, + JobSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "test": "test", + }, + }, + }, + } + + c := controllerClient(t, usw, j1, j2, j3, j4, cnf1, cnf2) + + subject := UpgradeSuspensionWindowReconciler{ + Client: c, + Scheme: c.Scheme(), + } + + _, err := subject.Reconcile(ctx, ctrl.Request{NamespacedName: client.ObjectKeyFromObject(usw)}) + require.NoError(t, err) + + require.NoError(t, c.Get(ctx, client.ObjectKeyFromObject(usw), usw)) + + assert.Equal(t, []managedupgradev1beta1.UpgradeSuspensionWindowStatusMatchingObject{{Name: "matching"}}, usw.Status.MatchingConfigs) + assert.Equal(t, []managedupgradev1beta1.UpgradeSuspensionWindowStatusMatchingObject{{Name: "during"}}, usw.Status.MatchingJobs) +} diff --git a/main.go 
b/main.go index 918324a..0d0fda4 100644 --- a/main.go +++ b/main.go @@ -165,6 +165,13 @@ func main() { setupLog.Error(err, "unable to create controller", "controller", "UpgradeConfig") os.Exit(1) } + if err = (&controllers.UpgradeSuspensionWindowReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "UpgradeSuspensionWindow") + os.Exit(1) + } //+kubebuilder:scaffold:builder if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil {