Commit 30699b1: Minor fixes

michaelawyu committed Jan 20, 2025
1 parent 4540d2f

Showing 4 changed files with 342 additions and 71 deletions.
pkg/controllers/clusterresourceplacement/controller.go (4 changes: 2 additions & 2 deletions)

@@ -931,15 +931,15 @@ func (r *Reconciler) setPlacementStatus(
// For clusters that have been selected, set the resource placement status based on the
// respective resource binding status for each of them.
expectedCondTypes := determineExpectedCRPAndResourcePlacementStatusCondType(crp)
-allRPS, rpsSetCondTypeCounter, err := r.setScheduledResourcePlacementStatuses(
+allRPS, rpsSetCondTypeCounter, err := r.appendScheduledResourcePlacementStatuses(
ctx, allRPS, selected, expectedCondTypes, crp, latestSchedulingPolicySnapshot, latestResourceSnapshot)
if err != nil {
return false, err
}

// For clusters that failed to get scheduled, set a resource placement status with the
// failed to schedule condition for each of them.
-allRPS = setFailedToScheduleResourcePlacementStatuses(allRPS, unselected, failedToScheduleClusterCount, klog.KObj(crp), crp.Generation)
+allRPS = appendFailedToScheduleResourcePlacementStatuses(allRPS, unselected, failedToScheduleClusterCount, crp)

crp.Status.PlacementStatuses = allRPS

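The set→append renames match what this call site already does: each helper receives allRPS and hands it back with more entries appended, so a single slice accumulates the statuses for scheduled and unscheduled clusters alike. A toy sketch of that accumulation pattern (hypothetical types, not Fleet's actual API):

package main

import "fmt"

type placementStatus struct{ cluster string }

// appendScheduled appends one status per selected cluster and returns the
// (possibly grown) slice, mirroring the append* naming convention above.
func appendScheduled(all []placementStatus, selected []string) []placementStatus {
	for _, c := range selected {
		all = append(all, placementStatus{cluster: c})
	}
	return all
}

// appendFailedToSchedule does the same for clusters that missed scheduling.
func appendFailedToSchedule(all []placementStatus, unselected []string) []placementStatus {
	for _, c := range unselected {
		all = append(all, placementStatus{cluster: c})
	}
	return all
}

func main() {
	all := make([]placementStatus, 0, 3)
	all = appendScheduled(all, []string{"member-1", "member-2"})
	all = appendFailedToSchedule(all, []string{"member-3"})
	fmt.Println(len(all)) // 3: one status per cluster, in one slice
}
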
pkg/controllers/clusterresourceplacement/placement_status.go (29 changes: 13 additions & 16 deletions)

@@ -78,11 +78,11 @@ func calculateFailedToScheduleClusterCount(crp *fleetv1beta1.ClusterResourcePlac
}

// setFailedToScheduleResourcePlacementStatuses sets the resource placement statuses for the failed to schedule clusters.
-func setFailedToScheduleResourcePlacementStatuses(
+func appendFailedToScheduleResourcePlacementStatuses(
allRPS []fleetv1beta1.ResourcePlacementStatus,
unselected []*fleetv1beta1.ClusterDecision,
failedToScheduleClusterCount int,
-crpRef klog.ObjectRef, crpGeneration int64,
+crp *fleetv1beta1.ClusterResourcePlacement,
) []fleetv1beta1.ResourcePlacementStatus {
// In the earlier step it has been guaranteed that failedToScheduleClusterCount is less than or equal to the
// total number of unselected clusters; here Fleet still performs a sanity check.
@@ -94,13 +94,13 @@ func setFailedToScheduleResourcePlacementStatuses(
Type: string(fleetv1beta1.ResourceScheduledConditionType),
Reason: ResourceScheduleFailedReason,
Message: unselected[i].Reason,
-ObservedGeneration: crpGeneration,
+ObservedGeneration: crp.Generation,
}
meta.SetStatusCondition(&rps.Conditions, failedToScheduleCond)
// The allRPS slice has been pre-allocated, so the append call will never produce a new
// slice; here, however, Fleet will still return the old slice just in case.
allRPS = append(allRPS, *rps)
-klog.V(2).InfoS("Populated the resource placement status for the unscheduled cluster", "clusterResourcePlacement", crpRef, "cluster", unselected[i].ClusterName)
+klog.V(2).InfoS("Populated the resource placement status for the unscheduled cluster", "clusterResourcePlacement", klog.KObj(crp), "cluster", unselected[i].ClusterName)
}

return allRPS
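
The pre-allocation comment above leans on a Go guarantee worth spelling out: while a slice's length stays below its capacity, append writes into the existing backing array and returns a header over that same array, so returning the slice here is only a safeguard. A minimal standalone sketch (illustrative, not part of this commit):

package main

import "fmt"

func main() {
	// Pre-allocate: zero length, capacity for every expected element.
	rps := make([]int, 0, 3)
	backing := cap(rps)

	// While len < cap, append reuses the backing array; no reallocation.
	for i := 0; i < 3; i++ {
		rps = append(rps, i)
	}

	fmt.Println(cap(rps) == backing) // true: no new slice was produced
	fmt.Println(rps)                 // [0 1 2]
}
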
@@ -120,13 +120,13 @@ func determineExpectedCRPAndResourcePlacementStatusCondType(crp *fleetv1beta1.Cl
}

// setScheduledResourcePlacementStatuses sets the resource placement statuses for the scheduled clusters.
-func (r *Reconciler) setScheduledResourcePlacementStatuses(
+func (r *Reconciler) appendScheduledResourcePlacementStatuses(
ctx context.Context,
allRPS []fleetv1beta1.ResourcePlacementStatus,
selected []*fleetv1beta1.ClusterDecision,
expectedCondTypes []condition.ResourceCondition,
crp *fleetv1beta1.ClusterResourcePlacement,
-lastestSchedulingPolicySnapshot *fleetv1beta1.ClusterSchedulingPolicySnapshot,
+latestSchedulingPolicySnapshot *fleetv1beta1.ClusterSchedulingPolicySnapshot,
latestClusterResourceSnapshot *fleetv1beta1.ClusterResourceSnapshot,
) (
[]fleetv1beta1.ResourcePlacementStatus,
@@ -137,7 +137,7 @@ func (r *Reconciler) setScheduledResourcePlacementStatuses(
var rpsSetCondTypeCounter [condition.TotalCondition][condition.TotalConditionStatus]int

oldRPSMap := buildResourcePlacementStatusMap(crp)
-bindingMap, err := r.buildClusterResourceBindings(ctx, crp, lastestSchedulingPolicySnapshot)
+bindingMap, err := r.buildClusterResourceBindings(ctx, crp, latestSchedulingPolicySnapshot)
if err != nil {
return allRPS, rpsSetCondTypeCounter, err
}
@@ -167,10 +167,7 @@ func (r *Reconciler) setScheduledResourcePlacementStatuses(

// Prepare the new conditions.
binding := bindingMap[clusterDecision.ClusterName]
-setStatusByCondType, err := r.setResourcePlacementStatusPerCluster(crp, latestClusterResourceSnapshot, binding, rps, expectedCondTypes)
-if err != nil {
-return allRPS, rpsSetCondTypeCounter, err
-}
+setStatusByCondType := r.setResourcePlacementStatusPerCluster(crp, latestClusterResourceSnapshot, binding, rps, expectedCondTypes)

// Update the counter.
for condType, condStatus := range setStatusByCondType {
@@ -304,14 +301,14 @@ func (r *Reconciler) setResourcePlacementStatusPerCluster(
binding *fleetv1beta1.ClusterResourceBinding,
status *fleetv1beta1.ResourcePlacementStatus,
expectedCondTypes []condition.ResourceCondition,
-) (map[condition.ResourceCondition]metav1.ConditionStatus, error) {
+) map[condition.ResourceCondition]metav1.ConditionStatus {
res := make(map[condition.ResourceCondition]metav1.ConditionStatus)
if binding == nil {
// The binding cannot be found; Fleet might be observing an in-between state where
// the cluster has been picked but the binding has not been created yet.
meta.SetStatusCondition(&status.Conditions, condition.RolloutStartedCondition.UnknownResourceConditionPerCluster(crp.Generation))
res[condition.RolloutStartedCondition] = metav1.ConditionUnknown
-return res, nil
+return res
}

rolloutStartedCond := binding.GetCondition(string(condition.RolloutStartedCondition.ResourceBindingConditionType()))
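
The binding == nil branch above shows a common conditions idiom: when the controller cannot yet tell whether something has happened, it reports Unknown rather than False. A minimal sketch of that idiom using the apimachinery helper (the type and reason strings are modeled on the RolloutStarted condition seen here but are illustrative values):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	var conditions []metav1.Condition

	// The in-between state (cluster picked, binding not yet created) is
	// reported as Unknown; LastTransitionTime is filled in by the helper.
	meta.SetStatusCondition(&conditions, metav1.Condition{
		Type:               "RolloutStarted",
		Status:             metav1.ConditionUnknown,
		Reason:             "BindingNotFound", // illustrative reason string
		Message:            "the cluster has been picked, but the binding is not yet created",
		ObservedGeneration: 1,
	})

	fmt.Println(conditions[0].Status) // Unknown
}
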
@@ -328,14 +325,14 @@ func (r *Reconciler) setResourcePlacementStatusPerCluster(
}
meta.SetStatusCondition(&status.Conditions, cond)
res[condition.RolloutStartedCondition] = metav1.ConditionFalse
-return res, nil
+return res
case binding.Spec.ResourceSnapshotName != latestResourceSnapshot.Name:
// The binding uses an out of date resource snapshot, and the RolloutStarted condition is
// set to True, Unknown, or has become stale. Fleet might be observing an in-between state.
meta.SetStatusCondition(&status.Conditions, condition.RolloutStartedCondition.UnknownResourceConditionPerCluster(crp.Generation))
klog.V(5).InfoS("The cluster resource binding has a stale RolloutStarted condition, or it links to an out of date resource snapshot yet has the RolloutStarted condition set to True or Unknown status", "clusterResourceBinding", klog.KObj(binding), "clusterResourcePlacement", klog.KObj(crp))
res[condition.RolloutStartedCondition] = metav1.ConditionUnknown
-return res, nil
+return res
default:
// The binding uses the latest resource snapshot.
for _, i := range expectedCondTypes {
@@ -382,6 +379,6 @@ func (r *Reconciler) setResourcePlacementStatusPerCluster(
break // if the current condition is false, no need to populate the rest conditions
}
}
-return res, nil
+return res
}
}