diff --git a/pkg/addonmanager/controllers/agentdeploy/healthcheck_sync.go b/pkg/addonmanager/controllers/agentdeploy/healthcheck_sync.go index 8ca3e0a2..3e097082 100644 --- a/pkg/addonmanager/controllers/agentdeploy/healthcheck_sync.go +++ b/pkg/addonmanager/controllers/agentdeploy/healthcheck_sync.go @@ -3,6 +3,7 @@ package agentdeploy import ( "context" "fmt" + "regexp" "strings" appsv1 "k8s.io/api/apps/v1" @@ -167,17 +168,38 @@ func (s *healthCheckSyncer) probeAddonStatusByWorks( manifestConditions = append(manifestConditions, work.Status.ResourceStatus.Manifests...) } - probeFields, healthChecker, err := s.analyzeWorkProber(s.agentAddon, cluster, addon) + probeFields, healthChecker, healthAllChecker, err := s.analyzeWorkProber(s.agentAddon, cluster, addon) if err != nil { // should not happen, return return err } + var resultFields []agent.ResultField + + for _, field := range probeFields { - result := findResultByIdentifier(field.ResourceIdentifier, manifestConditions) + results := findResultsByIdentifier(field.ResourceIdentifier, manifestConditions) + + // healthChecker will be ignored if healthAllChecker is set + if healthAllChecker != nil { + if len(results) != 0 { + resultFields = append(resultFields, results...) + } + continue + } + + if healthChecker == nil { + meta.SetStatusCondition(&addon.Status.Conditions, metav1.Condition{ + Type: addonapiv1alpha1.ManagedClusterAddOnConditionAvailable, + Status: metav1.ConditionFalse, + Reason: addonapiv1alpha1.AddonAvailableReasonProbeUnavailable, + Message: "health checker function is not set", + }) + return nil + } + + // if no results are returned. it is possible that work agent has not returned the feedback value. // mark condition to unknown - if result == nil { + if len(results) == 0 { meta.SetStatusCondition(&addon.Status.Conditions, metav1.Condition{ Type: addonapiv1alpha1.ManagedClusterAddOnConditionAvailable, Status: metav1.ConditionUnknown, @@ -189,16 +211,31 @@ func (s *healthCheckSyncer) probeAddonStatusByWorks( return nil } - err := healthChecker(field.ResourceIdentifier, *result) - if err != nil { - meta.SetStatusCondition(&addon.Status.Conditions, metav1.Condition{ - Type: addonapiv1alpha1.ManagedClusterAddOnConditionAvailable, - Status: metav1.ConditionFalse, - Reason: addonapiv1alpha1.AddonAvailableReasonProbeUnavailable, - Message: fmt.Sprintf("Probe addon unavailable with err %v", err), - }) - return nil + for _, result := range results { + err := healthChecker(result.ResourceIdentifier, result.FeedbackResult) + if err != nil { + meta.SetStatusCondition(&addon.Status.Conditions, metav1.Condition{ + Type: addonapiv1alpha1.ManagedClusterAddOnConditionAvailable, + Status: metav1.ConditionFalse, + Reason: addonapiv1alpha1.AddonAvailableReasonProbeUnavailable, + Message: fmt.Sprintf("Probe addon unavailable with err %v", err), + }) + return nil + } } + + } + + if healthAllChecker != nil { + if err := healthAllChecker(resultFields); err != nil { + meta.SetStatusCondition(&addon.Status.Conditions, metav1.Condition{ + Type: addonapiv1alpha1.ManagedClusterAddOnConditionAvailable, + Status: metav1.ConditionFalse, + Reason: addonapiv1alpha1.AddonAvailableReasonProbeUnavailable, + Message: fmt.Sprintf("Probe addon unavailable with err %v", err), + }) + return nil + } } meta.SetStatusCondition(&addon.Status.Conditions, metav1.Condition{ @@ -214,21 +251,23 @@ func (s *healthCheckSyncer) analyzeWorkProber( agentAddon agent.AgentAddon, cluster *clusterv1.ManagedCluster, addon *addonapiv1alpha1.ManagedClusterAddOn, -) ([]agent.ProbeField, 
agent.AddonHealthCheckFunc, error) { +) ([]agent.ProbeField, agent.AddonHealthCheckFunc, agent.AddonHealthCheckAllFunc, error) { switch agentAddon.GetAgentAddonOptions().HealthProber.Type { case agent.HealthProberTypeWork: workProber := agentAddon.GetAgentAddonOptions().HealthProber.WorkProber if workProber != nil { - return workProber.ProbeFields, workProber.HealthCheck, nil + return workProber.ProbeFields, workProber.HealthCheck, workProber.HealthCheckAll, nil } - return nil, nil, fmt.Errorf("work prober is not configured") + return nil, nil, nil, fmt.Errorf("work prober is not configured") case agent.HealthProberTypeDeploymentAvailability: - return s.analyzeDeploymentWorkProber(agentAddon, cluster, addon) + probeFields, healthChecker, err := s.analyzeDeploymentWorkProber(agentAddon, cluster, addon) + return probeFields, healthChecker, nil, err case agent.HealthProberTypeWorkloadAvailability: - return s.analyzeWorkloadsWorkProber(agentAddon, cluster, addon) + probeFields, healthChecker, err := s.analyzeWorkloadsWorkProber(agentAddon, cluster, addon) + return probeFields, healthChecker, nil, err default: - return nil, nil, fmt.Errorf("unsupported health prober type %s", agentAddon.GetAgentAddonOptions().HealthProber.Type) + return nil, nil, nil, fmt.Errorf("unsupported health prober type %s", agentAddon.GetAgentAddonOptions().HealthProber.Type) } } @@ -294,27 +333,46 @@ func (s *healthCheckSyncer) analyzeWorkloadsWorkProber( return probeFields, utils.WorkloadAvailabilityHealthCheck, nil } -func findResultByIdentifier(identifier workapiv1.ResourceIdentifier, manifestConditions []workapiv1.ManifestCondition) *workapiv1.StatusFeedbackResult { +func findResultsByIdentifier(identifier workapiv1.ResourceIdentifier, + manifestConditions []workapiv1.ManifestCondition) []agent.ResultField { + var results []agent.ResultField for _, status := range manifestConditions { - if identifier.Group != status.ResourceMeta.Group { - continue - } - if identifier.Resource != status.ResourceMeta.Resource { - continue - } - if identifier.Name != status.ResourceMeta.Name { - continue - } - if identifier.Namespace != status.ResourceMeta.Namespace { - continue + if resourceMatch(status.ResourceMeta, identifier) && len(status.StatusFeedbacks.Values) != 0 { + results = append(results, agent.ResultField{ + ResourceIdentifier: workapiv1.ResourceIdentifier{ + Group: status.ResourceMeta.Group, + Resource: status.ResourceMeta.Resource, + Name: status.ResourceMeta.Name, + Namespace: status.ResourceMeta.Namespace, + }, + FeedbackResult: status.StatusFeedbacks, + }) } + } - if len(status.StatusFeedbacks.Values) == 0 { - return nil - } + return results +} - return &status.StatusFeedbacks +// wildcardMatch compares resource with target; target may include the wildcard character * +func wildcardMatch(resource, target string) bool { + if resource == target || target == "*" { + return true } - return nil + pattern := "^" + regexp.QuoteMeta(target) + "$" + pattern = strings.ReplaceAll(pattern, "\\*", ".*") + + re, err := regexp.Compile(pattern) + if err != nil { + return false + } + + return re.MatchString(resource) +} + +func resourceMatch(resourceMeta workapiv1.ManifestResourceMeta, resource workapiv1.ResourceIdentifier) bool { + return resourceMeta.Group == resource.Group && + resourceMeta.Resource == resource.Resource && + wildcardMatch(resourceMeta.Namespace, resource.Namespace) && + wildcardMatch(resourceMeta.Name, resource.Name) } diff --git a/pkg/addonmanager/controllers/agentdeploy/healthcheck_sync_test.go b/pkg/addonmanager/controllers/agentdeploy/healthcheck_sync_test.go 
index 8bd18581..864d816e 100644 --- a/pkg/addonmanager/controllers/agentdeploy/healthcheck_sync_test.go +++ b/pkg/addonmanager/controllers/agentdeploy/healthcheck_sync_test.go @@ -2,6 +2,7 @@ package agentdeploy import ( "context" + "fmt" "testing" "time" @@ -362,7 +363,163 @@ func TestHealthCheckReconcile(t *testing.T) { Message: "test add-on is available.", }, }, - + { + name: "Health check mode is work and WorkProber check pass with addonHealthCheckAllFunc", + testAddon: &healthCheckTestAgent{name: "test", + health: newDeploymentsCheckAllProber(types.NamespacedName{Name: "test-deployment0", Namespace: "default"}, + types.NamespacedName{Name: "test-deployment1", Namespace: "default"}), + }, + addon: addontesting.NewAddonWithConditions("test", "cluster1", manifestAppliedCondition), + existingWork: []runtime.Object{ + &v1.ManifestWork{ + ObjectMeta: metav1.ObjectMeta{ + Name: "addon-test-deploy-01", + Namespace: "cluster1", + Labels: map[string]string{ + "open-cluster-management.io/addon-name": "test", + }, + }, + Spec: v1.ManifestWorkSpec{}, + Status: v1.ManifestWorkStatus{ + ResourceStatus: v1.ManifestResourceStatus{ + Manifests: []v1.ManifestCondition{ + { + ResourceMeta: v1.ManifestResourceMeta{ + Ordinal: 0, + Group: "apps", + Version: "", + Kind: "", + Resource: "deployments", + Name: "test-deployment0", + Namespace: "default", + }, + StatusFeedbacks: v1.StatusFeedbackResult{ + Values: []v1.FeedbackValue{ + { + Name: "Replicas", + Value: v1.FieldValue{ + Integer: boolPtr(1), + }, + }, + { + Name: "ReadyReplicas", + Value: v1.FieldValue{ + Integer: boolPtr(2), + }, + }, + }, + }, + }, + { + ResourceMeta: v1.ManifestResourceMeta{ + Ordinal: 0, + Group: "apps", + Version: "", + Kind: "", + Resource: "deployments", + Name: "test-deployment1", + Namespace: "default", + }, + StatusFeedbacks: v1.StatusFeedbackResult{}, + }, + }, + }, + Conditions: []metav1.Condition{ + { + Type: v1.WorkAvailable, + Status: metav1.ConditionTrue, + }, + }, + }, + }, + }, + expectedErr: nil, + expectedHealthCheckMode: addonapiv1alpha1.HealthCheckModeCustomized, + expectAvailableCondition: metav1.Condition{ + Type: addonapiv1alpha1.ManagedClusterAddOnConditionAvailable, + Status: metav1.ConditionTrue, + Reason: addonapiv1alpha1.AddonAvailableReasonProbeAvailable, + Message: "test add-on is available.", + }, + }, + { + name: "Health check mode is work and WorkProber check pass with addonHealthCheckAllFunc and wildcard", + testAddon: &healthCheckTestAgent{name: "test", + health: newDeploymentsCheckAllProber(types.NamespacedName{Name: "*", Namespace: "*"}), + }, + addon: addontesting.NewAddonWithConditions("test", "cluster1", manifestAppliedCondition), + existingWork: []runtime.Object{ + &v1.ManifestWork{ + ObjectMeta: metav1.ObjectMeta{ + Name: "addon-test-deploy-01", + Namespace: "cluster1", + Labels: map[string]string{ + "open-cluster-management.io/addon-name": "test", + }, + }, + Spec: v1.ManifestWorkSpec{}, + Status: v1.ManifestWorkStatus{ + ResourceStatus: v1.ManifestResourceStatus{ + Manifests: []v1.ManifestCondition{ + { + ResourceMeta: v1.ManifestResourceMeta{ + Ordinal: 0, + Group: "apps", + Version: "", + Kind: "", + Resource: "deployments", + Name: "test-deployment0", + Namespace: "default", + }, + StatusFeedbacks: v1.StatusFeedbackResult{ + Values: []v1.FeedbackValue{ + { + Name: "Replicas", + Value: v1.FieldValue{ + Integer: boolPtr(1), + }, + }, + { + Name: "ReadyReplicas", + Value: v1.FieldValue{ + Integer: boolPtr(2), + }, + }, + }, + }, + }, + { + ResourceMeta: v1.ManifestResourceMeta{ + Ordinal: 
0, + Group: "apps", + Version: "", + Kind: "", + Resource: "deployments", + Name: "test-deployment1", + Namespace: "default", + }, + StatusFeedbacks: v1.StatusFeedbackResult{}, + }, + }, + }, + Conditions: []metav1.Condition{ + { + Type: v1.WorkAvailable, + Status: metav1.ConditionTrue, + }, + }, + }, + }, + }, + expectedErr: nil, + expectedHealthCheckMode: addonapiv1alpha1.HealthCheckModeCustomized, + expectAvailableCondition: metav1.Condition{ + Type: addonapiv1alpha1.ManagedClusterAddOnConditionAvailable, + Status: metav1.ConditionTrue, + Reason: addonapiv1alpha1.AddonAvailableReasonProbeAvailable, + Message: "test add-on is available.", + }, + }, { name: "Health check mode is deployment availability but manifestApplied condition is not true", testAddon: &healthCheckTestAgent{name: "test", @@ -929,3 +1086,34 @@ func TestHealthCheckReconcile(t *testing.T) { }) } } + +func addonHealthCheckAllFunc(resultFields []agent.ResultField) error { + for _, field := range resultFields { + switch field.ResourceIdentifier.Resource { + case "deployments": + err := utils.DeploymentAvailabilityHealthCheck(field.ResourceIdentifier, field.FeedbackResult) + if err == nil { + return nil + } + } + } + return fmt.Errorf("no deployment result met the availability check") +} + +func newDeploymentsCheckAllProber(deployments ...types.NamespacedName) *agent.HealthProber { + probeFields := []agent.ProbeField{} + for _, deploy := range deployments { + mc := utils.DeploymentWellKnowManifestConfig(deploy.Namespace, deploy.Name) + probeFields = append(probeFields, agent.ProbeField{ + ResourceIdentifier: mc.ResourceIdentifier, + ProbeRules: mc.FeedbackRules, + }) + } + return &agent.HealthProber{ + Type: agent.HealthProberTypeWork, + WorkProber: &agent.WorkHealthProber{ + ProbeFields: probeFields, + HealthCheckAll: addonHealthCheckAllFunc, + }, + } +} diff --git a/pkg/agent/inteface.go b/pkg/agent/inteface.go index d2861cb6..064b724f 100644 --- a/pkg/agent/inteface.go +++ b/pkg/agent/inteface.go @@ -169,13 +169,18 @@ type HealthProber struct { } type AddonHealthCheckFunc func(workapiv1.ResourceIdentifier, workapiv1.StatusFeedbackResult) error +type AddonHealthCheckAllFunc func([]ResultField) error type WorkHealthProber struct { // ProbeFields tells addon framework what field to probe ProbeFields []ProbeField - // HealthCheck check status of the addon based on probe result. + // HealthCheck checks the status of the addon based on each probeField result. + // HealthCheck will be ignored if HealthCheckAll is set. 
HealthCheck AddonHealthCheckFunc + + // HealthCheckAll checks the status of the addon based on all the results of the probeFields + HealthCheckAll AddonHealthCheckAllFunc } // ProbeField defines the field of a resource to be probed @@ -187,6 +192,15 @@ type ProbeField struct { ProbeRules []workapiv1.FeedbackRule } +// ResultField defines the feedback result of a probed resource +type ResultField struct { + // ResourceIdentifier identifies the resource that the FeedbackResult belongs to + ResourceIdentifier workapiv1.ResourceIdentifier + + // FeedbackResult is the StatusFeedbackResult of the resource + FeedbackResult workapiv1.StatusFeedbackResult +} + type HealthProberType string const ( diff --git a/pkg/utils/probe_helper.go b/pkg/utils/probe_helper.go index 0d782a13..06be402c 100644 --- a/pkg/utils/probe_helper.go +++ b/pkg/utils/probe_helper.go @@ -37,6 +37,32 @@ func NewDeploymentProber(deployments ...types.NamespacedName) *agent.HealthProbe } } +func NewAllDeploymentsProber() *agent.HealthProber { + probeFields := []agent.ProbeField{ + { + ResourceIdentifier: workapiv1.ResourceIdentifier{ + Group: "apps", + Resource: "deployments", + Name: "*", + Namespace: "*", + }, + ProbeRules: []workapiv1.FeedbackRule{ + { + Type: workapiv1.WellKnownStatusType, + }, + }, + }, + } + + return &agent.HealthProber{ + Type: agent.HealthProberTypeWork, + WorkProber: &agent.WorkHealthProber{ + ProbeFields: probeFields, + HealthCheckAll: AllDeploymentsAvailabilityHealthCheck, + }, + } +} + func (d *DeploymentProber) ProbeFields() []agent.ProbeField { probeFields := []agent.ProbeField{} for _, deploy := range d.deployments { @@ -62,6 +88,19 @@ func DeploymentAvailabilityHealthCheck(identifier workapiv1.ResourceIdentifier, return WorkloadAvailabilityHealthCheck(identifier, result) } +func AllDeploymentsAvailabilityHealthCheck(results []agent.ResultField) error { + if len(results) < 2 { + return fmt.Errorf("expected feedback results from at least 2 deployments, but got %d", len(results)) + } + + for _, result := range results { + if err := WorkloadAvailabilityHealthCheck(result.ResourceIdentifier, result.FeedbackResult); err != nil { + return err + } + } + return nil +} + func WorkloadAvailabilityHealthCheck(identifier workapiv1.ResourceIdentifier, result workapiv1.StatusFeedbackResult) error { // only support deployments and daemonsets for now diff --git a/test/integration/kube/agent_deploy_test.go b/test/integration/kube/agent_deploy_test.go index 3eccc657..dce59ee4 100644 --- a/test/integration/kube/agent_deploy_test.go +++ b/test/integration/kube/agent_deploy_test.go @@ -444,6 +444,219 @@ var _ = ginkgo.Describe("Agent deploy", func() { return nil }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) }) + ginkgo.It("Should deploy agents and get available with prober func including wildcard", func() { + deploy1 := &unstructured.Unstructured{} + err := deploy1.UnmarshalJSON([]byte(deploymentJson)) + deploy1.SetName("deployment1") + deploy1.SetNamespace("ns1") + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + deploy2 := &unstructured.Unstructured{} + err = deploy2.UnmarshalJSON([]byte(deploymentJson)) + deploy2.SetName("deployment2") + deploy2.SetNamespace("ns2") + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + testAddonImpl.manifests[managedClusterName] = []runtime.Object{deploy1, deploy2} + testAddonImpl.prober = utils.NewAllDeploymentsProber() + + addon := &addonapiv1alpha1.ManagedClusterAddOn{ + ObjectMeta: metav1.ObjectMeta{ + Name: testAddonImpl.name, + }, + Spec: addonapiv1alpha1.ManagedClusterAddOnSpec{ + InstallNamespace: "default", + }, + } + 
createManagedClusterAddOnwithOwnerRefs(managedClusterName, addon, cma) + + gomega.Eventually(func() error { + work, err := hubWorkClient.WorkV1().ManifestWorks(managedClusterName).Get(context.Background(), manifestWorkName, metav1.GetOptions{}) + if err != nil { + return err + } + + if len(work.Spec.Workload.Manifests) != 2 { + return fmt.Errorf("unexpected number of work manifests") + } + + if len(work.Spec.ManifestConfigs) != 1 { + return fmt.Errorf("unexpected number of work manifests configuration") + } + + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + // Update work status to trigger addon status + work, err := hubWorkClient.WorkV1().ManifestWorks(managedClusterName).Get(context.Background(), manifestWorkName, metav1.GetOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + meta.SetStatusCondition(&work.Status.Conditions, metav1.Condition{Type: workapiv1.WorkAvailable, Status: metav1.ConditionTrue, Reason: "WorkAvailable"}) + + replica := int64(1) + + // update work status to a wrong feedback status + work.Status.ResourceStatus = workapiv1.ManifestResourceStatus{ + Manifests: []workapiv1.ManifestCondition{ + { + ResourceMeta: workapiv1.ManifestResourceMeta{ + Ordinal: 0, + Group: "apps", + Resource: "deployments", + Name: "deployment1", + Namespace: "ns1", + }, + StatusFeedbacks: workapiv1.StatusFeedbackResult{ + Values: []workapiv1.FeedbackValue{ + { + Name: "Replicas", + Value: workapiv1.FieldValue{ + Type: workapiv1.Integer, + Integer: &replica, + }, + }, + }, + }, + Conditions: []metav1.Condition{ + { + Type: "Available", + Status: metav1.ConditionTrue, + Reason: "MinimumReplicasAvailable", + Message: "Deployment has minimum availability.", + LastTransitionTime: metav1.NewTime(time.Now()), + }, + }, + }, + { + ResourceMeta: workapiv1.ManifestResourceMeta{ + Ordinal: 1, + Group: "apps", + Resource: "deployments", + Name: "deployment2", + Namespace: "ns2", + }, + StatusFeedbacks: workapiv1.StatusFeedbackResult{ + Values: []workapiv1.FeedbackValue{}, + }, + Conditions: []metav1.Condition{ + { + Type: "Available", + Status: metav1.ConditionFalse, + Reason: "MinimumReplicasAvailable", + Message: "Deployment has minimum availability.", + LastTransitionTime: metav1.NewTime(time.Now()), + }, + }, + }, + }, + } + meta.SetStatusCondition(&work.Status.Conditions, metav1.Condition{Type: workapiv1.WorkApplied, Status: metav1.ConditionTrue, Reason: "WorkApplied"}) + _, err = hubWorkClient.WorkV1().ManifestWorks(managedClusterName).UpdateStatus(context.Background(), work, metav1.UpdateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + gomega.Eventually(func() error { + addon, err := hubAddonClient.AddonV1alpha1().ManagedClusterAddOns(managedClusterName).Get(context.Background(), testAddonImpl.name, metav1.GetOptions{}) + if err != nil { + return err + } + + if !meta.IsStatusConditionFalse(addon.Status.Conditions, "Available") { + return fmt.Errorf("unexpected addon available condition, %#v", addon.Status) + } + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + // update to the correct condition + work, err = hubWorkClient.WorkV1().ManifestWorks(managedClusterName).Get(context.Background(), manifestWorkName, metav1.GetOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + work.Status.ResourceStatus = workapiv1.ManifestResourceStatus{ + Manifests: []workapiv1.ManifestCondition{ + { + ResourceMeta: workapiv1.ManifestResourceMeta{ + Ordinal: 0, + Group: "apps", + Resource: 
"deployments", + Name: "deployment1", + Namespace: "ns1", + }, + StatusFeedbacks: workapiv1.StatusFeedbackResult{ + Values: []workapiv1.FeedbackValue{ + { + Name: "ReadyReplicas", + Value: workapiv1.FieldValue{ + Type: workapiv1.Integer, + Integer: &replica, + }, + }, + { + Name: "Replicas", + Value: workapiv1.FieldValue{ + Type: workapiv1.Integer, + Integer: &replica, + }, + }, + }, + }, + Conditions: []metav1.Condition{ + { + Type: "Available", + Status: metav1.ConditionTrue, + Reason: "MinimumReplicasAvailable", + Message: "Deployment has minimum availability.", + LastTransitionTime: metav1.NewTime(time.Now()), + }, + }, + }, + { + ResourceMeta: workapiv1.ManifestResourceMeta{ + Ordinal: 1, + Group: "apps", + Resource: "deployments", + Name: "deployment2", + Namespace: "ns2", + }, + StatusFeedbacks: workapiv1.StatusFeedbackResult{ + Values: []workapiv1.FeedbackValue{ + { + Name: "ReadyReplicas", + Value: workapiv1.FieldValue{ + Type: workapiv1.Integer, + Integer: &replica, + }, + }, + { + Name: "Replicas", + Value: workapiv1.FieldValue{ + Type: workapiv1.Integer, + Integer: &replica, + }, + }, + }, + }, + Conditions: []metav1.Condition{ + { + Type: "Available", + Status: metav1.ConditionTrue, + Reason: "MinimumReplicasAvailable", + Message: "Deployment has minimum availability.", + LastTransitionTime: metav1.NewTime(time.Now()), + }, + }, + }, + }, + } + _, err = hubWorkClient.WorkV1().ManifestWorks(managedClusterName).UpdateStatus(context.Background(), work, metav1.UpdateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + gomega.Eventually(func() error { + addon, err := hubAddonClient.AddonV1alpha1().ManagedClusterAddOns(managedClusterName).Get(context.Background(), testAddonImpl.name, metav1.GetOptions{}) + if err != nil { + return err + } + + if !meta.IsStatusConditionTrue(addon.Status.Conditions, "Available") { + return fmt.Errorf("unexpected addon available condition, %v", addon.Status.Conditions) + } + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + }) ginkgo.It("Should deploy agent and get available with deployment availability prober func", func() { obj := &unstructured.Unstructured{}