diff --git a/cmd/example/helloworld_helm/main.go b/cmd/example/helloworld_helm/main.go
index 09952322..0cb4cda1 100644
--- a/cmd/example/helloworld_helm/main.go
+++ b/cmd/example/helloworld_helm/main.go
@@ -132,7 +132,7 @@ func runController(ctx context.Context, kubeConfig *rest.Config) error {
             utils.AgentInstallNamespaceFromDeploymentConfigFunc(
                 utils.NewAddOnDeploymentConfigGetter(addonClient),
             ),
-        ).
+        ).WithAgentHealthProber(helloworld_helm.AgentHealthProber()).
         BuildHelmAgentAddon()
     if err != nil {
         klog.Errorf("failed to build agent %v", err)
diff --git a/examples/helloworld_helm/helloworld_helm.go b/examples/helloworld_helm/helloworld_helm.go
index b10af772..5d7ced49 100644
--- a/examples/helloworld_helm/helloworld_helm.go
+++ b/examples/helloworld_helm/helloworld_helm.go
@@ -4,6 +4,8 @@ import (
     "context"
     "embed"
     "fmt"
+    "open-cluster-management.io/addon-framework/pkg/agent"
+    workapiv1 "open-cluster-management.io/api/work/v1"
     "os"

     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -112,3 +114,42 @@ func GetImageValues(kubeClient kubernetes.Interface) addonfactory.GetValuesFunc
         return overrideValues, nil
     }
 }
+
+func AgentHealthProber() *agent.HealthProber {
+    return &agent.HealthProber{
+        Type: agent.HealthProberTypeWork,
+        WorkProber: &agent.WorkHealthProber{
+            ProbeFields: []agent.ProbeField{
+                {
+                    ResourceIdentifier: workapiv1.ResourceIdentifier{
+                        Group:     "apps",
+                        Resource:  "deployments",
+                        Name:      "*",
+                        Namespace: "*",
+                    },
+                    ProbeRules: []workapiv1.FeedbackRule{
+                        {
+                            Type: workapiv1.WellKnownStatusType,
+                        },
+                    },
+                },
+            },
+            HealthCheckAll: func(fields []agent.ResultField) error {
+                for _, field := range fields {
+                    if len(field.FeedbackResult.Values) == 0 {
+                        return fmt.Errorf("no feedback values for %s/%s", field.ResourceIdentifier.Namespace, field.ResourceIdentifier.Name)
+                    }
+                    switch field.ResourceIdentifier.Name {
+                    case "helloworldhelm-agent":
+                        for _, value := range field.FeedbackResult.Values {
+                            if value.Name == "AvailableReplicas" && *value.Value.Integer >= 1 {
+                                return nil
+                            }
+                        }
+                    }
+                }
+                return fmt.Errorf("helloworldhelm agent is not ready")
+            },
+        },
+    }
+}
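Note on `AgentHealthProber` above: the single wildcard `ProbeField` asks the work agent to return `WellKnownStatus` feedback for every Deployment the addon applies, and `HealthCheckAll` then runs once with one `agent.ResultField` per matched resource. A minimal sketch of that contract (the namespace and the feedback values below are illustrative, not taken from a real cluster):

```go
package main

import (
	"fmt"

	"open-cluster-management.io/addon-framework/examples/helloworld_helm"
	"open-cluster-management.io/addon-framework/pkg/agent"
	workapiv1 "open-cluster-management.io/api/work/v1"
)

func main() {
	available := int64(1)
	// One ResultField per resource matched by the wildcard identifier.
	fields := []agent.ResultField{{
		ResourceIdentifier: workapiv1.ResourceIdentifier{
			Group:     "apps",
			Resource:  "deployments",
			Name:      "helloworldhelm-agent",
			Namespace: "open-cluster-management-agent-addon", // illustrative install namespace
		},
		FeedbackResult: workapiv1.StatusFeedbackResult{
			Values: []workapiv1.FeedbackValue{{
				Name:  "AvailableReplicas",
				Value: workapiv1.FieldValue{Type: workapiv1.Integer, Integer: &available},
			}},
		},
	}}

	// Prints <nil>: a nil error means the framework marks the addon Available.
	fmt.Println(helloworld_helm.AgentHealthProber().WorkProber.HealthCheckAll(fields))
}
```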
diff --git a/go.mod b/go.mod
index af201792..0a06e548 100644
--- a/go.mod
+++ b/go.mod
@@ -22,7 +22,7 @@ require (
     k8s.io/component-base v0.30.2
     k8s.io/klog/v2 v2.120.1
     k8s.io/utils v0.0.0-20240310230437-4693a0247e57
-    open-cluster-management.io/api v0.15.0
+    open-cluster-management.io/api v0.15.1-0.20241120090202-cb7ce98ab874
     open-cluster-management.io/sdk-go v0.15.0
     sigs.k8s.io/controller-runtime v0.18.4
 )
diff --git a/go.sum b/go.sum
index 23aafa99..77f2e11f 100644
--- a/go.sum
+++ b/go.sum
@@ -474,8 +474,8 @@ k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7F
 k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98=
 k8s.io/utils v0.0.0-20240310230437-4693a0247e57 h1:gbqbevonBh57eILzModw6mrkbwM0gQBEuevE/AaBsHY=
 k8s.io/utils v0.0.0-20240310230437-4693a0247e57/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
-open-cluster-management.io/api v0.15.0 h1:lRee1KOlGHZb2scTA7ff9E9Fxt2hJc7jpkHnaCbvkOU=
-open-cluster-management.io/api v0.15.0/go.mod h1:9erZEWEn4bEqh0nIX2wA7f/s3KCuFycQdBrPrRzi0QM=
+open-cluster-management.io/api v0.15.1-0.20241120090202-cb7ce98ab874 h1:WgkuYXTbJV7EK+qtiMq3soa21faGUKeTG5w0C8Mn1Ok=
+open-cluster-management.io/api v0.15.1-0.20241120090202-cb7ce98ab874/go.mod h1:9erZEWEn4bEqh0nIX2wA7f/s3KCuFycQdBrPrRzi0QM=
 open-cluster-management.io/sdk-go v0.15.0 h1:2IAJnPfUoY6rPC5w7LhqAnvIlgekPoVW03LdZO1unIM=
 open-cluster-management.io/sdk-go v0.15.0/go.mod h1:fi5WBsbC5K3txKb8eRLuP0Sim/Oqz/PHX18skAEyjiA=
 sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.29.0 h1:/U5vjBbQn3RChhv7P11uhYvCSm5G2GaIi5AIGBS6r4c=
diff --git a/pkg/addonmanager/controllers/agentdeploy/healthcheck_sync.go b/pkg/addonmanager/controllers/agentdeploy/healthcheck_sync.go
index 8ca3e0a2..3e097082 100644
--- a/pkg/addonmanager/controllers/agentdeploy/healthcheck_sync.go
+++ b/pkg/addonmanager/controllers/agentdeploy/healthcheck_sync.go
@@ -3,6 +3,7 @@ package agentdeploy
 import (
     "context"
     "fmt"
+    "regexp"
     "strings"

     appsv1 "k8s.io/api/apps/v1"
@@ -167,17 +168,38 @@ func (s *healthCheckSyncer) probeAddonStatusByWorks(
         manifestConditions = append(manifestConditions, work.Status.ResourceStatus.Manifests...)
     }

-    probeFields, healthChecker, err := s.analyzeWorkProber(s.agentAddon, cluster, addon)
+    probeFields, healthChecker, healthAllChecker, err := s.analyzeWorkProber(s.agentAddon, cluster, addon)
     if err != nil {
         // should not happen, return
         return err
     }

+    var resultFields []agent.ResultField
+
     for _, field := range probeFields {
-        result := findResultByIdentifier(field.ResourceIdentifier, manifestConditions)
+        results := findResultsByIdentifier(field.ResourceIdentifier, manifestConditions)
+
+        // healthChecker is ignored if healthAllChecker is set; collect the
+        // results of every probe field and check them in one pass below
+        if healthAllChecker != nil {
+            if len(results) != 0 {
+                resultFields = append(resultFields, results...)
+            }
+            continue
+        }
+
+        if healthChecker == nil {
+            meta.SetStatusCondition(&addon.Status.Conditions, metav1.Condition{
+                Type:    addonapiv1alpha1.ManagedClusterAddOnConditionAvailable,
+                Status:  metav1.ConditionFalse,
+                Reason:  addonapiv1alpha1.AddonAvailableReasonProbeUnavailable,
+                Message: "health checker function is not set",
+            })
+            return nil
+        }
+
         // if no results are returned, it is possible that the work agent has not
         // returned the feedback value yet. mark the condition as unknown
-        if result == nil {
+        if len(results) == 0 {
             meta.SetStatusCondition(&addon.Status.Conditions, metav1.Condition{
                 Type:   addonapiv1alpha1.ManagedClusterAddOnConditionAvailable,
                 Status: metav1.ConditionUnknown,
@@ -189,16 +211,29 @@ func (s *healthCheckSyncer) probeAddonStatusByWorks(
             return nil
         }

-        err := healthChecker(field.ResourceIdentifier, *result)
-        if err != nil {
-            meta.SetStatusCondition(&addon.Status.Conditions, metav1.Condition{
-                Type:    addonapiv1alpha1.ManagedClusterAddOnConditionAvailable,
-                Status:  metav1.ConditionFalse,
-                Reason:  addonapiv1alpha1.AddonAvailableReasonProbeUnavailable,
-                Message: fmt.Sprintf("Probe addon unavailable with err %v", err),
-            })
-            return nil
+        for _, result := range results {
+            err := healthChecker(result.ResourceIdentifier, result.FeedbackResult)
+            if err != nil {
+                meta.SetStatusCondition(&addon.Status.Conditions, metav1.Condition{
+                    Type:    addonapiv1alpha1.ManagedClusterAddOnConditionAvailable,
+                    Status:  metav1.ConditionFalse,
+                    Reason:  addonapiv1alpha1.AddonAvailableReasonProbeUnavailable,
+                    Message: fmt.Sprintf("Probe addon unavailable with err %v", err),
+                })
+                return nil
+            }
         }
     }

+    if healthAllChecker != nil {
+        if err := healthAllChecker(resultFields); err != nil {
+            meta.SetStatusCondition(&addon.Status.Conditions, metav1.Condition{
+                Type:    addonapiv1alpha1.ManagedClusterAddOnConditionAvailable,
+                Status:  metav1.ConditionFalse,
+                Reason:  addonapiv1alpha1.AddonAvailableReasonProbeUnavailable,
+                Message: fmt.Sprintf("Probe addon unavailable with err %v", err),
+            })
+            return nil
+        }
+    }
+
     meta.SetStatusCondition(&addon.Status.Conditions, metav1.Condition{
@@ -214,21 +249,23 @@ func (s *healthCheckSyncer) analyzeWorkProber(
     agentAddon agent.AgentAddon,
     cluster *clusterv1.ManagedCluster,
     addon *addonapiv1alpha1.ManagedClusterAddOn,
-) ([]agent.ProbeField, agent.AddonHealthCheckFunc, error) {
+) ([]agent.ProbeField, agent.AddonHealthCheckFunc, agent.AddonHealthCheckAllFunc, error) {
     switch agentAddon.GetAgentAddonOptions().HealthProber.Type {
     case agent.HealthProberTypeWork:
         workProber := agentAddon.GetAgentAddonOptions().HealthProber.WorkProber
         if workProber != nil {
-            return workProber.ProbeFields, workProber.HealthCheck, nil
+            return workProber.ProbeFields, workProber.HealthCheck, workProber.HealthCheckAll, nil
         }
-        return nil, nil, fmt.Errorf("work prober is not configured")
+        return nil, nil, nil, fmt.Errorf("work prober is not configured")
     case agent.HealthProberTypeDeploymentAvailability:
-        return s.analyzeDeploymentWorkProber(agentAddon, cluster, addon)
+        probeFields, healthChecker, err := s.analyzeDeploymentWorkProber(agentAddon, cluster, addon)
+        return probeFields, healthChecker, nil, err
     case agent.HealthProberTypeWorkloadAvailability:
-        return s.analyzeWorkloadsWorkProber(agentAddon, cluster, addon)
+        probeFields, healthChecker, err := s.analyzeWorkloadsWorkProber(agentAddon, cluster, addon)
+        return probeFields, healthChecker, nil, err
     default:
-        return nil, nil, fmt.Errorf("unsupported health prober type %s", agentAddon.GetAgentAddonOptions().HealthProber.Type)
+        return nil, nil, nil, fmt.Errorf("unsupported health prober type %s", agentAddon.GetAgentAddonOptions().HealthProber.Type)
     }
 }
@@ -294,27 +331,46 @@ func (s *healthCheckSyncer) analyzeWorkloadsWorkProber(
     return probeFields, utils.WorkloadAvailabilityHealthCheck, nil
 }

-func findResultByIdentifier(identifier workapiv1.ResourceIdentifier, manifestConditions []workapiv1.ManifestCondition) *workapiv1.StatusFeedbackResult {
+func findResultsByIdentifier(identifier workapiv1.ResourceIdentifier,
+    manifestConditions []workapiv1.ManifestCondition) []agent.ResultField {
+    var results []agent.ResultField
     for _, status := range manifestConditions {
-        if identifier.Group != status.ResourceMeta.Group {
-            continue
-        }
-        if identifier.Resource != status.ResourceMeta.Resource {
-            continue
-        }
-        if identifier.Name != status.ResourceMeta.Name {
-            continue
-        }
-        if identifier.Namespace != status.ResourceMeta.Namespace {
-            continue
+        if resourceMatch(status.ResourceMeta, identifier) && len(status.StatusFeedbacks.Values) != 0 {
+            results = append(results, agent.ResultField{
+                ResourceIdentifier: workapiv1.ResourceIdentifier{
+                    Group:     status.ResourceMeta.Group,
+                    Resource:  status.ResourceMeta.Resource,
+                    Name:      status.ResourceMeta.Name,
+                    Namespace: status.ResourceMeta.Namespace,
+                },
+                FeedbackResult: status.StatusFeedbacks,
+            })
         }
+    }

-        if len(status.StatusFeedbacks.Values) == 0 {
-            return nil
-        }
+    return results
+}

-        return &status.StatusFeedbacks
+// wildcardMatch compares two strings; target may include the wildcard '*'
+func wildcardMatch(resource, target string) bool {
+    if resource == target || target == "*" {
+        return true
     }
-    return nil
+
+    pattern := "^" + regexp.QuoteMeta(target) + "$"
+    pattern = strings.ReplaceAll(pattern, "\\*", ".*")
+
+    re, err := regexp.Compile(pattern)
+    if err != nil {
+        return false
+    }
+
+    return re.MatchString(resource)
+}
+
+func resourceMatch(resourceMeta workapiv1.ManifestResourceMeta, resource workapiv1.ResourceIdentifier) bool {
+    return resourceMeta.Group == resource.Group &&
+        resourceMeta.Resource == resource.Resource &&
+        wildcardMatch(resourceMeta.Namespace, resource.Namespace) &&
+        wildcardMatch(resourceMeta.Name, resource.Name)
+}
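`wildcardMatch` short-circuits on an exact match or a bare `*`; otherwise it quotes the target and expands each `*` into `.*` before compiling an anchored regular expression. Since the function is unexported, a quick truth table would have to live in package `agentdeploy` as a test; the expected results below are a sketch of the behavior just described:

```go
func TestWildcardMatch(t *testing.T) {
	cases := []struct {
		resource, target string
		want             bool
	}{
		{"helloworldhelm-agent", "helloworldhelm-agent", true}, // exact match short-circuits
		{"helloworldhelm-agent", "*", true},                    // bare wildcard matches anything
		{"helloworldhelm-agent", "helloworld*", true},          // '*' expands to '.*'
		{"kube-system", "kube-*", true},
		{"default", "kube-*", false},
	}
	for _, c := range cases {
		if got := wildcardMatch(c.resource, c.target); got != c.want {
			t.Errorf("wildcardMatch(%q, %q) = %v, want %v", c.resource, c.target, got, c.want)
		}
	}
}
```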
diff --git a/pkg/addonmanager/controllers/agentdeploy/healthcheck_sync_test.go b/pkg/addonmanager/controllers/agentdeploy/healthcheck_sync_test.go
index 8bd18581..864d816e 100644
--- a/pkg/addonmanager/controllers/agentdeploy/healthcheck_sync_test.go
+++ b/pkg/addonmanager/controllers/agentdeploy/healthcheck_sync_test.go
@@ -2,6 +2,7 @@ package agentdeploy

 import (
     "context"
+    "fmt"
     "testing"
     "time"

@@ -362,7 +363,163 @@ func TestHealthCheckReconcile(t *testing.T) {
                 Message: "test add-on is available.",
             },
         },
-
+        {
+            name: "Health check mode is work and WorkProber check pass with addonHealthCheckAllFunc",
+            testAddon: &healthCheckTestAgent{name: "test",
+                health: newDeploymentsCheckAllProber(types.NamespacedName{Name: "test-deployment0", Namespace: "default"},
+                    types.NamespacedName{Name: "test-deployment1", Namespace: "default"}),
+            },
+            addon: addontesting.NewAddonWithConditions("test", "cluster1", manifestAppliedCondition),
+            existingWork: []runtime.Object{
+                &v1.ManifestWork{
+                    ObjectMeta: metav1.ObjectMeta{
+                        Name:      "addon-test-deploy-01",
+                        Namespace: "cluster1",
+                        Labels: map[string]string{
+                            "open-cluster-management.io/addon-name": "test",
+                        },
+                    },
+                    Spec: v1.ManifestWorkSpec{},
+                    Status: v1.ManifestWorkStatus{
+                        ResourceStatus: v1.ManifestResourceStatus{
+                            Manifests: []v1.ManifestCondition{
+                                {
+                                    ResourceMeta: v1.ManifestResourceMeta{
+                                        Ordinal:   0,
+                                        Group:     "apps",
+                                        Version:   "",
+                                        Kind:      "",
+                                        Resource:  "deployments",
+                                        Name:      "test-deployment0",
+                                        Namespace: "default",
+                                    },
+                                    StatusFeedbacks: v1.StatusFeedbackResult{
+                                        Values: []v1.FeedbackValue{
+                                            {
+                                                Name: "Replicas",
+                                                Value: v1.FieldValue{
+                                                    Integer: boolPtr(1),
+                                                },
+                                            },
+                                            {
+                                                Name: "ReadyReplicas",
+                                                Value: v1.FieldValue{
+                                                    Integer: boolPtr(2),
+                                                },
+                                            },
+                                        },
+                                    },
+                                },
+                                {
+                                    ResourceMeta: v1.ManifestResourceMeta{
+                                        Ordinal:   0,
+                                        Group:     "apps",
+                                        Version:   "",
+                                        Kind:      "",
+                                        Resource:  "deployments",
+                                        Name:      "test-deployment1",
+                                        Namespace: "default",
+                                    },
+                                    StatusFeedbacks: v1.StatusFeedbackResult{},
+                                },
+                            },
+                        },
+                        Conditions: []metav1.Condition{
+                            {
+                                Type:   v1.WorkAvailable,
+                                Status: metav1.ConditionTrue,
+                            },
+                        },
+                    },
+                },
+            },
+            expectedErr:             nil,
+            expectedHealthCheckMode: addonapiv1alpha1.HealthCheckModeCustomized,
+            expectAvailableCondition: metav1.Condition{
+                Type:    addonapiv1alpha1.ManagedClusterAddOnConditionAvailable,
+                Status:  metav1.ConditionTrue,
+                Reason:  addonapiv1alpha1.AddonAvailableReasonProbeAvailable,
+                Message: "test add-on is available.",
+            },
+        },
+        {
+            name: "Health check mode is work and WorkProber check pass with addonHealthCheckAllFunc and wildcard",
+            testAddon: &healthCheckTestAgent{name: "test",
+                health: newDeploymentsCheckAllProber(types.NamespacedName{Name: "*", Namespace: "*"}),
+            },
+            addon: addontesting.NewAddonWithConditions("test", "cluster1", manifestAppliedCondition),
+            existingWork: []runtime.Object{
+                &v1.ManifestWork{
+                    ObjectMeta: metav1.ObjectMeta{
+                        Name:      "addon-test-deploy-01",
+                        Namespace: "cluster1",
+                        Labels: map[string]string{
+                            "open-cluster-management.io/addon-name": "test",
+                        },
+                    },
+                    Spec: v1.ManifestWorkSpec{},
+                    Status: v1.ManifestWorkStatus{
+                        ResourceStatus: v1.ManifestResourceStatus{
+                            Manifests: []v1.ManifestCondition{
+                                {
+                                    ResourceMeta: v1.ManifestResourceMeta{
+                                        Ordinal:   0,
+                                        Group:     "apps",
+                                        Version:   "",
+                                        Kind:      "",
+                                        Resource:  "deployments",
+                                        Name:      "test-deployment0",
+                                        Namespace: "default",
+                                    },
+                                    StatusFeedbacks: v1.StatusFeedbackResult{
+                                        Values: []v1.FeedbackValue{
+                                            {
+                                                Name: "Replicas",
+                                                Value: v1.FieldValue{
+                                                    Integer: boolPtr(1),
+                                                },
+                                            },
+                                            {
+                                                Name: "ReadyReplicas",
+                                                Value: v1.FieldValue{
+                                                    Integer: boolPtr(2),
+                                                },
+                                            },
+                                        },
+                                    },
+                                },
+                                {
+                                    ResourceMeta: v1.ManifestResourceMeta{
+                                        Ordinal:   0,
+                                        Group:     "apps",
+                                        Version:   "",
+                                        Kind:      "",
+                                        Resource:  "deployments",
+                                        Name:      "test-deployment1",
+                                        Namespace: "default",
+                                    },
+                                    StatusFeedbacks: v1.StatusFeedbackResult{},
+                                },
+                            },
+                        },
+                        Conditions: []metav1.Condition{
+                            {
+                                Type:   v1.WorkAvailable,
+                                Status: metav1.ConditionTrue,
+                            },
+                        },
+                    },
+                },
+            },
+            expectedErr:             nil,
+            expectedHealthCheckMode: addonapiv1alpha1.HealthCheckModeCustomized,
+            expectAvailableCondition: metav1.Condition{
+                Type:    addonapiv1alpha1.ManagedClusterAddOnConditionAvailable,
+                Status:  metav1.ConditionTrue,
+                Reason:  addonapiv1alpha1.AddonAvailableReasonProbeAvailable,
+                Message: "test add-on is available.",
+            },
+        },
         {
             name: "Health check mode is deployment availability but manifestApplied condition is not true",
             testAddon: &healthCheckTestAgent{name: "test",
@@ -929,3 +1086,34 @@ func TestHealthCheckReconcile(t *testing.T) {
         })
     }
 }
+
+func addonHealthCheckAllFunc(resultFields []agent.ResultField) error {
+    for _, field := range resultFields {
+        switch field.ResourceIdentifier.Resource {
+        case "deployments":
+            err := utils.DeploymentAvailabilityHealthCheck(field.ResourceIdentifier, field.FeedbackResult)
+            if err == nil {
+                return nil
+            }
+        }
+    }
+    return fmt.Errorf("no deployment in the results passed the availability check")
+}
+
+func newDeploymentsCheckAllProber(deployments ...types.NamespacedName) *agent.HealthProber {
+    probeFields := []agent.ProbeField{}
+    for _, deploy := range deployments {
+        mc := utils.DeploymentWellKnowManifestConfig(deploy.Namespace, deploy.Name)
+        probeFields = append(probeFields, agent.ProbeField{
+            ResourceIdentifier: mc.ResourceIdentifier,
+            ProbeRules:         mc.FeedbackRules,
+        })
+    }
+    return &agent.HealthProber{
+        Type: agent.HealthProberTypeWork,
+        WorkProber: &agent.WorkHealthProber{
+            ProbeFields:    probeFields,
+            HealthCheckAll: addonHealthCheckAllFunc,
+        },
+    }
+}
"", + Kind: "", + Resource: "deployments", + Name: "test-deployment1", + Namespace: "default", + }, + StatusFeedbacks: v1.StatusFeedbackResult{}, + }, + }, + }, + Conditions: []metav1.Condition{ + { + Type: v1.WorkAvailable, + Status: metav1.ConditionTrue, + }, + }, + }, + }, + }, + expectedErr: nil, + expectedHealthCheckMode: addonapiv1alpha1.HealthCheckModeCustomized, + expectAvailableCondition: metav1.Condition{ + Type: addonapiv1alpha1.ManagedClusterAddOnConditionAvailable, + Status: metav1.ConditionTrue, + Reason: addonapiv1alpha1.AddonAvailableReasonProbeAvailable, + Message: "test add-on is available.", + }, + }, + { + name: "Health check mode is work and WorkProber check pass with addonHealthCheckAllFunc and wildcard", + testAddon: &healthCheckTestAgent{name: "test", + health: newDeploymentsCheckAllProber(types.NamespacedName{Name: "*", Namespace: "*"}), + }, + addon: addontesting.NewAddonWithConditions("test", "cluster1", manifestAppliedCondition), + existingWork: []runtime.Object{ + &v1.ManifestWork{ + ObjectMeta: metav1.ObjectMeta{ + Name: "addon-test-deploy-01", + Namespace: "cluster1", + Labels: map[string]string{ + "open-cluster-management.io/addon-name": "test", + }, + }, + Spec: v1.ManifestWorkSpec{}, + Status: v1.ManifestWorkStatus{ + ResourceStatus: v1.ManifestResourceStatus{ + Manifests: []v1.ManifestCondition{ + { + ResourceMeta: v1.ManifestResourceMeta{ + Ordinal: 0, + Group: "apps", + Version: "", + Kind: "", + Resource: "deployments", + Name: "test-deployment0", + Namespace: "default", + }, + StatusFeedbacks: v1.StatusFeedbackResult{ + Values: []v1.FeedbackValue{ + { + Name: "Replicas", + Value: v1.FieldValue{ + Integer: boolPtr(1), + }, + }, + { + Name: "ReadyReplicas", + Value: v1.FieldValue{ + Integer: boolPtr(2), + }, + }, + }, + }, + }, + { + ResourceMeta: v1.ManifestResourceMeta{ + Ordinal: 0, + Group: "apps", + Version: "", + Kind: "", + Resource: "deployments", + Name: "test-deployment1", + Namespace: "default", + }, + StatusFeedbacks: v1.StatusFeedbackResult{}, + }, + }, + }, + Conditions: []metav1.Condition{ + { + Type: v1.WorkAvailable, + Status: metav1.ConditionTrue, + }, + }, + }, + }, + }, + expectedErr: nil, + expectedHealthCheckMode: addonapiv1alpha1.HealthCheckModeCustomized, + expectAvailableCondition: metav1.Condition{ + Type: addonapiv1alpha1.ManagedClusterAddOnConditionAvailable, + Status: metav1.ConditionTrue, + Reason: addonapiv1alpha1.AddonAvailableReasonProbeAvailable, + Message: "test add-on is available.", + }, + }, { name: "Health check mode is deployment availability but manifestApplied condition is not true", testAddon: &healthCheckTestAgent{name: "test", @@ -929,3 +1086,34 @@ func TestHealthCheckReconcile(t *testing.T) { }) } } + +func addonHealthCheckAllFunc(resultFields []agent.ResultField) error { + for _, field := range resultFields { + switch field.ResourceIdentifier.Resource { + case "deployments": + err := utils.DeploymentAvailabilityHealthCheck(field.ResourceIdentifier, field.FeedbackResult) + if err == nil { + return nil + } + } + } + return fmt.Errorf("not meet the results") +} + +func newDeploymentsCheckAllProber(deployments ...types.NamespacedName) *agent.HealthProber { + probeFields := []agent.ProbeField{} + for _, deploy := range deployments { + mc := utils.DeploymentWellKnowManifestConfig(deploy.Namespace, deploy.Name) + probeFields = append(probeFields, agent.ProbeField{ + ResourceIdentifier: mc.ResourceIdentifier, + ProbeRules: mc.FeedbackRules, + }) + } + return &agent.HealthProber{ + Type: agent.HealthProberTypeWork, 
diff --git a/pkg/utils/probe_helper.go b/pkg/utils/probe_helper.go
index 0d782a13..06be402c 100644
--- a/pkg/utils/probe_helper.go
+++ b/pkg/utils/probe_helper.go
@@ -37,6 +37,32 @@ func NewDeploymentProber(deployments ...types.NamespacedName) *agent.HealthProbe
     }
 }

+func NewAllDeploymentsProber() *agent.HealthProber {
+    probeFields := []agent.ProbeField{
+        {
+            ResourceIdentifier: workapiv1.ResourceIdentifier{
+                Group:     "apps",
+                Resource:  "deployments",
+                Name:      "*",
+                Namespace: "*",
+            },
+            ProbeRules: []workapiv1.FeedbackRule{
+                {
+                    Type: workapiv1.WellKnownStatusType,
+                },
+            },
+        },
+    }
+
+    return &agent.HealthProber{
+        Type: agent.HealthProberTypeWork,
+        WorkProber: &agent.WorkHealthProber{
+            ProbeFields:    probeFields,
+            HealthCheckAll: AllDeploymentsAvailabilityHealthCheck,
+        },
+    }
+}
+
 func (d *DeploymentProber) ProbeFields() []agent.ProbeField {
     probeFields := []agent.ProbeField{}
     for _, deploy := range d.deployments {
@@ -62,6 +88,19 @@ func DeploymentAvailabilityHealthCheck(identifier workapiv1.ResourceIdentifier,
     return WorkloadAvailabilityHealthCheck(identifier, result)
 }

+func AllDeploymentsAvailabilityHealthCheck(results []agent.ResultField) error {
+    // expect feedback from at least two deployments; fewer results means some
+    // probed deployment has not reported status yet
+    if len(results) < 2 {
+        return fmt.Errorf("not all deployments are available, got %d feedback results", len(results))
+    }
+
+    for _, result := range results {
+        if err := WorkloadAvailabilityHealthCheck(result.ResourceIdentifier, result.FeedbackResult); err != nil {
+            return err
+        }
+    }
+    return nil
+}
+
 func WorkloadAvailabilityHealthCheck(identifier workapiv1.ResourceIdentifier,
     result workapiv1.StatusFeedbackResult) error {
     // only support deployments and daemonsets for now
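A wiring sketch for the new helper: any addon whose workloads are all Deployments can pass `NewAllDeploymentsProber()` to the factory instead of enumerating `NamespacedName`s. The addon name and the embedded `manifests` directory below are placeholders, following the existing helloworld examples:

```go
import (
	"embed"

	"open-cluster-management.io/addon-framework/pkg/addonfactory"
	"open-cluster-management.io/addon-framework/pkg/agent"
	"open-cluster-management.io/addon-framework/pkg/utils"
)

//go:embed manifests
var manifestFS embed.FS

// newAgentAddon is an illustrative wiring example; "manifests" is an assumed
// embedded directory of template manifests.
func newAgentAddon(addonName string) (agent.AgentAddon, error) {
	return addonfactory.NewAgentAddonFactory(addonName, manifestFS, "manifests").
		WithAgentHealthProber(utils.NewAllDeploymentsProber()).
		BuildTemplateAgentAddon()
}
```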
wildcard", func() { + deploy1 := &unstructured.Unstructured{} + err := deploy1.UnmarshalJSON([]byte(deploymentJson)) + deploy1.SetName("deployment1") + deploy1.SetNamespace("ns1") + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + deploy2 := &unstructured.Unstructured{} + err = deploy2.UnmarshalJSON([]byte(deploymentJson)) + deploy2.SetName("deployment2") + deploy2.SetNamespace("ns2") + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + testAddonImpl.manifests[managedClusterName] = []runtime.Object{deploy1, deploy2} + testAddonImpl.prober = utils.NewAllDeploymentsProber() + + addon := &addonapiv1alpha1.ManagedClusterAddOn{ + ObjectMeta: metav1.ObjectMeta{ + Name: testAddonImpl.name, + }, + Spec: addonapiv1alpha1.ManagedClusterAddOnSpec{ + InstallNamespace: "default", + }, + } + createManagedClusterAddOnwithOwnerRefs(managedClusterName, addon, cma) + + gomega.Eventually(func() error { + work, err := hubWorkClient.WorkV1().ManifestWorks(managedClusterName).Get(context.Background(), manifestWorkName, metav1.GetOptions{}) + if err != nil { + return err + } + + if len(work.Spec.Workload.Manifests) != 2 { + return fmt.Errorf("unexpected number of work manifests") + } + + if len(work.Spec.ManifestConfigs) != 1 { + return fmt.Errorf("unexpected number of work manifests configuration") + } + + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + // Update work status to trigger addon status + work, err := hubWorkClient.WorkV1().ManifestWorks(managedClusterName).Get(context.Background(), manifestWorkName, metav1.GetOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + meta.SetStatusCondition(&work.Status.Conditions, metav1.Condition{Type: workapiv1.WorkAvailable, Status: metav1.ConditionTrue, Reason: "WorkAvailable"}) + + replica := int64(1) + + // update work status to a wrong feedback status + work.Status.ResourceStatus = workapiv1.ManifestResourceStatus{ + Manifests: []workapiv1.ManifestCondition{ + { + ResourceMeta: workapiv1.ManifestResourceMeta{ + Ordinal: 0, + Group: "apps", + Resource: "deployments", + Name: "deployment1", + Namespace: "ns1", + }, + StatusFeedbacks: workapiv1.StatusFeedbackResult{ + Values: []workapiv1.FeedbackValue{ + { + Name: "Replicas", + Value: workapiv1.FieldValue{ + Type: workapiv1.Integer, + Integer: &replica, + }, + }, + }, + }, + Conditions: []metav1.Condition{ + { + Type: "Available", + Status: metav1.ConditionTrue, + Reason: "MinimumReplicasAvailable", + Message: "Deployment has minimum availability.", + LastTransitionTime: metav1.NewTime(time.Now()), + }, + }, + }, + { + ResourceMeta: workapiv1.ManifestResourceMeta{ + Ordinal: 1, + Group: "apps", + Resource: "deployments", + Name: "deployment2", + Namespace: "ns2", + }, + StatusFeedbacks: workapiv1.StatusFeedbackResult{ + Values: []workapiv1.FeedbackValue{}, + }, + Conditions: []metav1.Condition{ + { + Type: "Available", + Status: metav1.ConditionFalse, + Reason: "MinimumReplicasAvailable", + Message: "Deployment has minimum availability.", + LastTransitionTime: metav1.NewTime(time.Now()), + }, + }, + }, + }, + } + meta.SetStatusCondition(&work.Status.Conditions, metav1.Condition{Type: workapiv1.WorkApplied, Status: metav1.ConditionTrue, Reason: "WorkApplied"}) + _, err = hubWorkClient.WorkV1().ManifestWorks(managedClusterName).UpdateStatus(context.Background(), work, metav1.UpdateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + gomega.Eventually(func() error { + addon, err := 
hubAddonClient.AddonV1alpha1().ManagedClusterAddOns(managedClusterName).Get(context.Background(), testAddonImpl.name, metav1.GetOptions{}) + if err != nil { + return err + } + + if !meta.IsStatusConditionFalse(addon.Status.Conditions, "Available") { + return fmt.Errorf("unexpected addon available condition, %#v", addon.Status) + } + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + // update to the correct condition + work, err = hubWorkClient.WorkV1().ManifestWorks(managedClusterName).Get(context.Background(), manifestWorkName, metav1.GetOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + work.Status.ResourceStatus = workapiv1.ManifestResourceStatus{ + Manifests: []workapiv1.ManifestCondition{ + { + ResourceMeta: workapiv1.ManifestResourceMeta{ + Ordinal: 0, + Group: "apps", + Resource: "deployments", + Name: "deployment1", + Namespace: "ns1", + }, + StatusFeedbacks: workapiv1.StatusFeedbackResult{ + Values: []workapiv1.FeedbackValue{ + { + Name: "ReadyReplicas", + Value: workapiv1.FieldValue{ + Type: workapiv1.Integer, + Integer: &replica, + }, + }, + { + Name: "Replicas", + Value: workapiv1.FieldValue{ + Type: workapiv1.Integer, + Integer: &replica, + }, + }, + }, + }, + Conditions: []metav1.Condition{ + { + Type: "Available", + Status: metav1.ConditionTrue, + Reason: "MinimumReplicasAvailable", + Message: "Deployment has minimum availability.", + LastTransitionTime: metav1.NewTime(time.Now()), + }, + }, + }, + { + ResourceMeta: workapiv1.ManifestResourceMeta{ + Ordinal: 1, + Group: "apps", + Resource: "deployments", + Name: "deployment2", + Namespace: "ns2", + }, + StatusFeedbacks: workapiv1.StatusFeedbackResult{ + Values: []workapiv1.FeedbackValue{ + { + Name: "ReadyReplicas", + Value: workapiv1.FieldValue{ + Type: workapiv1.Integer, + Integer: &replica, + }, + }, + { + Name: "Replicas", + Value: workapiv1.FieldValue{ + Type: workapiv1.Integer, + Integer: &replica, + }, + }, + }, + }, + Conditions: []metav1.Condition{ + { + Type: "Available", + Status: metav1.ConditionTrue, + Reason: "MinimumReplicasAvailable", + Message: "Deployment has minimum availability.", + LastTransitionTime: metav1.NewTime(time.Now()), + }, + }, + }, + }, + } + _, err = hubWorkClient.WorkV1().ManifestWorks(managedClusterName).UpdateStatus(context.Background(), work, metav1.UpdateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + gomega.Eventually(func() error { + addon, err := hubAddonClient.AddonV1alpha1().ManagedClusterAddOns(managedClusterName).Get(context.Background(), testAddonImpl.name, metav1.GetOptions{}) + if err != nil { + return err + } + + if !meta.IsStatusConditionTrue(addon.Status.Conditions, "Available") { + return fmt.Errorf("unexpected addon available condition, %v", addon.Status.Conditions) + } + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + }) ginkgo.It("Should deploy agent and get available with deployment availability prober func", func() { obj := &unstructured.Unstructured{} diff --git a/vendor/modules.txt b/vendor/modules.txt index 05c46378..b3701b30 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1372,7 +1372,7 @@ k8s.io/utils/pointer k8s.io/utils/ptr k8s.io/utils/strings/slices k8s.io/utils/trace -# open-cluster-management.io/api v0.15.0 +# open-cluster-management.io/api v0.15.1-0.20241120090202-cb7ce98ab874 ## explicit; go 1.22.0 open-cluster-management.io/api/addon/v1alpha1 open-cluster-management.io/api/client/addon/clientset/versioned diff --git 
diff --git a/vendor/open-cluster-management.io/api/addon/v1alpha1/0000_03_addon.open-cluster-management.io_addontemplates.crd.yaml b/vendor/open-cluster-management.io/api/addon/v1alpha1/0000_03_addon.open-cluster-management.io_addontemplates.crd.yaml
index 64d46da0..8c7718c8 100644
--- a/vendor/open-cluster-management.io/api/addon/v1alpha1/0000_03_addon.open-cluster-management.io_addontemplates.crd.yaml
+++ b/vendor/open-cluster-management.io/api/addon/v1alpha1/0000_03_addon.open-cluster-management.io_addontemplates.crd.yaml
@@ -203,6 +203,9 @@ spec:
                             - path
                             type: object
                           type: array
+                          x-kubernetes-list-map-keys:
+                          - name
+                          x-kubernetes-list-type: map
                         type:
                           description: |-
                             Type defines the option of how status can be returned.
diff --git a/vendor/open-cluster-management.io/api/work/v1/0000_00_work.open-cluster-management.io_manifestworks.crd.yaml b/vendor/open-cluster-management.io/api/work/v1/0000_00_work.open-cluster-management.io_manifestworks.crd.yaml
index efb889f9..3542af39 100644
--- a/vendor/open-cluster-management.io/api/work/v1/0000_00_work.open-cluster-management.io_manifestworks.crd.yaml
+++ b/vendor/open-cluster-management.io/api/work/v1/0000_00_work.open-cluster-management.io_manifestworks.crd.yaml
@@ -187,6 +187,9 @@ spec:
                             - path
                             type: object
                           type: array
+                          x-kubernetes-list-map-keys:
+                          - name
+                          x-kubernetes-list-type: map
                         type:
                           description: |-
                             Type defines the option of how status can be returned.
diff --git a/vendor/open-cluster-management.io/api/work/v1/types.go b/vendor/open-cluster-management.io/api/work/v1/types.go
index 0a66a837..08f560fd 100644
--- a/vendor/open-cluster-management.io/api/work/v1/types.go
+++ b/vendor/open-cluster-management.io/api/work/v1/types.go
@@ -226,6 +226,8 @@ type FeedbackRule struct {
     Type FeedBackType `json:"type"`

     // JsonPaths defines the json path under status field to be synced.
+    // +listType:=map
+    // +listMapKey:=name
     // +optional
     JsonPaths []JsonPath `json:"jsonPaths,omitempty"`
 }
diff --git a/vendor/open-cluster-management.io/api/work/v1alpha1/0000_00_work.open-cluster-management.io_manifestworkreplicasets.crd.yaml b/vendor/open-cluster-management.io/api/work/v1alpha1/0000_00_work.open-cluster-management.io_manifestworkreplicasets.crd.yaml
index 6acb6340..499146f2 100644
--- a/vendor/open-cluster-management.io/api/work/v1alpha1/0000_00_work.open-cluster-management.io_manifestworkreplicasets.crd.yaml
+++ b/vendor/open-cluster-management.io/api/work/v1alpha1/0000_00_work.open-cluster-management.io_manifestworkreplicasets.crd.yaml
@@ -210,6 +210,9 @@ spec:
                             - path
                             type: object
                           type: array
+                          x-kubernetes-list-map-keys:
+                          - name
+                          x-kubernetes-list-type: map
                         type:
                           description: |-
                             Type defines the option of how status can be returned.