diff --git a/pkg/addonmanager/controllers/agentdeploy/healthcheck_sync.go b/pkg/addonmanager/controllers/agentdeploy/healthcheck_sync.go
index e9955d5d..992646e3 100644
--- a/pkg/addonmanager/controllers/agentdeploy/healthcheck_sync.go
+++ b/pkg/addonmanager/controllers/agentdeploy/healthcheck_sync.go
@@ -34,7 +34,8 @@ func (s *healthCheckSyncer) sync(ctx context.Context,
 	}
 
 	switch s.agentAddon.GetAgentAddonOptions().HealthProber.Type {
-	case agent.HealthProberTypeWork, agent.HealthProberTypeNone, agent.HealthProberTypeDeploymentAvailability:
+	case agent.HealthProberTypeWork, agent.HealthProberTypeNone,
+		agent.HealthProberTypeDeploymentAvailability, agent.HealthProberTypeWorkloadAvailability:
 		expectedHealthCheckMode = addonapiv1alpha1.HealthCheckModeCustomized
 	case agent.HealthProberTypeLease:
 		expectedHealthCheckMode = addonapiv1alpha1.HealthCheckModeLease
@@ -58,6 +59,8 @@ func (s *healthCheckSyncer) probeAddonStatus(
 		return s.probeWorkAddonStatus(cluster, addon)
 	case agent.HealthProberTypeDeploymentAvailability:
 		return s.probeDeploymentAvailabilityAddonStatus(cluster, addon)
+	case agent.HealthProberTypeWorkloadAvailability:
+		return s.probeWorkloadAvailabilityAddonStatus(cluster, addon)
 	default:
 		return nil
 	}
@@ -89,8 +92,15 @@ func (s *healthCheckSyncer) probeWorkAddonStatus(
 func (s *healthCheckSyncer) probeDeploymentAvailabilityAddonStatus(
 	cluster *clusterv1.ManagedCluster, addon *addonapiv1alpha1.ManagedClusterAddOn) error {
+	return s.probeWorkloadAvailabilityAddonStatus(cluster, addon)
+}
+
+func (s *healthCheckSyncer) probeWorkloadAvailabilityAddonStatus(
+	cluster *clusterv1.ManagedCluster, addon *addonapiv1alpha1.ManagedClusterAddOn) error {
 
-	if s.agentAddon.GetAgentAddonOptions().HealthProber.Type != agent.HealthProberTypeDeploymentAvailability {
+	proberType := s.agentAddon.GetAgentAddonOptions().HealthProber.Type
+	if proberType != agent.HealthProberTypeDeploymentAvailability &&
+		proberType != agent.HealthProberTypeWorkloadAvailability {
 		return nil
 	}
 
@@ -212,6 +222,8 @@ func (s *healthCheckSyncer) analyzeWorkProber(
 		return nil, nil, fmt.Errorf("work prober is not configured")
 	case agent.HealthProberTypeDeploymentAvailability:
 		return s.analyzeDeploymentWorkProber(agentAddon, cluster, addon)
+	case agent.HealthProberTypeWorkloadAvailability:
+		return s.analyzeWorkloadsWorkProber(agentAddon, cluster, addon)
 	default:
 		return nil, nil, fmt.Errorf("unsupported health prober type %s", agentAddon.GetAgentAddonOptions().HealthProber.Type)
 	}
@@ -245,6 +257,31 @@ func (s *healthCheckSyncer) analyzeDeploymentWorkProber(
 	return probeFields, utils.DeploymentAvailabilityHealthCheck, nil
 }
 
+func (s *healthCheckSyncer) analyzeWorkloadsWorkProber(
+	agentAddon agent.AgentAddon,
+	cluster *clusterv1.ManagedCluster,
+	addon *addonapiv1alpha1.ManagedClusterAddOn,
+) ([]agent.ProbeField, agent.AddonHealthCheckFunc, error) {
+	probeFields := []agent.ProbeField{}
+
+	manifests, err := agentAddon.Manifests(cluster, addon)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	workloads := utils.FilterWorkloads(manifests)
+	for _, workload := range workloads {
+		manifestConfig := utils.WellKnowManifestConfig(workload.Group, workload.Resource,
+			workload.Namespace, workload.Name)
+		probeFields = append(probeFields, agent.ProbeField{
+			ResourceIdentifier: manifestConfig.ResourceIdentifier,
+			ProbeRules:         manifestConfig.FeedbackRules,
+		})
+	}
+
+	return probeFields, utils.WorkloadAvailabilityHealthCheck, nil
+}
+
 func findResultByIdentifier(identifier workapiv1.ResourceIdentifier, manifestConditions
[]workapiv1.ManifestCondition) *workapiv1.StatusFeedbackResult { for _, status := range manifestConditions { if identifier.Group != status.ResourceMeta.Group { diff --git a/pkg/addonmanager/controllers/agentdeploy/healthcheck_sync_test.go b/pkg/addonmanager/controllers/agentdeploy/healthcheck_sync_test.go index 1d292233..0b84799b 100644 --- a/pkg/addonmanager/controllers/agentdeploy/healthcheck_sync_test.go +++ b/pkg/addonmanager/controllers/agentdeploy/healthcheck_sync_test.go @@ -39,9 +39,13 @@ type healthCheckTestAgent struct { health *agent.HealthProber } -func (t *healthCheckTestAgent) Manifests(cluster *clusterv1.ManagedCluster, addon *addonapiv1alpha1.ManagedClusterAddOn) ([]runtime.Object, error) { +func (t *healthCheckTestAgent) Manifests(cluster *clusterv1.ManagedCluster, + addon *addonapiv1alpha1.ManagedClusterAddOn) ([]runtime.Object, error) { - return []runtime.Object{NewFakeDeployment("test-deployment", "default")}, nil + return []runtime.Object{ + NewFakeDeployment("test-deployment", "default"), + NewFakeDaemonSet("test-daemonset", "default"), + }, nil } func (t *healthCheckTestAgent) GetAgentAddonOptions() agent.AgentAddonOptions { @@ -84,6 +88,37 @@ func NewFakeDeployment(namespace, name string) *appsv1.Deployment { } } +func NewFakeDaemonSet(namespace, name string) *appsv1.DaemonSet { + return &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: namespace, + Namespace: name, + }, + Spec: appsv1.DaemonSetSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "test", + }, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "addon": "test", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "test", + Image: "test", + }, + }, + }, + }, + }, + } +} + func TestHealthCheckReconcile(t *testing.T) { cases := []struct { name string @@ -530,6 +565,270 @@ func TestHealthCheckReconcile(t *testing.T) { Message: "test add-on is available.", }, }, + + { + name: "Health check mode is workload availability but manifestApplied condition is not true", + testAddon: &healthCheckTestAgent{name: "test", + health: &agent.HealthProber{Type: agent.HealthProberTypeWorkloadAvailability}, + }, + addon: addontesting.NewAddon("test", "cluster1"), + expectedErr: nil, + expectedHealthCheckMode: addonapiv1alpha1.HealthCheckModeCustomized, + expectAvailableCondition: metav1.Condition{}, + }, + { + name: "Health check mode is workload availability but no work", + testAddon: &healthCheckTestAgent{name: "test", + health: &agent.HealthProber{Type: agent.HealthProberTypeWorkloadAvailability}, + }, + addon: addontesting.NewAddonWithConditions("test", "cluster1", manifestAppliedCondition), + expectedErr: nil, + expectedHealthCheckMode: addonapiv1alpha1.HealthCheckModeCustomized, + expectAvailableCondition: metav1.Condition{ + Type: addonapiv1alpha1.ManagedClusterAddOnConditionAvailable, + Status: metav1.ConditionUnknown, + Reason: addonapiv1alpha1.AddonAvailableReasonWorkNotFound, + Message: "Work for addon is not found", + }, + }, + { + name: "Health check mode is workload availability but work is unavailable", + testAddon: &healthCheckTestAgent{name: "test", + health: &agent.HealthProber{Type: agent.HealthProberTypeWorkloadAvailability}, + }, + addon: addontesting.NewAddonWithConditions("test", "cluster1", manifestAppliedCondition), + existingWork: []runtime.Object{ + &v1.ManifestWork{ + ObjectMeta: metav1.ObjectMeta{ + Name: "addon-test-deploy-01", + Namespace: "cluster1", + Labels: 
map[string]string{ + "open-cluster-management.io/addon-name": "test", + }, + }, + Spec: v1.ManifestWorkSpec{}, + Status: v1.ManifestWorkStatus{ + Conditions: []metav1.Condition{ + { + Type: v1.WorkAvailable, + Status: metav1.ConditionFalse, + Message: "failed to apply", + }, + }, + }, + }, + }, + expectedErr: nil, + expectedHealthCheckMode: addonapiv1alpha1.HealthCheckModeCustomized, + expectAvailableCondition: metav1.Condition{ + Type: addonapiv1alpha1.ManagedClusterAddOnConditionAvailable, + Status: metav1.ConditionFalse, + Reason: addonapiv1alpha1.AddonAvailableReasonWorkNotApply, + Message: "failed to apply", + }, + }, + { + name: "Health check mode is workload availability but no result", + testAddon: &healthCheckTestAgent{name: "test", + health: &agent.HealthProber{Type: agent.HealthProberTypeWorkloadAvailability}, + }, + addon: addontesting.NewAddonWithConditions("test", "cluster1", manifestAppliedCondition), + existingWork: []runtime.Object{ + &v1.ManifestWork{ + ObjectMeta: metav1.ObjectMeta{ + Name: "addon-test-deploy-01", + Namespace: "cluster1", + Labels: map[string]string{ + "open-cluster-management.io/addon-name": "test", + }, + }, + Spec: v1.ManifestWorkSpec{}, + Status: v1.ManifestWorkStatus{ + Conditions: []metav1.Condition{ + { + Type: v1.WorkAvailable, + Status: metav1.ConditionTrue, + }, + }, + }, + }, + }, + expectedErr: nil, + expectedHealthCheckMode: addonapiv1alpha1.HealthCheckModeCustomized, + expectAvailableCondition: metav1.Condition{ + Type: addonapiv1alpha1.ManagedClusterAddOnConditionAvailable, + Status: metav1.ConditionUnknown, + Reason: addonapiv1alpha1.AddonAvailableReasonNoProbeResult, + Message: "Probe results are not returned", + }, + }, + { + name: "Health check mode is workload availability but cluster availability is unknown", + cluster: &clusterv1.ManagedCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster1", + }, + Status: clusterv1.ManagedClusterStatus{ + Conditions: []metav1.Condition{ + { + Type: clusterv1.ManagedClusterConditionAvailable, + Status: metav1.ConditionUnknown, + }, + }, + }, + }, + testAddon: &healthCheckTestAgent{name: "test", + health: &agent.HealthProber{Type: agent.HealthProberTypeWorkloadAvailability}, + }, + addon: addontesting.NewAddonWithConditions("test", "cluster1", manifestAppliedCondition), + existingWork: []runtime.Object{ + &v1.ManifestWork{ + ObjectMeta: metav1.ObjectMeta{ + Name: "addon-test-deploy-01", + Namespace: "cluster1", + Labels: map[string]string{ + "open-cluster-management.io/addon-name": "test", + }, + }, + Spec: v1.ManifestWorkSpec{}, + Status: v1.ManifestWorkStatus{ + ResourceStatus: v1.ManifestResourceStatus{ + Manifests: []v1.ManifestCondition{ + { + ResourceMeta: v1.ManifestResourceMeta{ + Ordinal: 0, + Group: "apps", + Version: "", + Kind: "", + Resource: "daemonsets", + Name: "test-daemonset", + Namespace: "default", + }, + StatusFeedbacks: v1.StatusFeedbackResult{ + Values: []v1.FeedbackValue{ + { + Name: "DesiredNumberScheduled", + Value: v1.FieldValue{ + Integer: boolPtr(1), + }, + }, + { + Name: "NumberReady", + Value: v1.FieldValue{ + Integer: boolPtr(2), + }, + }, + }, + }, + }, + }, + }, + Conditions: []metav1.Condition{ + { + Type: v1.WorkAvailable, + Status: metav1.ConditionTrue, + }, + }, + }, + }, + }, + expectedErr: nil, + expectedHealthCheckMode: addonapiv1alpha1.HealthCheckModeCustomized, + expectAvailableCondition: metav1.Condition{}, + }, + { + name: "Health check mode is workload availability and WorkProber check pass", + testAddon: &healthCheckTestAgent{name: "test", + 
health: &agent.HealthProber{Type: agent.HealthProberTypeWorkloadAvailability}, + }, + addon: addontesting.NewAddonWithConditions("test", "cluster1", manifestAppliedCondition), + existingWork: []runtime.Object{ + &v1.ManifestWork{ + ObjectMeta: metav1.ObjectMeta{ + Name: "addon-test-deploy-01", + Namespace: "cluster1", + Labels: map[string]string{ + "open-cluster-management.io/addon-name": "test", + }, + }, + Spec: v1.ManifestWorkSpec{}, + Status: v1.ManifestWorkStatus{ + ResourceStatus: v1.ManifestResourceStatus{ + Manifests: []v1.ManifestCondition{ + { + ResourceMeta: v1.ManifestResourceMeta{ + Ordinal: 0, + Group: "apps", + Version: "", + Kind: "", + Resource: "deployments", + Name: "test-deployment", + Namespace: "default", + }, + StatusFeedbacks: v1.StatusFeedbackResult{ + Values: []v1.FeedbackValue{ + { + Name: "Replicas", + Value: v1.FieldValue{ + Integer: boolPtr(2), + }, + }, + { + Name: "ReadyReplicas", + Value: v1.FieldValue{ + Integer: boolPtr(2), + }, + }, + }, + }, + }, + { + ResourceMeta: v1.ManifestResourceMeta{ + Ordinal: 0, + Group: "apps", + Version: "", + Kind: "", + Resource: "daemonsets", + Name: "test-daemonset", + Namespace: "default", + }, + StatusFeedbacks: v1.StatusFeedbackResult{ + Values: []v1.FeedbackValue{ + { + Name: "DesiredNumberScheduled", + Value: v1.FieldValue{ + Integer: boolPtr(2), + }, + }, + { + Name: "NumberReady", + Value: v1.FieldValue{ + Integer: boolPtr(2), + }, + }, + }, + }, + }, + }, + }, + Conditions: []metav1.Condition{ + { + Type: v1.WorkAvailable, + Status: metav1.ConditionTrue, + }, + }, + }, + }, + }, + expectedErr: nil, + expectedHealthCheckMode: addonapiv1alpha1.HealthCheckModeCustomized, + expectAvailableCondition: metav1.Condition{ + Type: addonapiv1alpha1.ManagedClusterAddOnConditionAvailable, + Status: metav1.ConditionTrue, + Reason: addonapiv1alpha1.AddonAvailableReasonProbeAvailable, + Message: "test add-on is available.", + }, + }, } for _, c := range cases { @@ -571,7 +870,7 @@ func TestHealthCheckReconcile(t *testing.T) { if !equality.Semantic.DeepEqual(addon.Status.HealthCheck.Mode, c.expectedHealthCheckMode) { t.Errorf("name %s, expected err %v, but got %v", - c.name, addon.Status.HealthCheck.Mode, c.expectedHealthCheckMode) + c.name, c.expectedHealthCheckMode, addon.Status.HealthCheck.Mode) } if c.expectAvailableCondition.Type != "" { diff --git a/pkg/addonmanager/controllers/agentdeploy/util_test.go b/pkg/addonmanager/controllers/agentdeploy/util_test.go index d10c0cd9..b3c026e0 100644 --- a/pkg/addonmanager/controllers/agentdeploy/util_test.go +++ b/pkg/addonmanager/controllers/agentdeploy/util_test.go @@ -226,6 +226,45 @@ func TestGetManifestConfigOption(t *testing.T) { }, }, }, + { + name: "workload availability type", + agentAddon: &testAgent{ + name: "test", + objects: []runtime.Object{ + NewFakeDeployment("test-deployment", "default"), + NewFakeDaemonSet("test-daemonset", "default"), + }, + healthProber: &agent.HealthProber{Type: agent.HealthProberTypeWorkloadAvailability}, + }, + expectedManifestConfigOption: []workapiv1.ManifestConfigOption{ + { + ResourceIdentifier: workapiv1.ResourceIdentifier{ + Group: "apps", + Resource: "deployments", + Name: "test-deployment", + Namespace: "default", + }, + FeedbackRules: []workapiv1.FeedbackRule{ + { + Type: workapiv1.WellKnownStatusType, + }, + }, + }, + { + ResourceIdentifier: workapiv1.ResourceIdentifier{ + Group: "apps", + Resource: "daemonsets", + Name: "test-daemonset", + Namespace: "default", + }, + FeedbackRules: []workapiv1.FeedbackRule{ + { + Type: 
workapiv1.WellKnownStatusType,
+					},
+				},
+			},
+		},
+	},
 	{
 		name: "set updater",
 		agentAddon: &testAgent{
diff --git a/pkg/addonmanager/controllers/agentdeploy/utils.go b/pkg/addonmanager/controllers/agentdeploy/utils.go
index 7df50357..7f3a0a33 100644
--- a/pkg/addonmanager/controllers/agentdeploy/utils.go
+++ b/pkg/addonmanager/controllers/agentdeploy/utils.go
@@ -501,6 +501,21 @@ func getManifestConfigOption(agentAddon agent.AgentAddon,
 		}
 	}
 
+	if agentAddon.GetAgentAddonOptions().HealthProber != nil &&
+		agentAddon.GetAgentAddonOptions().HealthProber.Type == agent.HealthProberTypeWorkloadAvailability {
+
+		manifests, err := agentAddon.Manifests(cluster, addon)
+		if err != nil {
+			return manifestConfigs
+		}
+		workloads := utils.FilterWorkloads(manifests)
+		for _, workload := range workloads {
+			manifestConfig := utils.WellKnowManifestConfig(workload.Group, workload.Resource,
+				workload.Namespace, workload.Name)
+			manifestConfigs = append(manifestConfigs, manifestConfig)
+		}
+	}
+
 	if updaters := agentAddon.GetAgentAddonOptions().Updaters; updaters != nil {
 		for _, updater := range updaters {
 			strategy := updater.UpdateStrategy
diff --git a/pkg/agent/inteface.go b/pkg/agent/inteface.go
index 721426cc..edbbc8b8 100644
--- a/pkg/agent/inteface.go
+++ b/pkg/agent/inteface.go
@@ -203,6 +203,11 @@ const (
 	// with the availability of the corresponding agent deployment resources on the managed cluster.
 	// It's a special case of HealthProberTypeWork.
 	HealthProberTypeDeploymentAvailability HealthProberType = "DeploymentAvailability"
+	// HealthProberTypeWorkloadAvailability indicates the healthiness of the addon is connected
+	// with the availability of all the corresponding agent workload resources (only Deployment and
+	// DaemonSet are supported for now) on the managed cluster.
+	// It's a special case of HealthProberTypeWork.
+	HealthProberTypeWorkloadAvailability HealthProberType = "WorkloadAvailability"
 )
 
 func KubeClientSignerConfigurations(addonName, agentName string) func(cluster *clusterv1.ManagedCluster) []addonapiv1alpha1.RegistrationConfig {
diff --git a/pkg/utils/probe_helper.go b/pkg/utils/probe_helper.go
index 4d038794..b43c6152 100644
--- a/pkg/utils/probe_helper.go
+++ b/pkg/utils/probe_helper.go
@@ -6,6 +6,7 @@ import (
 	appsv1 "k8s.io/api/apps/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/types"
 
 	workapiv1 "open-cluster-management.io/api/work/v1"
@@ -41,7 +42,7 @@ func (d *DeploymentProber) ProbeFields() []agent.ProbeField {
 	for _, deploy := range d.deployments {
 		probeFields = append(probeFields, agent.ProbeField{
 			ResourceIdentifier: workapiv1.ResourceIdentifier{
-				Group:     "apps",
+				Group:     appsv1.GroupName,
 				Resource:  "deployments",
 				Name:      deploy.Name,
 				Namespace: deploy.Namespace,
@@ -56,30 +57,66 @@ func (d *DeploymentProber) ProbeFields() []agent.ProbeField {
 	return probeFields
 }
 
-func DeploymentAvailabilityHealthCheck(identifier workapiv1.ResourceIdentifier, result workapiv1.StatusFeedbackResult) error {
-	if identifier.Resource != "deployments" {
+func DeploymentAvailabilityHealthCheck(identifier workapiv1.ResourceIdentifier,
+	result workapiv1.StatusFeedbackResult) error {
+	return WorkloadAvailabilityHealthCheck(identifier, result)
+}
+
+func WorkloadAvailabilityHealthCheck(identifier workapiv1.ResourceIdentifier,
+	result workapiv1.StatusFeedbackResult) error {
+	// only support deployments and daemonsets for now
+	if identifier.Resource != "deployments" && identifier.Resource != "daemonsets" {
 		return fmt.Errorf("unsupported resource type %s", identifier.Resource)
 	}
-	if identifier.Group != "apps" {
+	if identifier.Group != appsv1.GroupName {
 		return fmt.Errorf("unsupported resource group %s", identifier.Group)
 	}
 	if len(result.Values) == 0 {
-		return fmt.Errorf("no values are probed for deployment %s/%s", identifier.Namespace, identifier.Name)
+		return fmt.Errorf("no values are probed for %s %s/%s",
+			identifier.Resource, identifier.Namespace, identifier.Name)
 	}
+
+	readyReplicas := -1
+	desiredNumberReplicas := -1
 	for _, value := range result.Values {
-		if value.Name != "ReadyReplicas" {
-			continue
+		// for deployment
+		if value.Name == "ReadyReplicas" {
+			readyReplicas = int(*value.Value.Integer)
+		}
+		if value.Name == "Replicas" {
+			desiredNumberReplicas = int(*value.Value.Integer)
 		}
-		if *value.Value.Integer >= 1 {
-			return nil
+		// for daemonset
+		if value.Name == "NumberReady" {
+			readyReplicas = int(*value.Value.Integer)
+		}
+		if value.Name == "DesiredNumberScheduled" {
+			desiredNumberReplicas = int(*value.Value.Integer)
 		}
+	}
 
-		return fmt.Errorf("readyReplica is %d for deployment %s/%s",
-			*value.Value.Integer, identifier.Namespace, identifier.Name)
+	if readyReplicas == -1 {
+		return fmt.Errorf("readyReplica is not probed")
+	}
+	if desiredNumberReplicas == -1 {
+		return fmt.Errorf("desiredNumberReplicas is not probed")
 	}
-	return fmt.Errorf("readyReplica is not probed")
+
+	switch identifier.Resource {
+	case "deployments":
+		if desiredNumberReplicas == 0 || readyReplicas >= 1 {
+			return nil
+		}
+	case "daemonsets":
+		if readyReplicas == desiredNumberReplicas && readyReplicas > -1 {
+			return nil
+		}
+	}
+
+	return fmt.Errorf("desiredNumberReplicas is %d but readyReplica is %d for %s %s/%s",
+		desiredNumberReplicas, readyReplicas, identifier.Resource, identifier.Namespace,
+		identifier.Name)
 }
 
 func FilterDeployments(objects []runtime.Object) []*appsv1.Deployment {
@@ -94,12 +131,50 @@ func FilterDeployments(objects []runtime.Object) []*appsv1.Deployment {
 	return deployments
 }
 
+type WorkloadMetadata struct {
+	schema.GroupResource
+	types.NamespacedName
+}
+
+func FilterWorkloads(objects []runtime.Object) []WorkloadMetadata {
+	workloads := []WorkloadMetadata{}
+	for _, obj := range objects {
+		deployment, err := ConvertToDeployment(obj)
+		if err == nil {
+			workloads = append(workloads, WorkloadMetadata{
+				GroupResource: schema.GroupResource{
+					Group:    appsv1.GroupName,
+					Resource: "deployments",
+				},
+				NamespacedName: types.NamespacedName{
+					Namespace: deployment.Namespace,
+					Name:      deployment.Name,
+				},
+			})
+		}
+		daemonset, err := ConvertToDaemonSet(obj)
+		if err == nil {
+			workloads = append(workloads, WorkloadMetadata{
+				GroupResource: schema.GroupResource{
+					Group:    appsv1.GroupName,
+					Resource: "daemonsets",
+				},
+				NamespacedName: types.NamespacedName{
+					Namespace: daemonset.Namespace,
+					Name:      daemonset.Name,
+				},
+			})
+		}
+	}
+	return workloads
+}
+
 func ConvertToDeployment(obj runtime.Object) (*appsv1.Deployment, error) {
 	if deployment, ok := obj.(*appsv1.Deployment); ok {
 		return deployment, nil
 	}
 
-	if obj.GetObjectKind().GroupVersionKind().Group != "apps" ||
+	if obj.GetObjectKind().GroupVersionKind().Group != appsv1.GroupName ||
 		obj.GetObjectKind().GroupVersionKind().Kind != "Deployment" {
 		return nil, fmt.Errorf("not deployment object, %v", obj.GetObjectKind())
 	}
@@ -118,10 +193,14 @@ func ConvertToDeployment(obj runtime.Object) (*appsv1.Deployment, error) {
 }
 
 func DeploymentWellKnowManifestConfig(namespace, name string) workapiv1.ManifestConfigOption {
+	return WellKnowManifestConfig(appsv1.GroupName, "deployments", namespace, name)
+}
+
+func WellKnowManifestConfig(group, resources, namespace, name string) workapiv1.ManifestConfigOption {
 	return workapiv1.ManifestConfigOption{
 		ResourceIdentifier: workapiv1.ResourceIdentifier{
-			Group:     "apps",
-			Resource:  "deployments",
+			Group:     group,
+			Resource:  resources,
 			Name:      name,
 			Namespace: namespace,
 		},
@@ -132,3 +211,42 @@ func DeploymentWellKnowManifestConfig(namespace, name string) workapiv1.Manifest
 		},
 	}
 }
+
+func FilterDaemonSets(objects []runtime.Object) []*appsv1.DaemonSet {
+	daemonsets := []*appsv1.DaemonSet{}
+	for _, obj := range objects {
+		daemonset, err := ConvertToDaemonSet(obj)
+		if err != nil {
+			continue
+		}
+		daemonsets = append(daemonsets, daemonset)
+	}
+	return daemonsets
+}
+
+func ConvertToDaemonSet(obj runtime.Object) (*appsv1.DaemonSet, error) {
+	if daemonSet, ok := obj.(*appsv1.DaemonSet); ok {
+		return daemonSet, nil
+	}
+
+	if obj.GetObjectKind().GroupVersionKind().Group != appsv1.GroupName ||
+		obj.GetObjectKind().GroupVersionKind().Kind != "DaemonSet" {
+		return nil, fmt.Errorf("not daemonset object, %v", obj.GetObjectKind())
+	}
+
+	daemonSet := &appsv1.DaemonSet{}
+	uobj, ok := obj.(*unstructured.Unstructured)
+	if !ok {
+		return daemonSet, fmt.Errorf("not unstructured object, %v", obj.GetObjectKind())
+	}
+
+	err := runtime.DefaultUnstructuredConverter.FromUnstructured(uobj.Object, daemonSet)
+	if err != nil {
+		return nil, err
+	}
+	return daemonSet, nil
+}
+
+func DaemonSetWellKnowManifestConfig(namespace, name string) workapiv1.ManifestConfigOption {
+	return WellKnowManifestConfig(appsv1.GroupName, "daemonsets", namespace, name)
+}
diff --git a/pkg/utils/probe_helper_test.go b/pkg/utils/probe_helper_test.go
index b8d864a2..da7bdd85 100644
---
a/pkg/utils/probe_helper_test.go +++ b/pkg/utils/probe_helper_test.go @@ -24,7 +24,7 @@ func TestDeploymentProbe(t *testing.T) { { name: "no result", result: workapiv1.StatusFeedbackResult{}, - expectedErr: "no values are probed for deployment testns/test", + expectedErr: "no values are probed for deployments testns/test", }, { name: "no matched value", @@ -64,7 +64,27 @@ func TestDeploymentProbe(t *testing.T) { }, }, }, - expectedErr: "readyReplica is 0 for deployment testns/test", + expectedErr: "desiredNumberReplicas is 1 but readyReplica is 0 for deployments testns/test", + }, + { + name: "check desired replicas is 0 passed", + result: workapiv1.StatusFeedbackResult{ + Values: []workapiv1.FeedbackValue{ + { + Name: "Replicas", + Value: workapiv1.FieldValue{ + Integer: boolPtr(0), + }, + }, + { + Name: "ReadyReplicas", + Value: workapiv1.FieldValue{ + Integer: boolPtr(0), + }, + }, + }, + }, + expectedErr: "", }, { name: "check passed", diff --git a/test/integration/cloudevents/agent_deploy_test.go b/test/integration/cloudevents/agent_deploy_test.go index d5581533..cc7d5f63 100644 --- a/test/integration/cloudevents/agent_deploy_test.go +++ b/test/integration/cloudevents/agent_deploy_test.go @@ -107,6 +107,43 @@ const ( } } }` + + daemonSetJson = `{ + "apiVersion": "apps/v1", + "kind": "DaemonSet", + "metadata": { + "name": "nginx-ds", + "namespace": "default" + }, + "spec": { + "selector": { + "matchLabels": { + "app": "nginx" + } + }, + "template": { + "metadata": { + "labels": { + "app": "nginx" + } + }, + "spec": { + "containers": [ + { + "image": "nginx:1.14.2", + "name": "nginx", + "ports": [ + { + "containerPort": 80, + "protocol": "TCP" + } + ] + } + ] + } + } + } + }` ) var _ = ginkgo.Describe("Agent deploy", func() { @@ -539,6 +576,165 @@ var _ = ginkgo.Describe("Agent deploy", func() { return nil }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) }) + + ginkgo.It("Should deploy agent and get available with workload availability prober func", func() { + obj := &unstructured.Unstructured{} + err := obj.UnmarshalJSON([]byte(deploymentJson)) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + objDaemonSet := &unstructured.Unstructured{} + err = objDaemonSet.UnmarshalJSON([]byte(daemonSetJson)) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + testAddonImpl.manifests[managedClusterName] = []runtime.Object{obj, objDaemonSet} + testAddonImpl.prober = &agent.HealthProber{ + Type: agent.HealthProberTypeWorkloadAvailability, + } + + addon := &addonapiv1alpha1.ManagedClusterAddOn{ + ObjectMeta: metav1.ObjectMeta{ + Name: testAddonImpl.name, + }, + Spec: addonapiv1alpha1.ManagedClusterAddOnSpec{ + InstallNamespace: "default", + }, + } + createManagedClusterAddOnwithOwnerRefs(managedClusterName, addon, cma) + + var work *workv1.ManifestWork + gomega.Eventually(func() error { + works, err := agentWorkLister.List(labels.Everything()) + if err != nil { + return fmt.Errorf("failed to list works: %v", err) + } + + if len(works) != 1 { + return fmt.Errorf("Unexpected number of work manifests") + } + + work = works[0] + if len(work.Spec.Workload.Manifests) != 2 { + return fmt.Errorf("Unexpected number of work manifests: %d", len(work.Spec.Workload.Manifests)) + } + + if len(work.Spec.ManifestConfigs) != 2 { + return fmt.Errorf("Unexpected number of work manifests configuration: %d", + len(work.Spec.ManifestConfigs)) + } + + if apiequality.Semantic.DeepEqual(work.Spec.Workload.Manifests[0].Raw, []byte(deploymentJson)) { + return fmt.Errorf("expected manifest is no 
correct, get %v", work.Spec.Workload.Manifests[0].Raw) + } + if apiequality.Semantic.DeepEqual(work.Spec.Workload.Manifests[1].Raw, []byte(daemonSetJson)) { + return fmt.Errorf("expected manifest is no correct, get %v", work.Spec.Workload.Manifests[1].Raw) + } + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + // Update work status to trigger addon status + newWork := work.DeepCopy() + meta.SetStatusCondition(&newWork.Status.Conditions, metav1.Condition{Type: workv1.WorkApplied, + Status: metav1.ConditionTrue, Reason: "WorkApplied"}) + meta.SetStatusCondition(&newWork.Status.Conditions, metav1.Condition{Type: workv1.WorkAvailable, + Status: metav1.ConditionTrue, Reason: "WorkAvailable"}) + + replica := int64(1) + newWork.Status.ResourceStatus = workv1.ManifestResourceStatus{ + Manifests: []workv1.ManifestCondition{ + { + ResourceMeta: workv1.ManifestResourceMeta{ + Ordinal: 0, + Group: "apps", + Resource: "deployments", + Name: "nginx-deployment", + Namespace: "default", + }, + StatusFeedbacks: workv1.StatusFeedbackResult{ + Values: []workv1.FeedbackValue{ + { + Name: "Replicas", + Value: workv1.FieldValue{ + Type: workv1.Integer, + Integer: &replica, + }, + }, + { + Name: "ReadyReplicas", + Value: workv1.FieldValue{ + Type: workv1.Integer, + Integer: &replica, + }, + }, + }, + }, + Conditions: []metav1.Condition{ + { + Type: "Available", + Status: metav1.ConditionTrue, + Reason: "MinimumReplicasAvailable", + Message: "Deployment has minimum availability.", + LastTransitionTime: metav1.NewTime(time.Now()), + }, + }, + }, + { + ResourceMeta: workv1.ManifestResourceMeta{ + Ordinal: 0, + Group: "apps", + Resource: "daemonsets", + Name: "nginx-ds", + Namespace: "default", + }, + StatusFeedbacks: workv1.StatusFeedbackResult{ + Values: []workv1.FeedbackValue{ + { + Name: "NumberReady", + Value: workv1.FieldValue{ + Type: workv1.Integer, + Integer: &replica, + }, + }, + { + Name: "DesiredNumberScheduled", + Value: workv1.FieldValue{ + Type: workv1.Integer, + Integer: &replica, + }, + }, + }, + }, + }, + }, + } + + workBytes, err := json.Marshal(work) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + newWorkBytes, err := json.Marshal(newWork) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + patchBytes, err := jsonpatch.CreateMergePatch(workBytes, newWorkBytes) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + _, err = agentWorkClient.Patch(context.Background(), work.Name, types.MergePatchType, patchBytes, metav1.PatchOptions{}, "status") + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + // wait for addon to be available + gomega.Eventually(func() error { + addon, err := hubAddonClient.AddonV1alpha1().ManagedClusterAddOns(managedClusterName).Get(context.Background(), testAddonImpl.name, metav1.GetOptions{}) + if err != nil { + return err + } + if !meta.IsStatusConditionTrue(addon.Status.Conditions, addonapiv1alpha1.ManagedClusterAddOnManifestApplied) { + return fmt.Errorf("Unexpected addon applied condition, %v", addon.Status.Conditions) + } + if !meta.IsStatusConditionTrue(addon.Status.Conditions, addonapiv1alpha1.ManagedClusterAddOnConditionAvailable) { + return fmt.Errorf("Unexpected addon available condition, %v", addon.Status.Conditions) + } + if cond := meta.FindStatusCondition(addon.Status.Conditions, addonapiv1alpha1.ManagedClusterAddOnConditionProgressing); cond != nil { + return fmt.Errorf("expected no addon progressing condition, %v", addon.Status.Conditions) + } + return nil + }, eventuallyTimeout, 
eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + }) }) // The addon owner controller exist in general addon manager. diff --git a/test/integration/kube/agent_deploy_test.go b/test/integration/kube/agent_deploy_test.go index 195069fb..3eccc657 100644 --- a/test/integration/kube/agent_deploy_test.go +++ b/test/integration/kube/agent_deploy_test.go @@ -103,6 +103,43 @@ const ( } }` + daemonSetJson = `{ + "apiVersion": "apps/v1", + "kind": "DaemonSet", + "metadata": { + "name": "nginx-ds", + "namespace": "default" + }, + "spec": { + "selector": { + "matchLabels": { + "app": "nginx" + } + }, + "template": { + "metadata": { + "labels": { + "app": "nginx" + } + }, + "spec": { + "containers": [ + { + "image": "nginx:1.14.2", + "name": "nginx", + "ports": [ + { + "containerPort": 80, + "protocol": "TCP" + } + ] + } + ] + } + } + } + }` + mchJson = `{ "apiVersion": "operator.open-cluster-management.io/v1", "kind": "MultiClusterHub", @@ -371,6 +408,13 @@ var _ = ginkgo.Describe("Agent deploy", func() { Integer: &replica, }, }, + { + Name: "Replicas", + Value: workapiv1.FieldValue{ + Type: workapiv1.Integer, + Integer: &replica, + }, + }, }, }, Conditions: []metav1.Condition{ @@ -525,6 +569,13 @@ var _ = ginkgo.Describe("Agent deploy", func() { Integer: &replica, }, }, + { + Name: "Replicas", + Value: workapiv1.FieldValue{ + Type: workapiv1.Integer, + Integer: &replica, + }, + }, }, }, Conditions: []metav1.Condition{ @@ -555,6 +606,204 @@ var _ = ginkgo.Describe("Agent deploy", func() { }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) }) + ginkgo.It("Should deploy agent and get available with workload availability prober func", func() { + obj := &unstructured.Unstructured{} + err := obj.UnmarshalJSON([]byte(deploymentJson)) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + objDaemonSet := &unstructured.Unstructured{} + err = objDaemonSet.UnmarshalJSON([]byte(daemonSetJson)) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + testAddonImpl.manifests[managedClusterName] = []runtime.Object{obj, objDaemonSet} + testAddonImpl.prober = &agent.HealthProber{ + Type: agent.HealthProberTypeWorkloadAvailability, + } + + addon := &addonapiv1alpha1.ManagedClusterAddOn{ + ObjectMeta: metav1.ObjectMeta{ + Name: testAddonImpl.name, + }, + Spec: addonapiv1alpha1.ManagedClusterAddOnSpec{ + InstallNamespace: "default", + }, + } + createManagedClusterAddOnwithOwnerRefs(managedClusterName, addon, cma) + + gomega.Eventually(func() error { + work, err := hubWorkClient.WorkV1().ManifestWorks(managedClusterName). 
+ Get(context.Background(), manifestWorkName, metav1.GetOptions{}) + if err != nil { + return err + } + + if len(work.Spec.Workload.Manifests) != 2 { + return fmt.Errorf("Unexpected number of work manifests: %d", len(work.Spec.Workload.Manifests)) + } + + if len(work.Spec.ManifestConfigs) != 2 { + return fmt.Errorf("Unexpected number of work manifests configuration: %d", + len(work.Spec.ManifestConfigs)) + } + + if apiequality.Semantic.DeepEqual(work.Spec.Workload.Manifests[0].Raw, []byte(deploymentJson)) { + return fmt.Errorf("expected manifest is no correct, get %v", work.Spec.Workload.Manifests[0].Raw) + } + if apiequality.Semantic.DeepEqual(work.Spec.Workload.Manifests[1].Raw, []byte(daemonSetJson)) { + return fmt.Errorf("expected manifest is no correct, get %v", work.Spec.Workload.Manifests[1].Raw) + } + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + // Update work status to trigger addon status + work, err := hubWorkClient.WorkV1().ManifestWorks(managedClusterName). + Get(context.Background(), manifestWorkName, metav1.GetOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + meta.SetStatusCondition(&work.Status.Conditions, + metav1.Condition{Type: workapiv1.WorkAvailable, Status: metav1.ConditionTrue, Reason: "WorkAvailable"}) + + replica := int64(1) + + // update work status to a wrong feedback status + work.Status.ResourceStatus = workapiv1.ManifestResourceStatus{ + Manifests: []workapiv1.ManifestCondition{ + { + ResourceMeta: workapiv1.ManifestResourceMeta{ + Ordinal: 0, + Group: "apps", + Resource: "deployments", + Name: "nginx-deployment", + Namespace: "default", + }, + StatusFeedbacks: workapiv1.StatusFeedbackResult{ + Values: []workapiv1.FeedbackValue{ + { + Name: "ReplicasTest", + Value: workapiv1.FieldValue{ + Type: workapiv1.Integer, + Integer: &replica, + }, + }, + }, + }, + Conditions: []metav1.Condition{ + { + Type: "Available", + Status: metav1.ConditionTrue, + Reason: "MinimumReplicasAvailable", + Message: "Deployment has minimum availability.", + LastTransitionTime: metav1.NewTime(time.Now()), + }, + }, + }, + }, + } + meta.SetStatusCondition(&work.Status.Conditions, + metav1.Condition{Type: workapiv1.WorkApplied, Status: metav1.ConditionTrue, Reason: "WorkApplied"}) + _, err = hubWorkClient.WorkV1().ManifestWorks(managedClusterName). + UpdateStatus(context.Background(), work, metav1.UpdateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + gomega.Eventually(func() error { + addon, err := hubAddonClient.AddonV1alpha1().ManagedClusterAddOns(managedClusterName). + Get(context.Background(), testAddonImpl.name, metav1.GetOptions{}) + if err != nil { + return err + } + + if !meta.IsStatusConditionFalse(addon.Status.Conditions, "Available") { + return fmt.Errorf("Unexpected addon available condition, %v", addon.Status.Conditions) + } + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + // update to the correct condition + work, err = hubWorkClient.WorkV1().ManifestWorks(managedClusterName). 
+ Get(context.Background(), manifestWorkName, metav1.GetOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + work.Status.ResourceStatus = workapiv1.ManifestResourceStatus{ + Manifests: []workapiv1.ManifestCondition{ + { + ResourceMeta: workapiv1.ManifestResourceMeta{ + Ordinal: 0, + Group: "apps", + Resource: "deployments", + Name: "nginx-deployment", + Namespace: "default", + }, + StatusFeedbacks: workapiv1.StatusFeedbackResult{ + Values: []workapiv1.FeedbackValue{ + { + Name: "ReadyReplicas", + Value: workapiv1.FieldValue{ + Type: workapiv1.Integer, + Integer: &replica, + }, + }, + { + Name: "Replicas", + Value: workapiv1.FieldValue{ + Type: workapiv1.Integer, + Integer: &replica, + }, + }, + }, + }, + Conditions: []metav1.Condition{ + { + Type: "Available", + Status: metav1.ConditionTrue, + Reason: "MinimumReplicasAvailable", + Message: "Deployment has minimum availability.", + LastTransitionTime: metav1.NewTime(time.Now()), + }, + }, + }, + { + ResourceMeta: workapiv1.ManifestResourceMeta{ + Ordinal: 0, + Group: "apps", + Resource: "daemonsets", + Name: "nginx-ds", + Namespace: "default", + }, + StatusFeedbacks: workapiv1.StatusFeedbackResult{ + Values: []workapiv1.FeedbackValue{ + { + Name: "NumberReady", + Value: workapiv1.FieldValue{ + Type: workapiv1.Integer, + Integer: &replica, + }, + }, + { + Name: "DesiredNumberScheduled", + Value: workapiv1.FieldValue{ + Type: workapiv1.Integer, + Integer: &replica, + }, + }, + }, + }, + }, + }, + } + _, err = hubWorkClient.WorkV1().ManifestWorks(managedClusterName). + UpdateStatus(context.Background(), work, metav1.UpdateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + gomega.Eventually(func() error { + addon, err := hubAddonClient.AddonV1alpha1().ManagedClusterAddOns(managedClusterName). + Get(context.Background(), testAddonImpl.name, metav1.GetOptions{}) + if err != nil { + return err + } + + if !meta.IsStatusConditionTrue(addon.Status.Conditions, "Available") { + return fmt.Errorf("Unexpected addon available condition, %v", addon.Status.Conditions) + } + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + }) + ginkgo.It("Should allow trigger externally", func() { obj := &unstructured.Unstructured{} err := obj.UnmarshalJSON([]byte(deploymentJson)) diff --git a/test/integration/kube/multiworks_test.go b/test/integration/kube/multiworks_test.go index 88ff4a51..4e197903 100644 --- a/test/integration/kube/multiworks_test.go +++ b/test/integration/kube/multiworks_test.go @@ -398,7 +398,7 @@ var _ = ginkgo.Describe("Agent deploy multi works", func() { StatusFeedbacks: workapiv1.StatusFeedbackResult{ Values: []workapiv1.FeedbackValue{ { - Name: "Replicas", + Name: "ReplicasTest", Value: workapiv1.FieldValue{ Type: workapiv1.Integer, Integer: &replica, @@ -456,6 +456,13 @@ var _ = ginkgo.Describe("Agent deploy multi works", func() { Integer: &replica, }, }, + { + Name: "Replicas", + Value: workapiv1.FieldValue{ + Type: workapiv1.Integer, + Integer: &replica, + }, + }, }, }, Conditions: []metav1.Condition{ @@ -507,6 +514,13 @@ var _ = ginkgo.Describe("Agent deploy multi works", func() { Integer: &replica, }, }, + { + Name: "Replicas", + Value: workapiv1.FieldValue{ + Type: workapiv1.Integer, + Integer: &replica, + }, + }, }, }, Conditions: []metav1.Condition{
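
For context, a minimal sketch (not part of the diff above) of how an addon agent could opt into the new prober type. The `helloAgent` type and the "hello" addon name are hypothetical placeholders; only `agent.AgentAddonOptions`, `agent.HealthProber`, and `agent.HealthProberTypeWorkloadAvailability` come from the code changed in this patch.

```go
package hello

import (
	"k8s.io/apimachinery/pkg/runtime"

	"open-cluster-management.io/addon-framework/pkg/agent"
	addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1"
	clusterv1 "open-cluster-management.io/api/cluster/v1"
)

// helloAgent is a hypothetical agent.AgentAddon implementation.
type helloAgent struct{}

// Manifests returns the workloads to deploy. With the WorkloadAvailability
// prober, every Deployment and DaemonSet returned here gets a well-known-status
// feedback rule on the generated ManifestWork.
func (a *helloAgent) Manifests(cluster *clusterv1.ManagedCluster,
	addon *addonapiv1alpha1.ManagedClusterAddOn) ([]runtime.Object, error) {
	return []runtime.Object{ /* a Deployment and/or DaemonSet */ }, nil
}

func (a *helloAgent) GetAgentAddonOptions() agent.AgentAddonOptions {
	return agent.AgentAddonOptions{
		AddonName: "hello",
		HealthProber: &agent.HealthProber{
			// The addon is reported Available only when all probed
			// Deployments and DaemonSets are ready, as checked by
			// utils.WorkloadAvailabilityHealthCheck above.
			Type: agent.HealthProberTypeWorkloadAvailability,
		},
	}
}
```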