From d2e0d1784d63d616b671e9a39a5e27df379c6615 Mon Sep 17 00:00:00 2001
From: Aditya Thebe
Date: Wed, 17 Apr 2024 18:42:59 +0545
Subject: [PATCH] feat: copy resource_customizations from argocd

---
 pkg/resource_customizations | 1 -
 .../apps.kruise.io/AdvancedCronJob/health.lua | 36 ++++
 .../AdvancedCronJob/health_test.yaml | 17 ++
 .../AdvancedCronJob/testdata/activeJobs.yaml | 30 ++++
 .../testdata/lastScheduleTime.yaml | 23 +++
 .../testdata/notScheduled.yaml | 22 +++
 .../AdvancedCronJob/testdata/suspended.yaml | 23 +++
 .../apps.kruise.io/BroadcastJob/health.lua | 32 ++++
 .../BroadcastJob/health_test.yaml | 17 ++
 .../BroadcastJob/testdata/failed.yaml | 31 ++++
 .../BroadcastJob/testdata/running.yaml | 22 +++
 .../BroadcastJob/testdata/succeeded.yaml | 31 ++++
 .../BroadcastJob/testdata/suspended.yaml | 31 ++++
 .../apps.kruise.io/CloneSet/health.lua | 33 ++++
 .../apps.kruise.io/CloneSet/health_test.yaml | 21 +++
 .../CloneSet/testdata/degraded.yaml | 35 ++++
 .../CloneSet/testdata/healthy.yaml | 36 ++++
 .../testdata/partition_suspended.yaml | 31 ++++
 .../CloneSet/testdata/suspended.yaml | 35 ++++
 .../CloneSet/testdata/unknown.yaml | 5 +
 .../apps.kruise.io/DaemonSet/health.lua | 35 ++++
 .../apps.kruise.io/DaemonSet/health_test.yaml | 21 +++
 .../DaemonSet/testdata/degraded.yaml | 34 ++++
 .../DaemonSet/testdata/healthy.yaml | 34 ++++
 .../testdata/partition_suspended.yaml | 33 ++++
 .../DaemonSet/testdata/suspended.yaml | 33 ++++
 .../DaemonSet/testdata/unknown.yaml | 5 +
 .../apps.kruise.io/StatefulSet/health.lua | 35 ++++
 .../StatefulSet/health_test.yaml | 21 +++
 .../StatefulSet/testdata/degraded.yaml | 42 +++++
 .../StatefulSet/testdata/healthy.yaml | 41 +++++
 .../testdata/partition_suspended.yaml | 36 ++++
 .../StatefulSet/testdata/suspended.yaml | 36 ++++
 .../StatefulSet/testdata/unknown.yaml | 5 +
 .../DeploymentConfig/health.lua | 27 +++
 .../DeploymentConfig/health_test.yaml | 21 +++
 .../DeploymentConfig/testdata/degraded.yaml | 156 +++++++++++++++++
 .../DeploymentConfig/testdata/healthy.yaml | 66 +++++++
 .../testdata/healthy_zero_replicas.yaml | 68 ++++++++
 .../testdata/progressing.yaml | 163 +++++++++++++++++
 .../testdata/progressing_rc_updated.yaml | 66 +++++++
 .../apps/DaemonSet/actions/action_test.yaml | 4 +
 .../apps/DaemonSet/actions/discovery.lua | 3 +
 .../apps/DaemonSet/actions/restart/action.lua | 9 +
 .../actions/testdata/daemonset-restarted.yaml | 50 ++++++
 .../DaemonSet/actions/testdata/daemonset.yaml | 48 +++++
 .../apps/Deployment/actions/action_test.yaml | 10 ++
 .../apps/Deployment/actions/discovery.lua | 10 ++
 .../apps/Deployment/actions/pause/action.lua | 2 +
 .../Deployment/actions/restart/action.lua | 9 +
 .../apps/Deployment/actions/resume/action.lua | 2 +
 .../actions/testdata/deployment-pause.yaml | 59 +++++++
 .../testdata/deployment-restarted.yaml | 63 +++++++
 .../actions/testdata/deployment-resume.yaml | 58 ++++++
 .../actions/testdata/deployment.yaml | 61 +++++++
 .../apps/StatefulSet/actions/action_test.yaml | 4 +
 .../apps/StatefulSet/actions/discovery.lua | 3 +
 .../StatefulSet/actions/restart/action.lua | 9 +
 .../testdata/statefulset-restarted.yaml | 52 ++++++
 .../actions/testdata/statefulset.yaml | 50 ++++++
 .../AnalysisRun/actions/action_test.yaml | 13 ++
 .../AnalysisRun/actions/discovery.lua | 8 +
 .../AnalysisRun/actions/terminate/action.lua | 2 +
 .../actions/testdata/failedAnalysisRun.yaml | 31 ++++
 .../actions/testdata/runningAnalysisRun.yaml | 35 ++++
 .../runningAnalysisRun_terminated.yaml | 36 ++++
 .../argoproj.io/AnalysisRun/health.lua | 40 +++++
 .../argoproj.io/AnalysisRun/health_test.yaml | 45 +++++
 .../testdata/errorAnalysisRun.yaml | 47 +++++
 .../errorAnalysisRunWithStatusMessage.yaml | 48 +++++
 .../testdata/failedAnalysisRun.yaml | 31 ++++
 .../failedAnalysisRunWithStatusMessage.yaml | 32 ++++
 .../testdata/inconclusiveAnalysisRun.yaml | 31 ++++
 ...onclusiveAnalysisRunWithStatusMessage.yaml | 32 ++++
 .../testdata/noStatusAnalysisRun.yaml | 19 ++
 .../testdata/pendingAnalysisRun.yaml | 17 ++
 .../testdata/runningAnalysisRun.yaml | 35 ++++
 .../testdata/successfulAnalysisRun.yaml | 30 ++++
 .../testdata/terminatedAnalysisRun.yaml | 71 ++++++++
 .../argoproj.io/ApplicationSet/health.lua | 24 +++
 .../ApplicationSet/health_test.yaml | 13 ++
 .../errorApplicationSetWithStatusMessage.yaml | 40 +++++
 .../testdata/healthyApplicationSet.yaml | 60 +++++++
 .../testdata/noStatusApplicationSet.yaml | 43 +++++
 .../CronWorkflow/actions/action_test.yaml | 7 +
 .../actions/create-workflow/action.lua | 82 +++++++++
 .../CronWorkflow/actions/discovery.lua | 6 +
 .../testdata/cronworkflow-without-label.yaml | 31 ++++
 .../actions/testdata/cronworkflow.yaml | 34 ++++
 .../testdata/workflow-without-label.yaml | 26 +++
 .../actions/testdata/workflow.yaml | 28 +++
 .../argoproj.io/CronWorkflow/health.lua | 26 +++
 .../argoproj.io/CronWorkflow/health_test.yaml | 21 +++
 .../degradedCronWorkflowWithSpecError.yaml | 13 ++
 ...gradedCronWorkflowWithSubmissionError.yaml | 13 ++
 .../testdata/healthyCronWorkflow.yaml | 13 ++
 .../testdata/noConditionsCronWorkflow.yaml | 7 +
 .../withConditionButHealthyCronWorkflow.yaml | 13 ++
 .../argoproj.io/EventBus/health.lua | 21 +++
 .../argoproj.io/EventBus/health_test.yaml | 9 +
 .../EventBus/testdata/degraded.yaml | 21 +++
 .../EventBus/testdata/healthy.yaml | 19 ++
 .../argoproj.io/Experiment/health.lua | 28 +++
 .../argoproj.io/Experiment/health_test.yaml | 25 +++
 .../Experiment/testdata/errorExperiment.yaml | 49 ++++++
 .../Experiment/testdata/failedExperiment.yaml | 54 ++++++
 .../testdata/noStatusExperiment.yaml | 33 ++++
 .../testdata/pendingExperiment.yaml | 47 +++++
 .../testdata/runningExperiment.yaml | 40 +++++
 .../testdata/successfulExperiment.yaml | 61 +++++++
 .../Rollout/actions/abort/action.lua | 2 +
 .../Rollout/actions/action_test.yaml | 131 ++++++++++++++
 .../argoproj.io/Rollout/actions/discovery.lua | 28 +++
 .../Rollout/actions/promote-full/action.lua | 14 ++
 .../Rollout/actions/restart/action.lua | 3 +
 .../Rollout/actions/resume/action.lua | 9 +
 .../Rollout/actions/retry/action.lua | 2 +
 .../actions/testdata/aborted_bg_rollout.yaml | 63 +++++++
 .../actions/testdata/aborted_rollout.yaml | 63 +++++++
 .../testdata/has_pause_condition_rollout.yaml | 61 +++++++
 .../has_pause_condition_rollout_aborted.yaml | 62 +++++++
 .../actions/testdata/healthy_rollout.yaml | 64 +++++++
 .../testdata/no_pause_condition_rollout.yaml | 58 ++++++
 .../actions/testdata/one_replica_rollout.yaml | 51 ++++++
 .../testdata/pre_v0.6_nil_paused_rollout.yaml | 51 ++++++
 .../testdata/pre_v0.6_not_paused_rollout.yaml | 52 ++++++
 .../testdata/pre_v0.6_paused_rollout.yaml | 52 ++++++
 .../testdata/promote-full_rollout.yaml | 64 +++++++
 .../actions/testdata/retried_rollout.yaml | 81 +++++++++
 .../testdata/rollout_not_restarted.yaml | 47 +++++
 .../actions/testdata/rollout_restarted.yaml | 48 +++++
 .../testdata/three_replica_rollout.yaml | 51 ++++++
 .../testdata/v0.9_aborted_bg_rollout.yaml | 63 +++++++
 .../testdata/v0.9_aborted_rollout.yaml | 81 +++++++++
 .../testdata/v0.9_promote-full_rollout.yaml | 81 +++++++++
 .../argoproj.io/Rollout/health.lua | 165 ++++++++++++++++++
 .../argoproj.io/Rollout/health_test.yaml | 94 ++++++++++
 .../healthy_servingActiveService.yaml | 56 ++++++
 .../progressing_addingMoreReplicas.yaml | 54 ++++++
 .../progressing_waitingUntilAvailable.yaml | 55 ++++++
 .../canary/healthy_emptyStepsList.yaml | 65 +++++++
 .../canary/healthy_executedAllSteps.yaml | 73 ++++++++
 .../healthy_executedAllStepsPreV0.8.yaml | 73 ++++++++
 .../testdata/canary/healthy_noSteps.yaml | 66 +++++++
 .../progressing_killingOldReplicas.yaml | 61 +++++++
 .../testdata/canary/progressing_noSteps.yaml | 64 +++++++
 .../canary/progressing_setWeightStep.yaml | 69 ++++++++
 .../testdata/degraded_abortedRollout.yaml | 70 ++++++++
 .../testdata/degraded_invalidSpec.yaml | 65 +++++++
 .../testdata/degraded_rolloutTimeout.yaml | 84 +++++++++
 .../testdata/degraded_statusPhaseMessage.yaml | 50 ++++++
 ...ealthy_legacy_v0.9_observedGeneration.yaml | 60 +++++++
 ...egacy_v0.9_observedGeneration_numeric.yaml | 60 +++++++
 ...thy_legacy_v1.0_newWorkloadGeneration.yaml | 58 ++++++
 .../healthy_newWorkloadGeneration.yaml | 60 +++++++
 .../testdata/newRolloutWithoutStatus.yaml | 31 ++++
 .../testdata/progressing_newGeneration.yaml | 60 +++++++
 .../progressing_newWorkloadGeneration.yaml | 60 +++++++
 .../testdata/suspended_controllerPause.yaml | 52 ++++++
 .../Rollout/testdata/suspended_userPause.yaml | 46 +++++
 .../suspended_v1.0_pausedRollout.yaml | 97 ++++++++++
 .../WorkflowTemplate/actions/action_test.yaml | 4 +
 .../actions/create-workflow/action.lua | 39 +++++
 .../WorkflowTemplate/actions/discovery.lua | 6 +
 .../actions/testdata/workflow.yaml | 16 ++
 .../actions/testdata/workflowtemplate.yaml | 24 +++
 .../batch/CronJob/actions/action_test.yaml | 4 +
 .../CronJob/actions/create-job/action.lua | 64 +++++++
 .../batch/CronJob/actions/discovery.lua | 6 +
 .../CronJob/actions/testdata/cronjob.yaml | 33 ++++
 .../batch/CronJob/actions/testdata/job.yaml | 30 ++++
 .../beat.k8s.elastic.co/Beat/health.lua | 31 ++++
 .../beat.k8s.elastic.co/Beat/health_test.yaml | 29 +++
 .../Beat/testdata/invalid.yaml | 12 ++
 .../Beat/testdata/progressing.yaml | 11 ++
 .../Beat/testdata/ready_green.yaml | 13 ++
 .../Beat/testdata/ready_red.yaml | 10 ++
 .../Beat/testdata/ready_yellow.yaml | 11 ++
 .../testdata/ready_yellow_single_node.yaml | 10 ++
 .../Beat/testdata/unknown.yaml | 8 +
 .../bitnami.com/SealedSecret/health.lua | 20 +++
 .../bitnami.com/SealedSecret/health_test.yaml | 13 ++
 .../SealedSecret/testdata/degraded.yaml | 24 +++
 .../SealedSecret/testdata/healthy.yaml | 24 +++
 .../SealedSecret/testdata/progressing.yaml | 16 ++
 .../cassandra.rook.io/Cluster/health.lua | 24 +++
 .../Cluster/health_test.yaml | 9 +
 .../Cluster/testdata/healthy.yaml | 96 ++++++++++
 .../Cluster/testdata/progressing.yaml | 96 ++++++++++
 .../cdi.kubevirt.io/DataVolume/health.lua | 25 +++
 .../DataVolume/health_test.yaml | 21 +++
 .../DataVolume/testdata/degraded_badurl.yaml | 41 +++++
 .../testdata/healthy_succeeded.yaml | 41 +++++
 .../progressing_containercreating.yaml | 35 ++++
 .../testdata/progressing_importing.yaml | 42 +++++
 .../testdata/progressing_nostatus.yaml | 22 +++
 .../cert-manager.io/Certificate/health.lua | 31 ++++
 .../Certificate/health_test.yaml | 26 +++
 .../testdata/degraded_configError.yaml | 35 ++++
 .../Certificate/testdata/healthy_issued.yaml | 39 +++++
 .../Certificate/testdata/healthy_renewed.yaml | 39 +++++
 .../testdata/progressing_issuing.yaml | 37 ++++
 .../testdata/progressing_issuing_last.yaml | 36 ++++
 .../testdata/progressing_noStatus.yaml | 24 +++
 .../cert-manager.io/ClusterIssuer/health.lua | 21 +++
 .../ClusterIssuer/health_test.yaml | 14 ++
 .../testdata/degraded_acmeFailed.yaml | 26 +++
 .../testdata/healthy_registered.yaml | 25 +++
 .../testdata/progressing_noStatus.yaml | 16 ++
 .../cert-manager.io/Issuer/health.lua | 21 +++
 .../cert-manager.io/Issuer/health_test.yaml | 14 ++
 .../Issuer/testdata/degraded_acmeFailed.yaml | 28 +++
 .../Issuer/testdata/healthy_registered.yaml | 27 +++
 .../Issuer/testdata/progressing_noStatus.yaml | 18 ++
 .../certmanager.k8s.io/Certificate/health.lua | 21 +++
 .../Certificate/health_test.yaml | 18 ++
 .../testdata/degraded_configError.yaml | 35 ++++
 .../Certificate/testdata/healthy_issued.yaml | 39 +++++
 .../Certificate/testdata/healthy_renewed.yaml | 39 +++++
 .../testdata/progressing_noStatus.yaml | 24 +++
 .../certmanager.k8s.io/Issuer/health.lua | 21 +++
 .../Issuer/health_test.yaml | 14 ++
 .../Issuer/testdata/degraded_acmeFailed.yaml | 28 +++
 .../Issuer/testdata/healthy_registered.yaml | 27 +++
 .../Issuer/testdata/progressing_noStatus.yaml | 18 ++
 .../Distribution/health.lua | 42 +++++
 .../Distribution/health_test.yaml | 37 ++++
 .../testdata/degraded_reconcileError.yaml | 96 ++++++++++
 .../Distribution/testdata/healthy.yaml | 92 ++++++++++
 .../Distribution/testdata/progressing.yaml | 92 ++++++++++
 .../testdata/progressing_creating.yaml | 92 ++++++++++
 .../testdata/progressing_noStatus.yaml | 82 +++++++++
 .../testdata/progressing_noavailable.yaml | 88 ++++++++++
 .../Distribution/testdata/suspended.yaml | 94 ++++++++++
 .../CloudFunctionsFunction/health.lua | 39 +++++
 .../CloudFunctionsFunction/health_test.yaml | 21 +++
 .../testdata/dependency_not_found.yaml | 9 +
 .../testdata/dependency_not_ready.yaml | 9 +
 .../testdata/up_to_date.yaml | 9 +
 .../testdata/update_failed.yaml | 9 +
 .../testdata/update_in_progress.yaml | 9 +
 .../CloudSchedulerJob/health.lua | 39 +++++
 .../CloudSchedulerJob/health_test.yaml | 21 +++
 .../testdata/dependency_not_found.yaml | 9 +
 .../testdata/dependency_not_ready.yaml | 9 +
 .../testdata/up_to_date.yaml | 9 +
 .../testdata/update_failed.yaml | 9 +
 .../testdata/update_in_progress.yaml | 9 +
 .../cluster.x-k8s.io/Cluster/health.lua | 40 +++++
 .../cluster.x-k8s.io/Cluster/health_test.yaml | 21 +++
 .../Cluster/testdata/degraded_failed.yaml | 41 +++++
 .../Cluster/testdata/error_provisioned.yaml | 54 ++++++
 .../Cluster/testdata/healthy_provisioned.yaml | 40 +++++
 .../testdata/progressing_provisioning.yaml | 40 +++++
 .../Cluster/testdata/suspended_paused.yaml | 42 +++++
 .../cluster.x-k8s.io/Machine/health.lua | 34 ++++
 .../cluster.x-k8s.io/Machine/health_test.yaml | 13 ++
 .../Machine/testdata/degraded_failed.yaml | 44 +++++
 .../Machine/testdata/healthy_running.yaml | 43 +++++
 .../Machine/testdata/progressing_boot.yaml | 46 +++++
 .../MachineDeployment/health.lua | 30 ++++
 .../MachineDeployment/health_test.yaml | 21 +++
 .../testdata/degraded_failed.yaml | 48 +++++
 .../testdata/healthy_provisioned.yaml | 48 +++++
 .../testdata/progressing_ScalingDown.yaml | 48 +++++
 .../testdata/progressing_ScalingUp.yaml | 48 +++++
 .../testdata/suspended_paused.yaml | 49 ++++++
 .../MachineHealthCheck/health.lua | 14 ++
 .../MachineHealthCheck/health_test.yaml | 9 +
 .../testdata/degraded_expectedMachines.yaml | 33 ++++
 .../MachineHealthCheck/testdata/healthy.yaml | 33 ++++
 .../ComputeDisk/health.lua | 39 +++++
 .../ComputeDisk/health_test.yaml | 21 +++
 .../testdata/dependency_not_found.yaml | 9 +
 .../testdata/dependency_not_ready.yaml | 9 +
 .../ComputeDisk/testdata/up_to_date.yaml | 9 +
 .../ComputeDisk/testdata/update_failed.yaml | 9 +
 .../testdata/update_in_progress.yaml | 9 +
 .../db.atlasgo.io/AtlasMigration/health.lua | 37 ++++
 .../AtlasMigration/health_test.yaml | 13 ++
 .../AtlasMigration/testdata/degraded.yaml | 29 +++
 .../AtlasMigration/testdata/healthy.yaml | 30 ++++
 .../AtlasMigration/testdata/progressing.yaml | 30 ++++
 .../db.atlasgo.io/AtlasSchema/health.lua | 37 ++++
 .../AtlasSchema/health_test.yaml | 13 ++
 .../AtlasSchema/testdata/degraded.yaml | 38 ++++
 .../AtlasSchema/testdata/healthy.yaml | 39 +++++
 .../AtlasSchema/testdata/progressing.yaml | 35 ++++
 .../Elasticsearch/health.lua | 48 +++++
 .../Elasticsearch/health_test.yaml | 33 ++++
 .../testdata/applyingchanges.yaml | 15 ++
 .../Elasticsearch/testdata/invalid.yaml | 15 ++
 .../Elasticsearch/testdata/migratingdata.yaml | 15 ++
 .../Elasticsearch/testdata/progressing.yaml | 22 +++
 .../Elasticsearch/testdata/ready_green.yaml | 15 ++
 .../Elasticsearch/testdata/ready_red.yaml | 15 ++
 .../Elasticsearch/testdata/ready_yellow.yaml | 15 ++
 .../Elasticsearch/testdata/unknown.yaml | 12 ++
 pkg/resource_customizations/embed.go | 10 ++
 .../ClusterExternalSecret/health.lua | 25 +++
 .../ClusterExternalSecret/health_test.yaml | 21 +++
 .../testdata/healthy.yaml | 37 ++++
 .../testdata/notready.yaml | 38 ++++
 .../partiallyready-multiple-conditions.yaml | 43 +++++
 .../testdata/partiallyready.yaml | 40 +++++
 .../testdata/progressing.yaml | 30 ++++
 .../ClusterSecretStore/health.lua | 20 +++
 .../ClusterSecretStore/health_test.yaml | 9 +
 .../ClusterSecretStore/testdata/degraded.yaml | 16 ++
 .../ClusterSecretStore/testdata/healthy.yaml | 17 ++
 .../ExternalSecret/actions/action_test.yaml | 4 +
 .../ExternalSecret/actions/discovery.lua | 3 +
 .../ExternalSecret/actions/refresh/action.lua | 6 +
 .../testdata/external-secret-updated.yaml | 56 ++++++
 .../actions/testdata/external-secret.yaml | 54 ++++++
 .../ExternalSecret/health.lua | 20 +++
 .../ExternalSecret/health_test.yaml | 13 ++
 .../ExternalSecret/testdata/degraded.yaml | 52 ++++++
 .../ExternalSecret/testdata/healthy.yaml | 54 ++++++
 .../ExternalSecret/testdata/progressing.yaml | 45 +++++
 .../PushSecret/actions/action_test.yaml | 4 +
 .../PushSecret/actions/discovery.lua | 3 +
 .../PushSecret/actions/push/action.lua | 6 +
 .../actions/testdata/push-secret-updated.yaml | 41 +++++
 .../actions/testdata/push-secret.yaml | 39 +++++
 .../external-secrets.io/PushSecret/health.lua | 20 +++
 .../PushSecret/health_test.yaml | 13 ++
 .../PushSecret/testdata/degraded.yaml | 33 ++++
 .../PushSecret/testdata/healthy.yaml | 39 +++++
 .../PushSecret/testdata/progressing.yaml | 24 +++
 .../SecretStore/health.lua | 20 +++
 .../SecretStore/health_test.yaml | 9 +
 .../SecretStore/testdata/degraded.yaml | 24 +++
 .../SecretStore/testdata/healthy.yaml | 29 +++
 .../flagger.app/Canary/health.lua | 31 ++++
 .../flagger.app/Canary/health_test.yaml | 25 +++
 .../flagger.app/Canary/testdata/degraded.yaml | 29 +++
 .../Canary/testdata/finalising.yaml | 28 +++
 .../Canary/testdata/initialized.yaml | 28 +++
 .../Canary/testdata/progressing.yaml | 28 +++
 .../Canary/testdata/promoting.yaml | 28 +++
 .../Canary/testdata/succeeded.yaml | 28 +++
 .../FlinkDeployment/health.lua | 24 +++
 .../FlinkDeployment/health_test.yaml | 25 +++
 .../testdata/degraded_error.yaml | 8 +
 .../testdata/healthy_running_v0.1.x.yaml | 11 ++
 .../testdata/healthy_running_v1.x.yaml | 11 ++
 .../testdata/healthy_suspended_v0.1.x.yaml | 11 ++
 .../testdata/healthy_suspended_v1.x.yaml | 11 ++
 .../progressing_deployedNotReady.yaml | 6 +
 .../testdata/progressing_deploying.yaml | 6 +
 .../IAMPartialPolicy/health.lua | 39 +++++
 .../IAMPartialPolicy/health_test.yaml | 21 +++
 .../testdata/dependency_not_found.yaml | 9 +
 .../testdata/dependency_not_ready.yaml | 9 +
 .../IAMPartialPolicy/testdata/up_to_date.yaml | 9 +
 .../testdata/update_failed.yaml | 9 +
 .../testdata/update_in_progress.yaml | 9 +
 .../IAMPolicy/health.lua | 39 +++++
 .../IAMPolicy/health_test.yaml | 21 +++
 .../testdata/dependency_not_found.yaml | 9 +
 .../testdata/dependency_not_ready.yaml | 9 +
 .../IAMPolicy/testdata/up_to_date.yaml | 9 +
 .../IAMPolicy/testdata/update_failed.yaml | 9 +
 .../testdata/update_in_progress.yaml | 9 +
 .../IAMPolicyMember/health.lua | 39 +++++
 .../IAMPolicyMember/health_test.yaml | 21 +++
 .../testdata/dependency_not_found.yaml | 9 +
 .../testdata/dependency_not_ready.yaml | 9 +
 .../IAMPolicyMember/testdata/up_to_date.yaml | 9 +
 .../testdata/update_failed.yaml | 9 +
 .../testdata/update_in_progress.yaml | 9 +
 .../IAMServiceAccount/health.lua | 39 +++++
 .../IAMServiceAccount/health_test.yaml | 21 +++
 .../testdata/dependency_not_found.yaml | 9 +
 .../testdata/dependency_not_ready.yaml | 9 +
 .../testdata/up_to_date.yaml | 9 +
 .../testdata/update_failed.yaml | 9 +
 .../testdata/update_in_progress.yaml | 9 +
 .../Iamrole/health.lua | 33 ++++
 .../Iamrole/health_test.yaml | 20 +++
 .../Iamrole/testdata/degraded_error.yaml | 29 +++
 .../degraded_rolesMaxLimitReached.yaml | 26 +++
 .../Iamrole/testdata/healthy.yaml | 27 +++
 .../testdata/progressing_noStatus.yaml | 20 +++
 .../install.istio.io/IstioOperator/health.lua | 33 ++++
 .../IstioOperator/health_test.yaml | 17 ++
 .../IstioOperator/testdata/degraded.yaml | 25 +++
 .../IstioOperator/testdata/healthy.yaml | 26 +++
 .../testdata/progressing_reconciling.yaml | 26 +++
 .../testdata/progressing_updating.yaml | 25 +++
 .../jaegertracing.io/Jaeger/health.lua | 16 ++
 .../jaegertracing.io/Jaeger/health_test.yaml | 13 ++
 .../Jaeger/testdata/degraded.yaml | 15 ++
 .../Jaeger/testdata/healthy.yaml | 15 ++
 .../Jaeger/testdata/progressing.yaml | 13 ++
 .../KafkaCluster/health.lua | 41 +++++
 .../KafkaCluster/health_test.yaml | 17 ++
 .../KafkaCluster/testdata/degraded.yaml | 48 +++++
 .../KafkaCluster/testdata/healthy.yaml | 48 +++++
 .../KafkaCluster/testdata/reconciling.yaml | 48 +++++
 .../KafkaCluster/testdata/updating.yaml | 48 +++++
 .../kafka.strimzi.io/Kafka/health.lua | 21 +++
 .../kafka.strimzi.io/Kafka/health_test.yaml | 12 ++
 .../Kafka/testdata/degraded.yaml | 47 +++++
 .../Kafka/testdata/healthy.yaml | 61 +++++++
 .../Kafka/testdata/progressing_noStatus.yaml | 37 ++++
 .../kafka.strimzi.io/KafkaConnect/health.lua | 21 +++
 .../KafkaConnect/health_test.yaml | 12 ++
 .../KafkaConnect/testdata/degraded.yaml | 104 +++++++++++
 .../KafkaConnect/testdata/healthy.yaml | 103 +++++++++++
 .../testdata/progressing_noStatus.yaml | 96 ++++++++++
 .../kafka.strimzi.io/KafkaTopic/health.lua | 21 +++
 .../KafkaTopic/health_test.yaml | 12 ++
 .../KafkaTopic/testdata/degraded.yaml | 27 +++
 .../KafkaTopic/testdata/healthy.yaml | 24 +++
 .../testdata/progressing_noStatus.yaml | 18 ++
 .../kafka.strimzi.io/KafkaUser/health.lua | 21 +++
 .../KafkaUser/health_test.yaml | 12 ++
 .../KafkaUser/testdata/degraded.yaml | 58 ++++++
 .../KafkaUser/testdata/healthy.yaml | 56 ++++++
 .../testdata/progressing_noStatus.yaml | 48 +++++
 .../kiali.io/Kiali/health.lua | 23 +++
 .../kiali.io/Kiali/health_test.yaml | 13 ++
 .../kiali.io/Kiali/testdata/degraded.yaml | 37 ++++
 .../kiali.io/Kiali/testdata/healthy.yaml | 37 ++++
 .../kiali.io/Kiali/testdata/progressing.yaml | 27 +++
 .../ExternalSecret/health.lua | 17 ++
 .../ExternalSecret/health_test.yaml | 13 ++
 .../ExternalSecret/testdata/degraded.yaml | 14 ++
 .../ExternalSecret/testdata/healthy.yaml | 14 ++
 .../ExternalSecret/testdata/progressing.yaml | 10 ++
 .../kubevirt.io/VirtualMachine/health.lua | 29 +++
 .../VirtualMachine/health_test.yaml | 21 +++
 .../testdata/healthy_ready.yaml | 63 +++++++
 .../testdata/progressing_nostatus.yaml | 54 ++++++
 .../testdata/progressing_starting.yaml | 61 +++++++
 .../testdata/suspended_paused.yaml | 67 +++++++
 .../testdata/suspended_stopped.yaml | 60 +++++++
 .../VirtualMachineInstance/health.lua | 36 ++++
 .../VirtualMachineInstance/health_test.yaml | 21 +++
 .../testdata/healthy_ready.yaml | 104 +++++++++++
 .../testdata/progressing_nostatus.yaml | 45 +++++
 .../testdata/progressing_pending.yaml | 74 ++++++++
 .../testdata/progressing_scheduling.yaml | 92 ++++++++++
 .../testdata/suspended_paused.yaml | 110 ++++++++++++
 .../testdata/suspended_succeeded.yaml | 100 +++++++++++
 .../KeptnAppVersion/health.lua | 14 ++
 .../KeptnAppVersion/health_test.yaml | 13 ++
 .../KeptnAppVersion/testdata/degraded.yaml | 93 ++++++++++
 .../KeptnAppVersion/testdata/healthy.yaml | 93 ++++++++++
 .../KeptnAppVersion/testdata/progressing.yaml | 93 ++++++++++
 .../KeptnEvaluation/health.lua | 14 ++
 .../KeptnEvaluation/health_test.yaml | 13 ++
 .../KeptnEvaluation/testdata/degraded.yaml | 33 ++++
 .../KeptnEvaluation/testdata/healthy.yaml | 33 ++++
 .../KeptnEvaluation/testdata/progressing.yaml | 33 ++++
 .../lifecycle.keptn.sh/KeptnTask/health.lua | 14 ++
 .../KeptnTask/health_test.yaml | 13 ++
 .../KeptnTask/testdata/degraded.yaml | 36 ++++
 .../KeptnTask/testdata/healthy.yaml | 37 ++++
 .../KeptnTask/testdata/progressing.yaml | 36 ++++
 .../KeptnWorkloadInstance/health.lua | 14 ++
 .../KeptnWorkloadInstance/health_test.yaml | 13 ++
 .../testdata/degraded.yaml | 50 ++++++
 .../testdata/healthy.yaml | 51 ++++++
 .../testdata/progressing.yaml | 50 ++++++
 .../mariadb.mmontes.io/MariaDB/health.lua | 25 +++
 .../MariaDB/health_test.yaml | 25 +++
 .../MariaDB/testdata/mariadb_error.yaml | 27 +++
 .../MariaDB/testdata/no_status.yaml | 22 +++
 .../MariaDB/testdata/restore_complete.yaml | 32 ++++
 .../testdata/restore_not_complete.yaml | 32 ++++
 .../testdata/statefulset_not_ready.yaml | 27 +++
 .../MariaDB/testdata/statefulset_ready.yaml | 27 +++
 .../minio.min.io/Tenant/health.lua | 61 +++++++
 .../minio.min.io/Tenant/health_test.yaml | 45 +++++
 .../testdata/another_tenant_exists.yaml | 13 ++
 .../incorrect_tenant_credentials.yaml | 13 ++
 .../Tenant/testdata/initialized.yaml | 13 ++
 .../Tenant/testdata/no_status.yaml | 12 ++
 .../Tenant/testdata/out_of_control.yaml | 13 ++
 .../pool_decommissioning_not_allowed.yaml | 13 ++
 .../Tenant/testdata/provisioning.yaml | 13 ++
 .../Tenant/testdata/restarting_minio.yaml | 13 ++
 .../testdata/unknown_status_message.yaml | 13 ++
 .../Tenant/testdata/updating.yaml | 13 ++
 .../Tenant/testdata/versions_mismatch.yaml | 13 ++
 .../minio.min.io/Tenant/testdata/waiting.yaml | 13 ++
 .../Prometheus/health.lua | 23 +++
 .../Prometheus/health_test.yaml | 13 ++
 .../Prometheus/testdata/degraded.yaml | 142 +++++++++++++++
 .../Prometheus/testdata/healthy.yaml | 130 ++++++++++++++
 .../Prometheus/testdata/progressing.yaml | 132 ++++++++++++++
 .../ManagedCertificate/health.lua | 23 +++
 .../ManagedCertificate/health_test.yaml | 13 ++
 .../ManagedCertificate/testdata/active.yaml | 29 +++
 .../ManagedCertificate/testdata/failed.yaml | 28 +++
 .../testdata/provisioning.yaml | 28 +++
 .../OnePasswordItem/health.lua | 21 +++
 .../OnePasswordItem/health_test.yaml | 12 ++
 .../OnePasswordItem/testdata/degraded.yaml | 13 ++
 .../OnePasswordItem/testdata/healthy.yaml | 12 ++
 .../OnePasswordItem/testdata/new.yaml | 7 +
 .../KnativeEventing/health.lua | 36 ++++
 .../KnativeEventing/health_test.yaml | 17 ++
 .../KnativeEventing/testdata/degraded.yaml | 25 +++
 .../KnativeEventing/testdata/healthy.yaml | 25 +++
 .../KnativeEventing/testdata/progressing.yaml | 25 +++
 .../testdata/progressing_ready_false.yaml | 25 +++
 .../KnativeServing/health.lua | 42 +++++
 .../KnativeServing/health_test.yaml | 17 ++
 .../KnativeServing/testdata/degraded.yaml | 32 ++++
 .../KnativeServing/testdata/healthy.yaml | 32 ++++
 .../KnativeServing/testdata/progressing.yaml | 32 ++++
 .../testdata/progressing_ready_false.yaml | 32 ++++
 .../IngressController/health.lua | 31 ++++
 .../IngressController/health_test.yaml | 17 ++
 .../IngressController/testdata/degraded.yaml | 103 +++++++++++
 .../IngressController/testdata/healthy.yaml | 93 ++++++++++
 .../testdata/progressing_initialization.yaml | 36 ++++
 .../testdata/progressing_pod_rollout.yaml | 101 +++++++++++
 .../Subscription/health.lua | 34 ++++
 .../Subscription/health_test.yaml | 25 +++
 .../testdata/catalog_sources_unhealthy.yaml | 58 ++++++
 .../Subscription/testdata/healthy.yaml | 75 ++++++++
 .../testdata/install_plan_failed.yaml | 86 +++++++++
 .../testdata/install_plan_missing.yaml | 139 +++++++++++++++
 .../testdata/install_plan_pending.yaml | 78 +++++++++
 .../testdata/resolution_failed.yaml | 72 ++++++++
 .../pkg.crossplane.io/Provider/health.lua | 27 +++
 .../Provider/health_test.yaml | 17 ++
 .../Provider/testdata/degraded_healthy.yaml | 23 +++
 .../Provider/testdata/degraded_installed.yaml | 23 +++
 .../Provider/testdata/healthy.yaml | 23 +++
 .../testdata/progressing_noStatus.yaml | 12 ++
 .../platform.confluent.io/Connect/health.lua | 19 ++
 .../Connect/health_test.yaml | 9 +
 .../Connect/testdata/healthy.yaml | 49 ++++++
 .../Connect/testdata/progressing.yaml | 49 ++++++
 .../ControlCenter/health.lua | 19 ++
 .../ControlCenter/health_test.yaml | 9 +
 .../ControlCenter/testdata/healthy.yaml | 47 +++++
 .../ControlCenter/testdata/progressing.yaml | 46 +++++
 .../platform.confluent.io/Kafka/health.lua | 19 ++
 .../Kafka/health_test.yaml | 9 +
 .../Kafka/testdata/healthy.yaml | 63 +++++++
 .../Kafka/testdata/progressing.yaml | 63 +++++++
 .../platform.confluent.io/KsqlDB/health.lua | 19 ++
 .../KsqlDB/health_test.yaml | 9 +
 .../KsqlDB/testdata/healthy.yaml | 44 +++++
 .../KsqlDB/testdata/progressing.yaml | 44 +++++
 .../SchemaRegistry/health.lua | 19 ++
 .../SchemaRegistry/health_test.yaml | 9 +
 .../SchemaRegistry/testdata/healthy.yaml | 45 +++++
 .../SchemaRegistry/testdata/progressing.yaml | 45 +++++
 .../Zookeeper/health.lua | 19 ++
 .../Zookeeper/health_test.yaml | 9 +
 .../Zookeeper/testdata/healthy.yaml | 44 +++++
 .../Zookeeper/testdata/progressing.yaml | 44 +++++
 .../DNSSDServiceInstance/health.lua | 29 +++
 .../DNSSDServiceInstance/health_test.yaml | 29 +++
 .../testdata/degraded_advertiseError.yaml | 35 ++++
 .../testdata/degraded_notAdopted.yaml | 35 ++++
 .../testdata/degraded_unadvertiseError.yaml | 35 ++++
 .../testdata/healthy.yaml | 35 ++++
 .../testdata/progressing_negativeBrowse.yaml | 35 ++++
 .../testdata/progressing_negativeLookup.yaml | 35 ++++
 .../testdata/unknown_discoveryError.yaml | 35 ++++
 .../PubSubSubscription/health.lua | 39 +++++
 .../PubSubSubscription/health_test.yaml | 21 +++
 .../testdata/dependency_not_found.yaml | 9 +
 .../testdata/dependency_not_ready.yaml | 9 +
 .../testdata/up_to_date.yaml | 9 +
 .../testdata/update_failed.yaml | 9 +
 .../testdata/update_in_progress.yaml | 9 +
 .../PubSubTopic/health.lua | 39 +++++
 .../PubSubTopic/health_test.yaml | 21 +++
 .../testdata/dependency_not_found.yaml | 9 +
 .../testdata/dependency_not_ready.yaml | 9 +
 .../PubSubTopic/testdata/up_to_date.yaml | 9 +
 .../PubSubTopic/testdata/update_failed.yaml | 9 +
 .../testdata/update_in_progress.yaml | 9 +
 .../PerconaXtraDBCluster/health.lua | 38 ++++
 .../PerconaXtraDBCluster/health_test.yaml | 25 +++
 .../PerconaXtraDBCluster/testdata/error.yaml | 24 +++
 .../testdata/initializing.yaml | 22 +++
 .../PerconaXtraDBCluster/testdata/paused.yaml | 22 +++
 .../PerconaXtraDBCluster/testdata/ready.yaml | 22 +++
 .../testdata/stopping.yaml | 22 +++
 .../testdata/unknown.yaml | 22 +++
 .../Project/health.lua | 39 +++++
 .../Project/health_test.yaml | 21 +++
 .../testdata/dependency_not_found.yaml | 9 +
 .../testdata/dependency_not_ready.yaml | 9 +
 .../Project/testdata/up_to_date.yaml | 9 +
 .../Project/testdata/update_failed.yaml | 9 +
 .../Project/testdata/update_in_progress.yaml | 9 +
 .../rollouts.kruise.io/Rollout/health.lua | 31 ++++
 .../Rollout/health_test.yaml | 17 ++
 .../Rollout/testdata/degraded.yaml | 50 ++++++
 .../Rollout/testdata/healthy.yaml | 56 ++++++
 .../Rollout/testdata/progressing.yaml | 48 +++++
 .../Rollout/testdata/suspended.yaml | 50 ++++++
 .../route.openshift.io/Route/health.lua | 37 ++++
 .../route.openshift.io/Route/health_test.yaml | 13 ++
 .../Route/testdata/degraded.yaml | 43 +++++
 .../Route/testdata/healthy.yaml | 43 +++++
 .../Route/testdata/progressing.yaml | 43 +++++
 .../ResourceRecordSet/health_test.yaml | 25 +++
 .../ResourceRecordSet/heatlh.lua | 41 +++++
 .../testdata/degraded_reconcileError.yaml | 35 ++++
 .../ResourceRecordSet/testdata/healthy.yaml | 29 +++
 .../testdata/progressing_creating.yaml | 29 +++
 .../testdata/progressing_noStatus.yaml | 19 ++
 .../testdata/suspended_reconcilePaused.yaml | 27 +++
 .../serving.knative.dev/Service/health.lua | 40 +++++
 .../Service/health_test.yaml | 13 ++
 .../Service/testdata/degraded.yaml | 21 +++
 .../Service/testdata/healthy.yaml | 17 ++
 .../Service/testdata/progressing.yaml | 21 +++
 .../InferenceService/health.lua | 40 +++++
 .../InferenceService/health_test.yaml | 13 ++
 .../InferenceService/testdata/degraded.yaml | 30 ++++
 .../InferenceService/testdata/healthy.yaml | 25 +++
 .../testdata/progressing.yaml | 28 +++
 .../VolumeSnapshot/health.lua | 18 ++
 .../VolumeSnapshot/health_test.yaml | 14 ++
 .../VolumeSnapshot/testdata/bad.yaml | 14 ++
 .../VolumeSnapshot/testdata/good.yaml | 15 ++
 .../VolumeSnapshot/testdata/initializing.yaml | 7 +
 .../VolumeSnapshotContent/health.lua | 18 ++
 .../VolumeSnapshotContent/health_test.yaml | 13 ++
 .../VolumeSnapshotContent/testdata/bad.yaml | 12 ++
 .../VolumeSnapshotContent/testdata/good.yaml | 20 +++
 .../testdata/initializing.yaml | 7 +
 .../SparkApplication/health.lua | 144 +++++++++++++++
 .../SparkApplication/health_test.yaml | 29 +++
 .../SparkApplication/testdata/degraded.yaml | 33 ++++
 .../SparkApplication/testdata/healthy.yaml | 32 ++++
 .../testdata/healthy_dynamic_alloc.yaml | 37 ++++
 .../healthy_dynamic_alloc_dstream.yaml | 35 ++++
 .../healthy_dynamic_alloc_operator_api.yaml | 38 ++++
 ...thy_dynamic_alloc_without_spec_config.yaml | 31 ++++
 .../testdata/progressing.yaml | 32 ++++
 .../spot.io/SpotDeployment/health.lua | 41 +++++
 .../spot.io/SpotDeployment/health_test.yaml | 12 ++
 .../testdata/degraded_spotdeployment.yaml | 53 ++++++
 .../testdata/healthy_spotdeployment.yaml | 54 ++++++
 .../testdata/invalid_spec_spotdeployment.yaml | 54 ++++++
 .../SQLDatabase/health.lua | 39 +++++
 .../SQLDatabase/health_test.yaml | 21 +++
 .../testdata/dependency_not_found.yaml | 9 +
 .../testdata/dependency_not_ready.yaml | 9 +
 .../SQLDatabase/testdata/up_to_date.yaml | 9 +
 .../SQLDatabase/testdata/update_failed.yaml | 9 +
 .../testdata/update_in_progress.yaml | 9 +
 .../SQLInstance/health.lua | 39 +++++
 .../SQLInstance/health_test.yaml | 21 +++
 .../testdata/dependency_not_found.yaml | 9 +
 .../testdata/dependency_not_ready.yaml | 9 +
 .../SQLInstance/testdata/up_to_date.yaml | 9 +
 .../SQLInstance/testdata/update_failed.yaml | 9 +
 .../testdata/update_in_progress.yaml | 9 +
 .../SQLUser/health_test.yaml | 21 +++
 .../testdata/dependency_not_found.yaml | 9 +
 .../testdata/dependency_not_ready.yaml | 9 +
 .../SQLUser/testdata/up_to_date.yaml | 9 +
 .../SQLUser/testdata/update_failed.yaml | 9 +
 .../SQLUser/testdata/update_in_progress.yaml | 9 +
 .../ClusterStackInstall/health.lua | 20 +++
 .../ClusterStackInstall/health_test.yaml | 13 ++
 .../testdata/being_created_stack.yaml | 34 ++++
 .../testdata/installed_stack.yaml | 40 +++++
 .../testdata/wait_stack.yaml | 17 ++
 .../StorageBucket/health.lua | 39 +++++
 .../StorageBucket/health_test.yaml | 21 +++
 .../testdata/dependency_not_found.yaml | 9 +
 .../testdata/dependency_not_ready.yaml | 9 +
 .../StorageBucket/testdata/up_to_date.yaml | 9 +
 .../StorageBucket/testdata/update_failed.yaml | 9 +
 .../testdata/update_in_progress.yaml | 9 +
 .../StorageBucketAccessControl/health.lua | 39 +++++
 .../health_test.yaml | 21 +++
 .../testdata/dependency_not_found.yaml | 9 +
 .../testdata/dependency_not_ready.yaml | 9 +
 .../testdata/up_to_date.yaml | 9 +
 .../testdata/update_failed.yaml | 9 +
 .../testdata/update_in_progress.yaml | 9 +
 .../health.lua | 39 +++++
 .../health_test.yaml | 21 +++
 .../testdata/dependency_not_found.yaml | 9 +
 .../testdata/dependency_not_ready.yaml | 9 +
 .../testdata/up_to_date.yaml | 9 +
 .../testdata/update_failed.yaml | 9 +
 .../testdata/update_in_progress.yaml | 9 +
 .../tower.ansible.com/AnsibleJob/health.lua | 25 +++
 .../AnsibleJob/health_test.yaml | 37 ++++
 .../testdata/degraded_canceled.yaml | 27 +++
 .../AnsibleJob/testdata/degraded_error.yaml | 27 +++
 .../AnsibleJob/testdata/degraded_failed.yaml | 27 +++
 .../AnsibleJob/testdata/healthy.yaml | 27 +++
 .../AnsibleJob/testdata/progressing_new.yaml | 25 +++
 .../testdata/progressing_noStatus.yaml | 17 ++
 .../testdata/progressing_pending.yaml | 25 +++
 .../testdata/progressing_running.yaml | 25 +++
 .../testdata/progressing_waiting.yaml | 25 +++
 .../TridentBackendConfig/health.lua | 16 ++
 .../TridentBackendConfig/health_test.yaml | 13 ++
 .../testdata/degraded.yaml | 95 ++++++++++
 .../testdata/healthy.yaml | 94 ++++++++++
 .../testdata/progressing.yaml | 91 ++++++++++
 .../TridentOrchestrator/health.lua | 16 ++
 .../TridentOrchestrator/health_test.yaml | 13 ++
 .../testdata/degraded.yaml | 91 ++++++++++
 .../TridentOrchestrator/testdata/healthy.yaml | 91 ++++++++++
 .../testdata/progressing.yaml | 91 ++++++++++
 .../ClusterResourceBinding/health.lua | 37 ++++
 .../ClusterResourceBinding/health_test.yaml | 31 ++++
 .../testdata/degraded_unapplied.yaml | 46 +++++
 .../testdata/degraded_unhealth.yaml | 46 +++++
 .../testdata/degraded_unknown.yaml | 46 +++++
 .../testdata/health.yaml | 83 +++++++++
 .../testdata/health_unknown.yaml | 46 +++++
 .../testdata/progressing.yaml | 41 +++++
 .../progressing_aggregatedStatus.yaml | 46 +++++
 .../testdata/progressing_cluster.yaml | 34 ++++
 .../ResourceBinding/health.lua | 37 ++++
 .../ResourceBinding/health_test.yaml | 31 ++++
 .../testdata/degraded_unapplied.yaml | 50 ++++++
 .../testdata/degraded_unhealth.yaml | 47 +++++
 .../testdata/degraded_unknown.yaml | 47 +++++
 .../ResourceBinding/testdata/health.yaml | 84 +++++++++
 .../testdata/health_unknown.yaml | 47 +++++
 .../ResourceBinding/testdata/progressing.yaml | 42 +++++
 .../progressing_aggregatedStatus.yaml | 47 +++++
 .../testdata/progressing_cluster.yaml | 35 ++++
 .../ZookeeperCluster/health.lua | 11 ++
 .../ZookeeperCluster/health_test.yaml | 9 +
 .../ZookeeperCluster/testdata/healthy.yaml | 21 +++
 .../testdata/progressing.yaml | 21 +++
 741 files changed, 23912 insertions(+), 1 deletion(-)
 delete mode 120000 pkg/resource_customizations
 create mode 100644 pkg/resource_customizations/apps.kruise.io/AdvancedCronJob/health.lua
 create mode 100644 pkg/resource_customizations/apps.kruise.io/AdvancedCronJob/health_test.yaml
 create mode 100644 pkg/resource_customizations/apps.kruise.io/AdvancedCronJob/testdata/activeJobs.yaml
 create mode 100644 pkg/resource_customizations/apps.kruise.io/AdvancedCronJob/testdata/lastScheduleTime.yaml
 create mode 100644 pkg/resource_customizations/apps.kruise.io/AdvancedCronJob/testdata/notScheduled.yaml
 create mode 100644 pkg/resource_customizations/apps.kruise.io/AdvancedCronJob/testdata/suspended.yaml
 create mode 100644 pkg/resource_customizations/apps.kruise.io/BroadcastJob/health.lua
 create mode 100644 pkg/resource_customizations/apps.kruise.io/BroadcastJob/health_test.yaml
 create mode 100644 pkg/resource_customizations/apps.kruise.io/BroadcastJob/testdata/failed.yaml
 create mode 100644 pkg/resource_customizations/apps.kruise.io/BroadcastJob/testdata/running.yaml
 create mode 100644 pkg/resource_customizations/apps.kruise.io/BroadcastJob/testdata/succeeded.yaml
 create mode 100644 pkg/resource_customizations/apps.kruise.io/BroadcastJob/testdata/suspended.yaml
 create mode 100644 pkg/resource_customizations/apps.kruise.io/CloneSet/health.lua
 create mode 100644 pkg/resource_customizations/apps.kruise.io/CloneSet/health_test.yaml
 create mode 100644 pkg/resource_customizations/apps.kruise.io/CloneSet/testdata/degraded.yaml
 create mode 100644 pkg/resource_customizations/apps.kruise.io/CloneSet/testdata/healthy.yaml
 create mode 100644 pkg/resource_customizations/apps.kruise.io/CloneSet/testdata/partition_suspended.yaml
 create mode 100644 pkg/resource_customizations/apps.kruise.io/CloneSet/testdata/suspended.yaml
 create mode 100644 pkg/resource_customizations/apps.kruise.io/CloneSet/testdata/unknown.yaml
 create mode 100644 pkg/resource_customizations/apps.kruise.io/DaemonSet/health.lua
 create mode 100644 pkg/resource_customizations/apps.kruise.io/DaemonSet/health_test.yaml
 create mode 100644 pkg/resource_customizations/apps.kruise.io/DaemonSet/testdata/degraded.yaml
 create mode 100644 pkg/resource_customizations/apps.kruise.io/DaemonSet/testdata/healthy.yaml
 create mode 100644 pkg/resource_customizations/apps.kruise.io/DaemonSet/testdata/partition_suspended.yaml
 create mode 100644 pkg/resource_customizations/apps.kruise.io/DaemonSet/testdata/suspended.yaml
 create mode 100644 pkg/resource_customizations/apps.kruise.io/DaemonSet/testdata/unknown.yaml
 create mode 100644 pkg/resource_customizations/apps.kruise.io/StatefulSet/health.lua
 create mode 100644 pkg/resource_customizations/apps.kruise.io/StatefulSet/health_test.yaml
 create mode 100644 pkg/resource_customizations/apps.kruise.io/StatefulSet/testdata/degraded.yaml
 create mode 100644 pkg/resource_customizations/apps.kruise.io/StatefulSet/testdata/healthy.yaml
 create mode 100644 pkg/resource_customizations/apps.kruise.io/StatefulSet/testdata/partition_suspended.yaml
 create mode 100644 pkg/resource_customizations/apps.kruise.io/StatefulSet/testdata/suspended.yaml
 create mode 100644 pkg/resource_customizations/apps.kruise.io/StatefulSet/testdata/unknown.yaml
 create mode 100644 pkg/resource_customizations/apps.openshift.io/DeploymentConfig/health.lua
 create mode 100644 pkg/resource_customizations/apps.openshift.io/DeploymentConfig/health_test.yaml
 create mode 100644 pkg/resource_customizations/apps.openshift.io/DeploymentConfig/testdata/degraded.yaml
 create mode 100644 pkg/resource_customizations/apps.openshift.io/DeploymentConfig/testdata/healthy.yaml
 create mode 100644 pkg/resource_customizations/apps.openshift.io/DeploymentConfig/testdata/healthy_zero_replicas.yaml
 create mode 100644 pkg/resource_customizations/apps.openshift.io/DeploymentConfig/testdata/progressing.yaml
 create mode 100644 pkg/resource_customizations/apps.openshift.io/DeploymentConfig/testdata/progressing_rc_updated.yaml
 create mode 100644 pkg/resource_customizations/apps/DaemonSet/actions/action_test.yaml
 create mode 100644 pkg/resource_customizations/apps/DaemonSet/actions/discovery.lua
 create mode 100644 pkg/resource_customizations/apps/DaemonSet/actions/restart/action.lua
 create mode 100644 pkg/resource_customizations/apps/DaemonSet/actions/testdata/daemonset-restarted.yaml
 create mode 100644 pkg/resource_customizations/apps/DaemonSet/actions/testdata/daemonset.yaml
 create mode 100644 pkg/resource_customizations/apps/Deployment/actions/action_test.yaml
 create mode 100644 pkg/resource_customizations/apps/Deployment/actions/discovery.lua
 create mode 100644 pkg/resource_customizations/apps/Deployment/actions/pause/action.lua
 create mode 100644 pkg/resource_customizations/apps/Deployment/actions/restart/action.lua
 create mode 100644 pkg/resource_customizations/apps/Deployment/actions/resume/action.lua
 create mode 100644 pkg/resource_customizations/apps/Deployment/actions/testdata/deployment-pause.yaml
 create mode 100644 pkg/resource_customizations/apps/Deployment/actions/testdata/deployment-restarted.yaml
 create mode 100644 pkg/resource_customizations/apps/Deployment/actions/testdata/deployment-resume.yaml
 create mode 100644 pkg/resource_customizations/apps/Deployment/actions/testdata/deployment.yaml
 create mode 100644 pkg/resource_customizations/apps/StatefulSet/actions/action_test.yaml
 create mode 100644 pkg/resource_customizations/apps/StatefulSet/actions/discovery.lua
 create mode 100644 pkg/resource_customizations/apps/StatefulSet/actions/restart/action.lua
 create mode 100644 pkg/resource_customizations/apps/StatefulSet/actions/testdata/statefulset-restarted.yaml
 create mode 100644 pkg/resource_customizations/apps/StatefulSet/actions/testdata/statefulset.yaml
 create mode 100644 pkg/resource_customizations/argoproj.io/AnalysisRun/actions/action_test.yaml
 create mode 100644 pkg/resource_customizations/argoproj.io/AnalysisRun/actions/discovery.lua
 create mode 100644 pkg/resource_customizations/argoproj.io/AnalysisRun/actions/terminate/action.lua
 create mode 100644 pkg/resource_customizations/argoproj.io/AnalysisRun/actions/testdata/failedAnalysisRun.yaml
 create mode 100644 pkg/resource_customizations/argoproj.io/AnalysisRun/actions/testdata/runningAnalysisRun.yaml
 create mode 100644 pkg/resource_customizations/argoproj.io/AnalysisRun/actions/testdata/runningAnalysisRun_terminated.yaml
 create mode 100644 pkg/resource_customizations/argoproj.io/AnalysisRun/health.lua
 create mode 100644 pkg/resource_customizations/argoproj.io/AnalysisRun/health_test.yaml
 create mode 100644 pkg/resource_customizations/argoproj.io/AnalysisRun/testdata/errorAnalysisRun.yaml
 create mode 100644 pkg/resource_customizations/argoproj.io/AnalysisRun/testdata/errorAnalysisRunWithStatusMessage.yaml
 create mode 100644 pkg/resource_customizations/argoproj.io/AnalysisRun/testdata/failedAnalysisRun.yaml
 create mode 100644 pkg/resource_customizations/argoproj.io/AnalysisRun/testdata/failedAnalysisRunWithStatusMessage.yaml
 create mode 100644 pkg/resource_customizations/argoproj.io/AnalysisRun/testdata/inconclusiveAnalysisRun.yaml
 create mode 100644 pkg/resource_customizations/argoproj.io/AnalysisRun/testdata/inconclusiveAnalysisRunWithStatusMessage.yaml
 create mode 100644 pkg/resource_customizations/argoproj.io/AnalysisRun/testdata/noStatusAnalysisRun.yaml
 create mode 100644 pkg/resource_customizations/argoproj.io/AnalysisRun/testdata/pendingAnalysisRun.yaml
 create mode 100644 pkg/resource_customizations/argoproj.io/AnalysisRun/testdata/runningAnalysisRun.yaml
 create mode 100644 pkg/resource_customizations/argoproj.io/AnalysisRun/testdata/successfulAnalysisRun.yaml
 create mode 100644 pkg/resource_customizations/argoproj.io/AnalysisRun/testdata/terminatedAnalysisRun.yaml
 create mode 100644 pkg/resource_customizations/argoproj.io/ApplicationSet/health.lua
 create mode 100644 pkg/resource_customizations/argoproj.io/ApplicationSet/health_test.yaml
 create mode 100644 pkg/resource_customizations/argoproj.io/ApplicationSet/testdata/errorApplicationSetWithStatusMessage.yaml
 create mode 100644 pkg/resource_customizations/argoproj.io/ApplicationSet/testdata/healthyApplicationSet.yaml
 create mode 100644 pkg/resource_customizations/argoproj.io/ApplicationSet/testdata/noStatusApplicationSet.yaml
 create mode 100644 pkg/resource_customizations/argoproj.io/CronWorkflow/actions/action_test.yaml
 create mode 100644 pkg/resource_customizations/argoproj.io/CronWorkflow/actions/create-workflow/action.lua
 create mode 100644 pkg/resource_customizations/argoproj.io/CronWorkflow/actions/discovery.lua
 create mode 100644 pkg/resource_customizations/argoproj.io/CronWorkflow/actions/testdata/cronworkflow-without-label.yaml
 create mode 100644 pkg/resource_customizations/argoproj.io/CronWorkflow/actions/testdata/cronworkflow.yaml
 create mode 100644 pkg/resource_customizations/argoproj.io/CronWorkflow/actions/testdata/workflow-without-label.yaml
 create mode 100644 pkg/resource_customizations/argoproj.io/CronWorkflow/actions/testdata/workflow.yaml
 create mode 100644 pkg/resource_customizations/argoproj.io/CronWorkflow/health.lua
 create mode 100644 pkg/resource_customizations/argoproj.io/CronWorkflow/health_test.yaml
 create mode 100644 pkg/resource_customizations/argoproj.io/CronWorkflow/testdata/degradedCronWorkflowWithSpecError.yaml
 create mode 100644 pkg/resource_customizations/argoproj.io/CronWorkflow/testdata/degradedCronWorkflowWithSubmissionError.yaml
 create mode 100644 pkg/resource_customizations/argoproj.io/CronWorkflow/testdata/healthyCronWorkflow.yaml
 create mode 100644 pkg/resource_customizations/argoproj.io/CronWorkflow/testdata/noConditionsCronWorkflow.yaml
 create mode 100644 pkg/resource_customizations/argoproj.io/CronWorkflow/testdata/withConditionButHealthyCronWorkflow.yaml
 create mode 100644 pkg/resource_customizations/argoproj.io/EventBus/health.lua
 create mode 100644 pkg/resource_customizations/argoproj.io/EventBus/health_test.yaml
 create mode 100644 pkg/resource_customizations/argoproj.io/EventBus/testdata/degraded.yaml
 create mode 100644 pkg/resource_customizations/argoproj.io/EventBus/testdata/healthy.yaml
 create mode 100644 pkg/resource_customizations/argoproj.io/Experiment/health.lua
 create mode 100644 pkg/resource_customizations/argoproj.io/Experiment/health_test.yaml
 create mode 100644 pkg/resource_customizations/argoproj.io/Experiment/testdata/errorExperiment.yaml
 create mode 100644 pkg/resource_customizations/argoproj.io/Experiment/testdata/failedExperiment.yaml
 create mode 100644 pkg/resource_customizations/argoproj.io/Experiment/testdata/noStatusExperiment.yaml
 create mode 100644 pkg/resource_customizations/argoproj.io/Experiment/testdata/pendingExperiment.yaml
 create mode 100644 pkg/resource_customizations/argoproj.io/Experiment/testdata/runningExperiment.yaml
 create mode 100644 pkg/resource_customizations/argoproj.io/Experiment/testdata/successfulExperiment.yaml
 create mode 100644 pkg/resource_customizations/argoproj.io/Rollout/actions/abort/action.lua
 create mode 100644 pkg/resource_customizations/argoproj.io/Rollout/actions/action_test.yaml
 create mode 100644 pkg/resource_customizations/argoproj.io/Rollout/actions/discovery.lua
 create mode 100644 pkg/resource_customizations/argoproj.io/Rollout/actions/promote-full/action.lua
 create mode 100644 pkg/resource_customizations/argoproj.io/Rollout/actions/restart/action.lua
 create mode 100644 pkg/resource_customizations/argoproj.io/Rollout/actions/resume/action.lua
 create mode 100644 pkg/resource_customizations/argoproj.io/Rollout/actions/retry/action.lua
 create mode 100644 pkg/resource_customizations/argoproj.io/Rollout/actions/testdata/aborted_bg_rollout.yaml
 create mode 100644 pkg/resource_customizations/argoproj.io/Rollout/actions/testdata/aborted_rollout.yaml
 create mode 100644 pkg/resource_customizations/argoproj.io/Rollout/actions/testdata/has_pause_condition_rollout.yaml
 create mode 100644 pkg/resource_customizations/argoproj.io/Rollout/actions/testdata/has_pause_condition_rollout_aborted.yaml
 create mode 100644 pkg/resource_customizations/argoproj.io/Rollout/actions/testdata/healthy_rollout.yaml
 create mode 100644 pkg/resource_customizations/argoproj.io/Rollout/actions/testdata/no_pause_condition_rollout.yaml
 create mode 100644 pkg/resource_customizations/argoproj.io/Rollout/actions/testdata/one_replica_rollout.yaml
 create mode 100644 pkg/resource_customizations/argoproj.io/Rollout/actions/testdata/pre_v0.6_nil_paused_rollout.yaml
 create mode 100644 pkg/resource_customizations/argoproj.io/Rollout/actions/testdata/pre_v0.6_not_paused_rollout.yaml
 create mode 100644 pkg/resource_customizations/argoproj.io/Rollout/actions/testdata/pre_v0.6_paused_rollout.yaml
 create mode 100644 pkg/resource_customizations/argoproj.io/Rollout/actions/testdata/promote-full_rollout.yaml
 create mode 100644 pkg/resource_customizations/argoproj.io/Rollout/actions/testdata/retried_rollout.yaml
 create mode 100644 pkg/resource_customizations/argoproj.io/Rollout/actions/testdata/rollout_not_restarted.yaml
 create mode 100644 pkg/resource_customizations/argoproj.io/Rollout/actions/testdata/rollout_restarted.yaml
 create mode 100644 pkg/resource_customizations/argoproj.io/Rollout/actions/testdata/three_replica_rollout.yaml
 create mode 100644 pkg/resource_customizations/argoproj.io/Rollout/actions/testdata/v0.9_aborted_bg_rollout.yaml
 create mode 100644 pkg/resource_customizations/argoproj.io/Rollout/actions/testdata/v0.9_aborted_rollout.yaml
 create mode 100644 pkg/resource_customizations/argoproj.io/Rollout/actions/testdata/v0.9_promote-full_rollout.yaml
 create mode 100644 pkg/resource_customizations/argoproj.io/Rollout/health.lua
 create mode 100644 pkg/resource_customizations/argoproj.io/Rollout/health_test.yaml
 create mode 100644 pkg/resource_customizations/argoproj.io/Rollout/testdata/bluegreen/healthy_servingActiveService.yaml
 create mode 100644 pkg/resource_customizations/argoproj.io/Rollout/testdata/bluegreen/progressing_addingMoreReplicas.yaml
 create mode 100644 pkg/resource_customizations/argoproj.io/Rollout/testdata/bluegreen/progressing_waitingUntilAvailable.yaml
 create mode 100644 pkg/resource_customizations/argoproj.io/Rollout/testdata/canary/healthy_emptyStepsList.yaml
 create mode 100644 pkg/resource_customizations/argoproj.io/Rollout/testdata/canary/healthy_executedAllSteps.yaml
 create mode 100644 pkg/resource_customizations/argoproj.io/Rollout/testdata/canary/healthy_executedAllStepsPreV0.8.yaml
 create mode 100644 pkg/resource_customizations/argoproj.io/Rollout/testdata/canary/healthy_noSteps.yaml
 create mode 100644 pkg/resource_customizations/argoproj.io/Rollout/testdata/canary/progressing_killingOldReplicas.yaml
 create mode 100644 pkg/resource_customizations/argoproj.io/Rollout/testdata/canary/progressing_noSteps.yaml
 create mode 100644 pkg/resource_customizations/argoproj.io/Rollout/testdata/canary/progressing_setWeightStep.yaml
 create mode 100644 pkg/resource_customizations/argoproj.io/Rollout/testdata/degraded_abortedRollout.yaml
 create mode 100644 pkg/resource_customizations/argoproj.io/Rollout/testdata/degraded_invalidSpec.yaml
 create mode 100644 pkg/resource_customizations/argoproj.io/Rollout/testdata/degraded_rolloutTimeout.yaml
 create mode 100644 pkg/resource_customizations/argoproj.io/Rollout/testdata/degraded_statusPhaseMessage.yaml
 create mode 100644 pkg/resource_customizations/argoproj.io/Rollout/testdata/healthy_legacy_v0.9_observedGeneration.yaml
 create mode 100644 pkg/resource_customizations/argoproj.io/Rollout/testdata/healthy_legacy_v0.9_observedGeneration_numeric.yaml
 create mode 100644 pkg/resource_customizations/argoproj.io/Rollout/testdata/healthy_legacy_v1.0_newWorkloadGeneration.yaml
 create mode 100644 pkg/resource_customizations/argoproj.io/Rollout/testdata/healthy_newWorkloadGeneration.yaml
 create mode 100644 pkg/resource_customizations/argoproj.io/Rollout/testdata/newRolloutWithoutStatus.yaml
 create mode 100644 pkg/resource_customizations/argoproj.io/Rollout/testdata/progressing_newGeneration.yaml
 create mode 100644 pkg/resource_customizations/argoproj.io/Rollout/testdata/progressing_newWorkloadGeneration.yaml
 create mode 100644 pkg/resource_customizations/argoproj.io/Rollout/testdata/suspended_controllerPause.yaml
 create mode 100644 pkg/resource_customizations/argoproj.io/Rollout/testdata/suspended_userPause.yaml
 create mode 100644 pkg/resource_customizations/argoproj.io/Rollout/testdata/suspended_v1.0_pausedRollout.yaml
 create mode 100644 pkg/resource_customizations/argoproj.io/WorkflowTemplate/actions/action_test.yaml
 create mode 100644 pkg/resource_customizations/argoproj.io/WorkflowTemplate/actions/create-workflow/action.lua
 create mode 100644 pkg/resource_customizations/argoproj.io/WorkflowTemplate/actions/discovery.lua
 create mode 100644 pkg/resource_customizations/argoproj.io/WorkflowTemplate/actions/testdata/workflow.yaml
 create mode 100644 pkg/resource_customizations/argoproj.io/WorkflowTemplate/actions/testdata/workflowtemplate.yaml
 create mode 100644 pkg/resource_customizations/batch/CronJob/actions/action_test.yaml
 create mode 100644 pkg/resource_customizations/batch/CronJob/actions/create-job/action.lua
 create mode 100644 pkg/resource_customizations/batch/CronJob/actions/discovery.lua
 create mode 100644 pkg/resource_customizations/batch/CronJob/actions/testdata/cronjob.yaml
 create mode 100644 pkg/resource_customizations/batch/CronJob/actions/testdata/job.yaml
 create mode 100644 pkg/resource_customizations/beat.k8s.elastic.co/Beat/health.lua
 create mode 100644 pkg/resource_customizations/beat.k8s.elastic.co/Beat/health_test.yaml
 create mode 100644 pkg/resource_customizations/beat.k8s.elastic.co/Beat/testdata/invalid.yaml
 create mode 100644 pkg/resource_customizations/beat.k8s.elastic.co/Beat/testdata/progressing.yaml
 create mode 100644 pkg/resource_customizations/beat.k8s.elastic.co/Beat/testdata/ready_green.yaml
 create mode 100644 pkg/resource_customizations/beat.k8s.elastic.co/Beat/testdata/ready_red.yaml
 create mode 100644 pkg/resource_customizations/beat.k8s.elastic.co/Beat/testdata/ready_yellow.yaml
 create mode 100644 pkg/resource_customizations/beat.k8s.elastic.co/Beat/testdata/ready_yellow_single_node.yaml
 create mode 100644 pkg/resource_customizations/beat.k8s.elastic.co/Beat/testdata/unknown.yaml
 create mode 100644 pkg/resource_customizations/bitnami.com/SealedSecret/health.lua
 create mode 100644 pkg/resource_customizations/bitnami.com/SealedSecret/health_test.yaml
 create mode 100644 pkg/resource_customizations/bitnami.com/SealedSecret/testdata/degraded.yaml
 create mode 100644 pkg/resource_customizations/bitnami.com/SealedSecret/testdata/healthy.yaml
 create mode 100644 pkg/resource_customizations/bitnami.com/SealedSecret/testdata/progressing.yaml
 create mode 100644 pkg/resource_customizations/cassandra.rook.io/Cluster/health.lua
 create mode 100644 pkg/resource_customizations/cassandra.rook.io/Cluster/health_test.yaml
 create mode 100644 pkg/resource_customizations/cassandra.rook.io/Cluster/testdata/healthy.yaml
 create mode 100644 pkg/resource_customizations/cassandra.rook.io/Cluster/testdata/progressing.yaml
 create mode 100644 pkg/resource_customizations/cdi.kubevirt.io/DataVolume/health.lua
 create mode 100644 pkg/resource_customizations/cdi.kubevirt.io/DataVolume/health_test.yaml
 create mode 100644 pkg/resource_customizations/cdi.kubevirt.io/DataVolume/testdata/degraded_badurl.yaml
 create mode 100644 pkg/resource_customizations/cdi.kubevirt.io/DataVolume/testdata/healthy_succeeded.yaml
 create mode 100644 pkg/resource_customizations/cdi.kubevirt.io/DataVolume/testdata/progressing_containercreating.yaml
 create mode 100644 pkg/resource_customizations/cdi.kubevirt.io/DataVolume/testdata/progressing_importing.yaml
 create mode 100644 pkg/resource_customizations/cdi.kubevirt.io/DataVolume/testdata/progressing_nostatus.yaml
 create mode 100644 pkg/resource_customizations/cert-manager.io/Certificate/health.lua
 create mode 100644 pkg/resource_customizations/cert-manager.io/Certificate/health_test.yaml
 create mode 100644 pkg/resource_customizations/cert-manager.io/Certificate/testdata/degraded_configError.yaml
 create mode 100644 pkg/resource_customizations/cert-manager.io/Certificate/testdata/healthy_issued.yaml
 create mode 100644 pkg/resource_customizations/cert-manager.io/Certificate/testdata/healthy_renewed.yaml
 create mode 100644 pkg/resource_customizations/cert-manager.io/Certificate/testdata/progressing_issuing.yaml
 create mode 100644 pkg/resource_customizations/cert-manager.io/Certificate/testdata/progressing_issuing_last.yaml
 create mode 100644 pkg/resource_customizations/cert-manager.io/Certificate/testdata/progressing_noStatus.yaml
 create mode 100644 pkg/resource_customizations/cert-manager.io/ClusterIssuer/health.lua
 create mode 100644 pkg/resource_customizations/cert-manager.io/ClusterIssuer/health_test.yaml
 create mode 100644 pkg/resource_customizations/cert-manager.io/ClusterIssuer/testdata/degraded_acmeFailed.yaml
 create mode 100644 pkg/resource_customizations/cert-manager.io/ClusterIssuer/testdata/healthy_registered.yaml
 create mode 100644 pkg/resource_customizations/cert-manager.io/ClusterIssuer/testdata/progressing_noStatus.yaml
 create mode 100644 pkg/resource_customizations/cert-manager.io/Issuer/health.lua
 create mode 100644 pkg/resource_customizations/cert-manager.io/Issuer/health_test.yaml
 create mode 100644 pkg/resource_customizations/cert-manager.io/Issuer/testdata/degraded_acmeFailed.yaml
 create mode 100644 pkg/resource_customizations/cert-manager.io/Issuer/testdata/healthy_registered.yaml
 create mode 100644 pkg/resource_customizations/cert-manager.io/Issuer/testdata/progressing_noStatus.yaml
 create mode 100644 pkg/resource_customizations/certmanager.k8s.io/Certificate/health.lua
 create mode 100644 pkg/resource_customizations/certmanager.k8s.io/Certificate/health_test.yaml
 create mode 100644 pkg/resource_customizations/certmanager.k8s.io/Certificate/testdata/degraded_configError.yaml
 create mode 100644 pkg/resource_customizations/certmanager.k8s.io/Certificate/testdata/healthy_issued.yaml
 create mode 100644 pkg/resource_customizations/certmanager.k8s.io/Certificate/testdata/healthy_renewed.yaml
 create mode 100644 pkg/resource_customizations/certmanager.k8s.io/Certificate/testdata/progressing_noStatus.yaml
 create mode 100644 pkg/resource_customizations/certmanager.k8s.io/Issuer/health.lua
 create mode 100644 pkg/resource_customizations/certmanager.k8s.io/Issuer/health_test.yaml
 create mode 100644 pkg/resource_customizations/certmanager.k8s.io/Issuer/testdata/degraded_acmeFailed.yaml
 create mode 100644 pkg/resource_customizations/certmanager.k8s.io/Issuer/testdata/healthy_registered.yaml
 create mode 100644 pkg/resource_customizations/certmanager.k8s.io/Issuer/testdata/progressing_noStatus.yaml
 create mode 100644 pkg/resource_customizations/cloudfront.aws.crossplane.io/Distribution/health.lua
 create mode 100644 pkg/resource_customizations/cloudfront.aws.crossplane.io/Distribution/health_test.yaml
 create mode 100644 pkg/resource_customizations/cloudfront.aws.crossplane.io/Distribution/testdata/degraded_reconcileError.yaml
 create mode 100644 pkg/resource_customizations/cloudfront.aws.crossplane.io/Distribution/testdata/healthy.yaml
 create mode 100644 pkg/resource_customizations/cloudfront.aws.crossplane.io/Distribution/testdata/progressing.yaml
 create mode 100644 pkg/resource_customizations/cloudfront.aws.crossplane.io/Distribution/testdata/progressing_creating.yaml
 create mode 100644 pkg/resource_customizations/cloudfront.aws.crossplane.io/Distribution/testdata/progressing_noStatus.yaml
 create mode 100644 pkg/resource_customizations/cloudfront.aws.crossplane.io/Distribution/testdata/progressing_noavailable.yaml
 create mode 100644 pkg/resource_customizations/cloudfront.aws.crossplane.io/Distribution/testdata/suspended.yaml
 create mode 100644 pkg/resource_customizations/cloudfunctions.cnrm.cloud.google.com/CloudFunctionsFunction/health.lua
 create mode 100644 pkg/resource_customizations/cloudfunctions.cnrm.cloud.google.com/CloudFunctionsFunction/health_test.yaml
 create mode 100644 pkg/resource_customizations/cloudfunctions.cnrm.cloud.google.com/CloudFunctionsFunction/testdata/dependency_not_found.yaml
 create mode 100644 pkg/resource_customizations/cloudfunctions.cnrm.cloud.google.com/CloudFunctionsFunction/testdata/dependency_not_ready.yaml
 create mode 100644 pkg/resource_customizations/cloudfunctions.cnrm.cloud.google.com/CloudFunctionsFunction/testdata/up_to_date.yaml
 create mode 100644 pkg/resource_customizations/cloudfunctions.cnrm.cloud.google.com/CloudFunctionsFunction/testdata/update_failed.yaml
 create mode 100644 pkg/resource_customizations/cloudfunctions.cnrm.cloud.google.com/CloudFunctionsFunction/testdata/update_in_progress.yaml
 create mode 100644 pkg/resource_customizations/cloudscheduler.cnrm.cloud.google.com/CloudSchedulerJob/health.lua
 create mode 100644 pkg/resource_customizations/cloudscheduler.cnrm.cloud.google.com/CloudSchedulerJob/health_test.yaml
 create mode 100644 pkg/resource_customizations/cloudscheduler.cnrm.cloud.google.com/CloudSchedulerJob/testdata/dependency_not_found.yaml
 create mode 100644 pkg/resource_customizations/cloudscheduler.cnrm.cloud.google.com/CloudSchedulerJob/testdata/dependency_not_ready.yaml
 create mode 100644 pkg/resource_customizations/cloudscheduler.cnrm.cloud.google.com/CloudSchedulerJob/testdata/up_to_date.yaml
 create mode 100644 pkg/resource_customizations/cloudscheduler.cnrm.cloud.google.com/CloudSchedulerJob/testdata/update_failed.yaml
 create mode 100644 pkg/resource_customizations/cloudscheduler.cnrm.cloud.google.com/CloudSchedulerJob/testdata/update_in_progress.yaml
 create mode 100644 pkg/resource_customizations/cluster.x-k8s.io/Cluster/health.lua
 create mode 100644 pkg/resource_customizations/cluster.x-k8s.io/Cluster/health_test.yaml
 create mode 100644 pkg/resource_customizations/cluster.x-k8s.io/Cluster/testdata/degraded_failed.yaml
 create mode 100644 pkg/resource_customizations/cluster.x-k8s.io/Cluster/testdata/error_provisioned.yaml
 create mode 100644 pkg/resource_customizations/cluster.x-k8s.io/Cluster/testdata/healthy_provisioned.yaml
 create mode 100644 pkg/resource_customizations/cluster.x-k8s.io/Cluster/testdata/progressing_provisioning.yaml
 create mode 100644 pkg/resource_customizations/cluster.x-k8s.io/Cluster/testdata/suspended_paused.yaml
 create mode 100644 pkg/resource_customizations/cluster.x-k8s.io/Machine/health.lua
 create mode 100644 pkg/resource_customizations/cluster.x-k8s.io/Machine/health_test.yaml
 create mode 100644 pkg/resource_customizations/cluster.x-k8s.io/Machine/testdata/degraded_failed.yaml
 create mode 100644 pkg/resource_customizations/cluster.x-k8s.io/Machine/testdata/healthy_running.yaml
 create mode 100644 pkg/resource_customizations/cluster.x-k8s.io/Machine/testdata/progressing_boot.yaml
 create mode 100644 pkg/resource_customizations/cluster.x-k8s.io/MachineDeployment/health.lua
 create mode 100644 pkg/resource_customizations/cluster.x-k8s.io/MachineDeployment/health_test.yaml
 create mode 100644 pkg/resource_customizations/cluster.x-k8s.io/MachineDeployment/testdata/degraded_failed.yaml
 create mode 100644 pkg/resource_customizations/cluster.x-k8s.io/MachineDeployment/testdata/healthy_provisioned.yaml
 create mode 100644 pkg/resource_customizations/cluster.x-k8s.io/MachineDeployment/testdata/progressing_ScalingDown.yaml
 create mode 100644 pkg/resource_customizations/cluster.x-k8s.io/MachineDeployment/testdata/progressing_ScalingUp.yaml
 create mode 100644 pkg/resource_customizations/cluster.x-k8s.io/MachineDeployment/testdata/suspended_paused.yaml
 create mode 100644 pkg/resource_customizations/cluster.x-k8s.io/MachineHealthCheck/health.lua
 create mode 100644 pkg/resource_customizations/cluster.x-k8s.io/MachineHealthCheck/health_test.yaml
 create mode 100644 pkg/resource_customizations/cluster.x-k8s.io/MachineHealthCheck/testdata/degraded_expectedMachines.yaml
 create mode 100644 pkg/resource_customizations/cluster.x-k8s.io/MachineHealthCheck/testdata/healthy.yaml
 create mode 100644 pkg/resource_customizations/compute.cnrm.cloud.google.com/ComputeDisk/health.lua
 create mode 100644 pkg/resource_customizations/compute.cnrm.cloud.google.com/ComputeDisk/health_test.yaml
 create mode 100644 pkg/resource_customizations/compute.cnrm.cloud.google.com/ComputeDisk/testdata/dependency_not_found.yaml
 create mode 100644 pkg/resource_customizations/compute.cnrm.cloud.google.com/ComputeDisk/testdata/dependency_not_ready.yaml
 create mode 100644 pkg/resource_customizations/compute.cnrm.cloud.google.com/ComputeDisk/testdata/up_to_date.yaml
 create mode 100644 pkg/resource_customizations/compute.cnrm.cloud.google.com/ComputeDisk/testdata/update_failed.yaml
 create mode 100644 pkg/resource_customizations/compute.cnrm.cloud.google.com/ComputeDisk/testdata/update_in_progress.yaml
 create mode 100644 pkg/resource_customizations/db.atlasgo.io/AtlasMigration/health.lua
 create mode 100644 pkg/resource_customizations/db.atlasgo.io/AtlasMigration/health_test.yaml
 create mode 100644 pkg/resource_customizations/db.atlasgo.io/AtlasMigration/testdata/degraded.yaml
 create mode 100644 pkg/resource_customizations/db.atlasgo.io/AtlasMigration/testdata/healthy.yaml
 create mode 100644 pkg/resource_customizations/db.atlasgo.io/AtlasMigration/testdata/progressing.yaml
 create mode 100644 pkg/resource_customizations/db.atlasgo.io/AtlasSchema/health.lua
 create mode 100644 pkg/resource_customizations/db.atlasgo.io/AtlasSchema/health_test.yaml
 create mode 100644 pkg/resource_customizations/db.atlasgo.io/AtlasSchema/testdata/degraded.yaml
 create mode 100644 pkg/resource_customizations/db.atlasgo.io/AtlasSchema/testdata/healthy.yaml
 create mode 100644 pkg/resource_customizations/db.atlasgo.io/AtlasSchema/testdata/progressing.yaml
 create mode 100644 pkg/resource_customizations/elasticsearch.k8s.elastic.co/Elasticsearch/health.lua
 create mode 100644 pkg/resource_customizations/elasticsearch.k8s.elastic.co/Elasticsearch/health_test.yaml
 create mode 100644 pkg/resource_customizations/elasticsearch.k8s.elastic.co/Elasticsearch/testdata/applyingchanges.yaml
 create mode 100644 pkg/resource_customizations/elasticsearch.k8s.elastic.co/Elasticsearch/testdata/invalid.yaml
 create mode 100644 pkg/resource_customizations/elasticsearch.k8s.elastic.co/Elasticsearch/testdata/migratingdata.yaml
 create mode 100644 pkg/resource_customizations/elasticsearch.k8s.elastic.co/Elasticsearch/testdata/progressing.yaml
 create mode 100644 pkg/resource_customizations/elasticsearch.k8s.elastic.co/Elasticsearch/testdata/ready_green.yaml
 create mode 100644 pkg/resource_customizations/elasticsearch.k8s.elastic.co/Elasticsearch/testdata/ready_red.yaml
 create mode 100644 pkg/resource_customizations/elasticsearch.k8s.elastic.co/Elasticsearch/testdata/ready_yellow.yaml
 create mode 100644 pkg/resource_customizations/elasticsearch.k8s.elastic.co/Elasticsearch/testdata/unknown.yaml
 create mode 100644 pkg/resource_customizations/embed.go
 create mode 100644 pkg/resource_customizations/external-secrets.io/ClusterExternalSecret/health.lua
 create mode 100644 pkg/resource_customizations/external-secrets.io/ClusterExternalSecret/health_test.yaml
 create
mode 100644 pkg/resource_customizations/external-secrets.io/ClusterExternalSecret/testdata/healthy.yaml create mode 100644 pkg/resource_customizations/external-secrets.io/ClusterExternalSecret/testdata/notready.yaml create mode 100644 pkg/resource_customizations/external-secrets.io/ClusterExternalSecret/testdata/partiallyready-multiple-conditions.yaml create mode 100644 pkg/resource_customizations/external-secrets.io/ClusterExternalSecret/testdata/partiallyready.yaml create mode 100644 pkg/resource_customizations/external-secrets.io/ClusterExternalSecret/testdata/progressing.yaml create mode 100644 pkg/resource_customizations/external-secrets.io/ClusterSecretStore/health.lua create mode 100644 pkg/resource_customizations/external-secrets.io/ClusterSecretStore/health_test.yaml create mode 100644 pkg/resource_customizations/external-secrets.io/ClusterSecretStore/testdata/degraded.yaml create mode 100644 pkg/resource_customizations/external-secrets.io/ClusterSecretStore/testdata/healthy.yaml create mode 100644 pkg/resource_customizations/external-secrets.io/ExternalSecret/actions/action_test.yaml create mode 100644 pkg/resource_customizations/external-secrets.io/ExternalSecret/actions/discovery.lua create mode 100644 pkg/resource_customizations/external-secrets.io/ExternalSecret/actions/refresh/action.lua create mode 100644 pkg/resource_customizations/external-secrets.io/ExternalSecret/actions/testdata/external-secret-updated.yaml create mode 100644 pkg/resource_customizations/external-secrets.io/ExternalSecret/actions/testdata/external-secret.yaml create mode 100644 pkg/resource_customizations/external-secrets.io/ExternalSecret/health.lua create mode 100644 pkg/resource_customizations/external-secrets.io/ExternalSecret/health_test.yaml create mode 100644 pkg/resource_customizations/external-secrets.io/ExternalSecret/testdata/degraded.yaml create mode 100644 pkg/resource_customizations/external-secrets.io/ExternalSecret/testdata/healthy.yaml create mode 100644 pkg/resource_customizations/external-secrets.io/ExternalSecret/testdata/progressing.yaml create mode 100644 pkg/resource_customizations/external-secrets.io/PushSecret/actions/action_test.yaml create mode 100644 pkg/resource_customizations/external-secrets.io/PushSecret/actions/discovery.lua create mode 100644 pkg/resource_customizations/external-secrets.io/PushSecret/actions/push/action.lua create mode 100644 pkg/resource_customizations/external-secrets.io/PushSecret/actions/testdata/push-secret-updated.yaml create mode 100644 pkg/resource_customizations/external-secrets.io/PushSecret/actions/testdata/push-secret.yaml create mode 100644 pkg/resource_customizations/external-secrets.io/PushSecret/health.lua create mode 100644 pkg/resource_customizations/external-secrets.io/PushSecret/health_test.yaml create mode 100644 pkg/resource_customizations/external-secrets.io/PushSecret/testdata/degraded.yaml create mode 100644 pkg/resource_customizations/external-secrets.io/PushSecret/testdata/healthy.yaml create mode 100644 pkg/resource_customizations/external-secrets.io/PushSecret/testdata/progressing.yaml create mode 100644 pkg/resource_customizations/external-secrets.io/SecretStore/health.lua create mode 100644 pkg/resource_customizations/external-secrets.io/SecretStore/health_test.yaml create mode 100644 pkg/resource_customizations/external-secrets.io/SecretStore/testdata/degraded.yaml create mode 100644 pkg/resource_customizations/external-secrets.io/SecretStore/testdata/healthy.yaml create mode 100644 
pkg/resource_customizations/flagger.app/Canary/health.lua create mode 100644 pkg/resource_customizations/flagger.app/Canary/health_test.yaml create mode 100644 pkg/resource_customizations/flagger.app/Canary/testdata/degraded.yaml create mode 100644 pkg/resource_customizations/flagger.app/Canary/testdata/finalising.yaml create mode 100644 pkg/resource_customizations/flagger.app/Canary/testdata/initialized.yaml create mode 100644 pkg/resource_customizations/flagger.app/Canary/testdata/progressing.yaml create mode 100644 pkg/resource_customizations/flagger.app/Canary/testdata/promoting.yaml create mode 100644 pkg/resource_customizations/flagger.app/Canary/testdata/succeeded.yaml create mode 100644 pkg/resource_customizations/flink.apache.org/FlinkDeployment/health.lua create mode 100644 pkg/resource_customizations/flink.apache.org/FlinkDeployment/health_test.yaml create mode 100644 pkg/resource_customizations/flink.apache.org/FlinkDeployment/testdata/degraded_error.yaml create mode 100644 pkg/resource_customizations/flink.apache.org/FlinkDeployment/testdata/healthy_running_v0.1.x.yaml create mode 100644 pkg/resource_customizations/flink.apache.org/FlinkDeployment/testdata/healthy_running_v1.x.yaml create mode 100644 pkg/resource_customizations/flink.apache.org/FlinkDeployment/testdata/healthy_suspended_v0.1.x.yaml create mode 100644 pkg/resource_customizations/flink.apache.org/FlinkDeployment/testdata/healthy_suspended_v1.x.yaml create mode 100644 pkg/resource_customizations/flink.apache.org/FlinkDeployment/testdata/progressing_deployedNotReady.yaml create mode 100644 pkg/resource_customizations/flink.apache.org/FlinkDeployment/testdata/progressing_deploying.yaml create mode 100644 pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMPartialPolicy/health.lua create mode 100644 pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMPartialPolicy/health_test.yaml create mode 100644 pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMPartialPolicy/testdata/dependency_not_found.yaml create mode 100644 pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMPartialPolicy/testdata/dependency_not_ready.yaml create mode 100644 pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMPartialPolicy/testdata/up_to_date.yaml create mode 100644 pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMPartialPolicy/testdata/update_failed.yaml create mode 100644 pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMPartialPolicy/testdata/update_in_progress.yaml create mode 100644 pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMPolicy/health.lua create mode 100644 pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMPolicy/health_test.yaml create mode 100644 pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMPolicy/testdata/dependency_not_found.yaml create mode 100644 pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMPolicy/testdata/dependency_not_ready.yaml create mode 100644 pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMPolicy/testdata/up_to_date.yaml create mode 100644 pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMPolicy/testdata/update_failed.yaml create mode 100644 pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMPolicy/testdata/update_in_progress.yaml create mode 100644 pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMPolicyMember/health.lua create mode 100644 pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMPolicyMember/health_test.yaml create mode 100644 
pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMPolicyMember/testdata/dependency_not_found.yaml create mode 100644 pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMPolicyMember/testdata/dependency_not_ready.yaml create mode 100644 pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMPolicyMember/testdata/up_to_date.yaml create mode 100644 pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMPolicyMember/testdata/update_failed.yaml create mode 100644 pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMPolicyMember/testdata/update_in_progress.yaml create mode 100644 pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMServiceAccount/health.lua create mode 100644 pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMServiceAccount/health_test.yaml create mode 100644 pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMServiceAccount/testdata/dependency_not_found.yaml create mode 100644 pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMServiceAccount/testdata/dependency_not_ready.yaml create mode 100644 pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMServiceAccount/testdata/up_to_date.yaml create mode 100644 pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMServiceAccount/testdata/update_failed.yaml create mode 100644 pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMServiceAccount/testdata/update_in_progress.yaml create mode 100644 pkg/resource_customizations/iammanager.keikoproj.io/Iamrole/health.lua create mode 100644 pkg/resource_customizations/iammanager.keikoproj.io/Iamrole/health_test.yaml create mode 100644 pkg/resource_customizations/iammanager.keikoproj.io/Iamrole/testdata/degraded_error.yaml create mode 100644 pkg/resource_customizations/iammanager.keikoproj.io/Iamrole/testdata/degraded_rolesMaxLimitReached.yaml create mode 100644 pkg/resource_customizations/iammanager.keikoproj.io/Iamrole/testdata/healthy.yaml create mode 100644 pkg/resource_customizations/iammanager.keikoproj.io/Iamrole/testdata/progressing_noStatus.yaml create mode 100644 pkg/resource_customizations/install.istio.io/IstioOperator/health.lua create mode 100644 pkg/resource_customizations/install.istio.io/IstioOperator/health_test.yaml create mode 100644 pkg/resource_customizations/install.istio.io/IstioOperator/testdata/degraded.yaml create mode 100644 pkg/resource_customizations/install.istio.io/IstioOperator/testdata/healthy.yaml create mode 100644 pkg/resource_customizations/install.istio.io/IstioOperator/testdata/progressing_reconciling.yaml create mode 100644 pkg/resource_customizations/install.istio.io/IstioOperator/testdata/progressing_updating.yaml create mode 100644 pkg/resource_customizations/jaegertracing.io/Jaeger/health.lua create mode 100644 pkg/resource_customizations/jaegertracing.io/Jaeger/health_test.yaml create mode 100644 pkg/resource_customizations/jaegertracing.io/Jaeger/testdata/degraded.yaml create mode 100644 pkg/resource_customizations/jaegertracing.io/Jaeger/testdata/healthy.yaml create mode 100644 pkg/resource_customizations/jaegertracing.io/Jaeger/testdata/progressing.yaml create mode 100644 pkg/resource_customizations/kafka.banzaicloud.io/KafkaCluster/health.lua create mode 100644 pkg/resource_customizations/kafka.banzaicloud.io/KafkaCluster/health_test.yaml create mode 100644 pkg/resource_customizations/kafka.banzaicloud.io/KafkaCluster/testdata/degraded.yaml create mode 100644 pkg/resource_customizations/kafka.banzaicloud.io/KafkaCluster/testdata/healthy.yaml create mode 100644 
pkg/resource_customizations/kafka.banzaicloud.io/KafkaCluster/testdata/reconciling.yaml create mode 100644 pkg/resource_customizations/kafka.banzaicloud.io/KafkaCluster/testdata/updating.yaml create mode 100644 pkg/resource_customizations/kafka.strimzi.io/Kafka/health.lua create mode 100644 pkg/resource_customizations/kafka.strimzi.io/Kafka/health_test.yaml create mode 100644 pkg/resource_customizations/kafka.strimzi.io/Kafka/testdata/degraded.yaml create mode 100644 pkg/resource_customizations/kafka.strimzi.io/Kafka/testdata/healthy.yaml create mode 100644 pkg/resource_customizations/kafka.strimzi.io/Kafka/testdata/progressing_noStatus.yaml create mode 100644 pkg/resource_customizations/kafka.strimzi.io/KafkaConnect/health.lua create mode 100644 pkg/resource_customizations/kafka.strimzi.io/KafkaConnect/health_test.yaml create mode 100644 pkg/resource_customizations/kafka.strimzi.io/KafkaConnect/testdata/degraded.yaml create mode 100644 pkg/resource_customizations/kafka.strimzi.io/KafkaConnect/testdata/healthy.yaml create mode 100644 pkg/resource_customizations/kafka.strimzi.io/KafkaConnect/testdata/progressing_noStatus.yaml create mode 100644 pkg/resource_customizations/kafka.strimzi.io/KafkaTopic/health.lua create mode 100644 pkg/resource_customizations/kafka.strimzi.io/KafkaTopic/health_test.yaml create mode 100644 pkg/resource_customizations/kafka.strimzi.io/KafkaTopic/testdata/degraded.yaml create mode 100644 pkg/resource_customizations/kafka.strimzi.io/KafkaTopic/testdata/healthy.yaml create mode 100644 pkg/resource_customizations/kafka.strimzi.io/KafkaTopic/testdata/progressing_noStatus.yaml create mode 100644 pkg/resource_customizations/kafka.strimzi.io/KafkaUser/health.lua create mode 100644 pkg/resource_customizations/kafka.strimzi.io/KafkaUser/health_test.yaml create mode 100644 pkg/resource_customizations/kafka.strimzi.io/KafkaUser/testdata/degraded.yaml create mode 100644 pkg/resource_customizations/kafka.strimzi.io/KafkaUser/testdata/healthy.yaml create mode 100644 pkg/resource_customizations/kafka.strimzi.io/KafkaUser/testdata/progressing_noStatus.yaml create mode 100644 pkg/resource_customizations/kiali.io/Kiali/health.lua create mode 100644 pkg/resource_customizations/kiali.io/Kiali/health_test.yaml create mode 100644 pkg/resource_customizations/kiali.io/Kiali/testdata/degraded.yaml create mode 100644 pkg/resource_customizations/kiali.io/Kiali/testdata/healthy.yaml create mode 100644 pkg/resource_customizations/kiali.io/Kiali/testdata/progressing.yaml create mode 100644 pkg/resource_customizations/kubernetes-client.io/ExternalSecret/health.lua create mode 100644 pkg/resource_customizations/kubernetes-client.io/ExternalSecret/health_test.yaml create mode 100644 pkg/resource_customizations/kubernetes-client.io/ExternalSecret/testdata/degraded.yaml create mode 100644 pkg/resource_customizations/kubernetes-client.io/ExternalSecret/testdata/healthy.yaml create mode 100644 pkg/resource_customizations/kubernetes-client.io/ExternalSecret/testdata/progressing.yaml create mode 100644 pkg/resource_customizations/kubevirt.io/VirtualMachine/health.lua create mode 100644 pkg/resource_customizations/kubevirt.io/VirtualMachine/health_test.yaml create mode 100644 pkg/resource_customizations/kubevirt.io/VirtualMachine/testdata/healthy_ready.yaml create mode 100644 pkg/resource_customizations/kubevirt.io/VirtualMachine/testdata/progressing_nostatus.yaml create mode 100644 pkg/resource_customizations/kubevirt.io/VirtualMachine/testdata/progressing_starting.yaml create mode 100644 
pkg/resource_customizations/kubevirt.io/VirtualMachine/testdata/suspended_paused.yaml create mode 100644 pkg/resource_customizations/kubevirt.io/VirtualMachine/testdata/suspended_stopped.yaml create mode 100644 pkg/resource_customizations/kubevirt.io/VirtualMachineInstance/health.lua create mode 100644 pkg/resource_customizations/kubevirt.io/VirtualMachineInstance/health_test.yaml create mode 100644 pkg/resource_customizations/kubevirt.io/VirtualMachineInstance/testdata/healthy_ready.yaml create mode 100644 pkg/resource_customizations/kubevirt.io/VirtualMachineInstance/testdata/progressing_nostatus.yaml create mode 100644 pkg/resource_customizations/kubevirt.io/VirtualMachineInstance/testdata/progressing_pending.yaml create mode 100644 pkg/resource_customizations/kubevirt.io/VirtualMachineInstance/testdata/progressing_scheduling.yaml create mode 100644 pkg/resource_customizations/kubevirt.io/VirtualMachineInstance/testdata/suspended_paused.yaml create mode 100644 pkg/resource_customizations/kubevirt.io/VirtualMachineInstance/testdata/suspended_succeeded.yaml create mode 100644 pkg/resource_customizations/lifecycle.keptn.sh/KeptnAppVersion/health.lua create mode 100644 pkg/resource_customizations/lifecycle.keptn.sh/KeptnAppVersion/health_test.yaml create mode 100644 pkg/resource_customizations/lifecycle.keptn.sh/KeptnAppVersion/testdata/degraded.yaml create mode 100644 pkg/resource_customizations/lifecycle.keptn.sh/KeptnAppVersion/testdata/healthy.yaml create mode 100644 pkg/resource_customizations/lifecycle.keptn.sh/KeptnAppVersion/testdata/progressing.yaml create mode 100644 pkg/resource_customizations/lifecycle.keptn.sh/KeptnEvaluation/health.lua create mode 100644 pkg/resource_customizations/lifecycle.keptn.sh/KeptnEvaluation/health_test.yaml create mode 100644 pkg/resource_customizations/lifecycle.keptn.sh/KeptnEvaluation/testdata/degraded.yaml create mode 100644 pkg/resource_customizations/lifecycle.keptn.sh/KeptnEvaluation/testdata/healthy.yaml create mode 100644 pkg/resource_customizations/lifecycle.keptn.sh/KeptnEvaluation/testdata/progressing.yaml create mode 100644 pkg/resource_customizations/lifecycle.keptn.sh/KeptnTask/health.lua create mode 100644 pkg/resource_customizations/lifecycle.keptn.sh/KeptnTask/health_test.yaml create mode 100644 pkg/resource_customizations/lifecycle.keptn.sh/KeptnTask/testdata/degraded.yaml create mode 100644 pkg/resource_customizations/lifecycle.keptn.sh/KeptnTask/testdata/healthy.yaml create mode 100644 pkg/resource_customizations/lifecycle.keptn.sh/KeptnTask/testdata/progressing.yaml create mode 100644 pkg/resource_customizations/lifecycle.keptn.sh/KeptnWorkloadInstance/health.lua create mode 100644 pkg/resource_customizations/lifecycle.keptn.sh/KeptnWorkloadInstance/health_test.yaml create mode 100644 pkg/resource_customizations/lifecycle.keptn.sh/KeptnWorkloadInstance/testdata/degraded.yaml create mode 100644 pkg/resource_customizations/lifecycle.keptn.sh/KeptnWorkloadInstance/testdata/healthy.yaml create mode 100644 pkg/resource_customizations/lifecycle.keptn.sh/KeptnWorkloadInstance/testdata/progressing.yaml create mode 100644 pkg/resource_customizations/mariadb.mmontes.io/MariaDB/health.lua create mode 100644 pkg/resource_customizations/mariadb.mmontes.io/MariaDB/health_test.yaml create mode 100644 pkg/resource_customizations/mariadb.mmontes.io/MariaDB/testdata/mariadb_error.yaml create mode 100644 pkg/resource_customizations/mariadb.mmontes.io/MariaDB/testdata/no_status.yaml create mode 100644 
pkg/resource_customizations/mariadb.mmontes.io/MariaDB/testdata/restore_complete.yaml create mode 100644 pkg/resource_customizations/mariadb.mmontes.io/MariaDB/testdata/restore_not_complete.yaml create mode 100644 pkg/resource_customizations/mariadb.mmontes.io/MariaDB/testdata/statefulset_not_ready.yaml create mode 100644 pkg/resource_customizations/mariadb.mmontes.io/MariaDB/testdata/statefulset_ready.yaml create mode 100644 pkg/resource_customizations/minio.min.io/Tenant/health.lua create mode 100644 pkg/resource_customizations/minio.min.io/Tenant/health_test.yaml create mode 100644 pkg/resource_customizations/minio.min.io/Tenant/testdata/another_tenant_exists.yaml create mode 100644 pkg/resource_customizations/minio.min.io/Tenant/testdata/incorrect_tenant_credentials.yaml create mode 100644 pkg/resource_customizations/minio.min.io/Tenant/testdata/initialized.yaml create mode 100644 pkg/resource_customizations/minio.min.io/Tenant/testdata/no_status.yaml create mode 100644 pkg/resource_customizations/minio.min.io/Tenant/testdata/out_of_control.yaml create mode 100644 pkg/resource_customizations/minio.min.io/Tenant/testdata/pool_decommissioning_not_allowed.yaml create mode 100644 pkg/resource_customizations/minio.min.io/Tenant/testdata/provisioning.yaml create mode 100644 pkg/resource_customizations/minio.min.io/Tenant/testdata/restarting_minio.yaml create mode 100644 pkg/resource_customizations/minio.min.io/Tenant/testdata/unknown_status_message.yaml create mode 100644 pkg/resource_customizations/minio.min.io/Tenant/testdata/updating.yaml create mode 100644 pkg/resource_customizations/minio.min.io/Tenant/testdata/versions_mismatch.yaml create mode 100644 pkg/resource_customizations/minio.min.io/Tenant/testdata/waiting.yaml create mode 100644 pkg/resource_customizations/monitoring.coreos.com/Prometheus/health.lua create mode 100644 pkg/resource_customizations/monitoring.coreos.com/Prometheus/health_test.yaml create mode 100644 pkg/resource_customizations/monitoring.coreos.com/Prometheus/testdata/degraded.yaml create mode 100644 pkg/resource_customizations/monitoring.coreos.com/Prometheus/testdata/healthy.yaml create mode 100644 pkg/resource_customizations/monitoring.coreos.com/Prometheus/testdata/progressing.yaml create mode 100644 pkg/resource_customizations/networking.gke.io/ManagedCertificate/health.lua create mode 100644 pkg/resource_customizations/networking.gke.io/ManagedCertificate/health_test.yaml create mode 100644 pkg/resource_customizations/networking.gke.io/ManagedCertificate/testdata/active.yaml create mode 100644 pkg/resource_customizations/networking.gke.io/ManagedCertificate/testdata/failed.yaml create mode 100644 pkg/resource_customizations/networking.gke.io/ManagedCertificate/testdata/provisioning.yaml create mode 100644 pkg/resource_customizations/onepassword.com/OnePasswordItem/health.lua create mode 100644 pkg/resource_customizations/onepassword.com/OnePasswordItem/health_test.yaml create mode 100644 pkg/resource_customizations/onepassword.com/OnePasswordItem/testdata/degraded.yaml create mode 100644 pkg/resource_customizations/onepassword.com/OnePasswordItem/testdata/healthy.yaml create mode 100644 pkg/resource_customizations/onepassword.com/OnePasswordItem/testdata/new.yaml create mode 100644 pkg/resource_customizations/operator.knative.dev/KnativeEventing/health.lua create mode 100644 pkg/resource_customizations/operator.knative.dev/KnativeEventing/health_test.yaml create mode 100644 
pkg/resource_customizations/operator.knative.dev/KnativeEventing/testdata/degraded.yaml create mode 100644 pkg/resource_customizations/operator.knative.dev/KnativeEventing/testdata/healthy.yaml create mode 100644 pkg/resource_customizations/operator.knative.dev/KnativeEventing/testdata/progressing.yaml create mode 100644 pkg/resource_customizations/operator.knative.dev/KnativeEventing/testdata/progressing_ready_false.yaml create mode 100644 pkg/resource_customizations/operator.knative.dev/KnativeServing/health.lua create mode 100644 pkg/resource_customizations/operator.knative.dev/KnativeServing/health_test.yaml create mode 100644 pkg/resource_customizations/operator.knative.dev/KnativeServing/testdata/degraded.yaml create mode 100644 pkg/resource_customizations/operator.knative.dev/KnativeServing/testdata/healthy.yaml create mode 100644 pkg/resource_customizations/operator.knative.dev/KnativeServing/testdata/progressing.yaml create mode 100644 pkg/resource_customizations/operator.knative.dev/KnativeServing/testdata/progressing_ready_false.yaml create mode 100644 pkg/resource_customizations/operator.openshift.io/IngressController/health.lua create mode 100644 pkg/resource_customizations/operator.openshift.io/IngressController/health_test.yaml create mode 100644 pkg/resource_customizations/operator.openshift.io/IngressController/testdata/degraded.yaml create mode 100644 pkg/resource_customizations/operator.openshift.io/IngressController/testdata/healthy.yaml create mode 100644 pkg/resource_customizations/operator.openshift.io/IngressController/testdata/progressing_initialization.yaml create mode 100644 pkg/resource_customizations/operator.openshift.io/IngressController/testdata/progressing_pod_rollout.yaml create mode 100644 pkg/resource_customizations/operators.coreos.com/Subscription/health.lua create mode 100644 pkg/resource_customizations/operators.coreos.com/Subscription/health_test.yaml create mode 100644 pkg/resource_customizations/operators.coreos.com/Subscription/testdata/catalog_sources_unhealthy.yaml create mode 100644 pkg/resource_customizations/operators.coreos.com/Subscription/testdata/healthy.yaml create mode 100644 pkg/resource_customizations/operators.coreos.com/Subscription/testdata/install_plan_failed.yaml create mode 100644 pkg/resource_customizations/operators.coreos.com/Subscription/testdata/install_plan_missing.yaml create mode 100644 pkg/resource_customizations/operators.coreos.com/Subscription/testdata/install_plan_pending.yaml create mode 100644 pkg/resource_customizations/operators.coreos.com/Subscription/testdata/resolution_failed.yaml create mode 100644 pkg/resource_customizations/pkg.crossplane.io/Provider/health.lua create mode 100644 pkg/resource_customizations/pkg.crossplane.io/Provider/health_test.yaml create mode 100644 pkg/resource_customizations/pkg.crossplane.io/Provider/testdata/degraded_healthy.yaml create mode 100644 pkg/resource_customizations/pkg.crossplane.io/Provider/testdata/degraded_installed.yaml create mode 100644 pkg/resource_customizations/pkg.crossplane.io/Provider/testdata/healthy.yaml create mode 100644 pkg/resource_customizations/pkg.crossplane.io/Provider/testdata/progressing_noStatus.yaml create mode 100644 pkg/resource_customizations/platform.confluent.io/Connect/health.lua create mode 100644 pkg/resource_customizations/platform.confluent.io/Connect/health_test.yaml create mode 100644 pkg/resource_customizations/platform.confluent.io/Connect/testdata/healthy.yaml create mode 100644 
pkg/resource_customizations/platform.confluent.io/Connect/testdata/progressing.yaml create mode 100644 pkg/resource_customizations/platform.confluent.io/ControlCenter/health.lua create mode 100644 pkg/resource_customizations/platform.confluent.io/ControlCenter/health_test.yaml create mode 100644 pkg/resource_customizations/platform.confluent.io/ControlCenter/testdata/healthy.yaml create mode 100644 pkg/resource_customizations/platform.confluent.io/ControlCenter/testdata/progressing.yaml create mode 100644 pkg/resource_customizations/platform.confluent.io/Kafka/health.lua create mode 100644 pkg/resource_customizations/platform.confluent.io/Kafka/health_test.yaml create mode 100644 pkg/resource_customizations/platform.confluent.io/Kafka/testdata/healthy.yaml create mode 100644 pkg/resource_customizations/platform.confluent.io/Kafka/testdata/progressing.yaml create mode 100644 pkg/resource_customizations/platform.confluent.io/KsqlDB/health.lua create mode 100644 pkg/resource_customizations/platform.confluent.io/KsqlDB/health_test.yaml create mode 100644 pkg/resource_customizations/platform.confluent.io/KsqlDB/testdata/healthy.yaml create mode 100644 pkg/resource_customizations/platform.confluent.io/KsqlDB/testdata/progressing.yaml create mode 100644 pkg/resource_customizations/platform.confluent.io/SchemaRegistry/health.lua create mode 100644 pkg/resource_customizations/platform.confluent.io/SchemaRegistry/health_test.yaml create mode 100644 pkg/resource_customizations/platform.confluent.io/SchemaRegistry/testdata/healthy.yaml create mode 100644 pkg/resource_customizations/platform.confluent.io/SchemaRegistry/testdata/progressing.yaml create mode 100644 pkg/resource_customizations/platform.confluent.io/Zookeeper/health.lua create mode 100644 pkg/resource_customizations/platform.confluent.io/Zookeeper/health_test.yaml create mode 100644 pkg/resource_customizations/platform.confluent.io/Zookeeper/testdata/healthy.yaml create mode 100644 pkg/resource_customizations/platform.confluent.io/Zookeeper/testdata/progressing.yaml create mode 100644 pkg/resource_customizations/proclaim.dogmatiq.io/DNSSDServiceInstance/health.lua create mode 100644 pkg/resource_customizations/proclaim.dogmatiq.io/DNSSDServiceInstance/health_test.yaml create mode 100644 pkg/resource_customizations/proclaim.dogmatiq.io/DNSSDServiceInstance/testdata/degraded_advertiseError.yaml create mode 100644 pkg/resource_customizations/proclaim.dogmatiq.io/DNSSDServiceInstance/testdata/degraded_notAdopted.yaml create mode 100644 pkg/resource_customizations/proclaim.dogmatiq.io/DNSSDServiceInstance/testdata/degraded_unadvertiseError.yaml create mode 100644 pkg/resource_customizations/proclaim.dogmatiq.io/DNSSDServiceInstance/testdata/healthy.yaml create mode 100644 pkg/resource_customizations/proclaim.dogmatiq.io/DNSSDServiceInstance/testdata/progressing_negativeBrowse.yaml create mode 100644 pkg/resource_customizations/proclaim.dogmatiq.io/DNSSDServiceInstance/testdata/progressing_negativeLookup.yaml create mode 100644 pkg/resource_customizations/proclaim.dogmatiq.io/DNSSDServiceInstance/testdata/unknown_discoveryError.yaml create mode 100644 pkg/resource_customizations/pubsub.cnrm.cloud.google.com/PubSubSubscription/health.lua create mode 100644 pkg/resource_customizations/pubsub.cnrm.cloud.google.com/PubSubSubscription/health_test.yaml create mode 100644 pkg/resource_customizations/pubsub.cnrm.cloud.google.com/PubSubSubscription/testdata/dependency_not_found.yaml create mode 100644 
pkg/resource_customizations/pubsub.cnrm.cloud.google.com/PubSubSubscription/testdata/dependency_not_ready.yaml create mode 100644 pkg/resource_customizations/pubsub.cnrm.cloud.google.com/PubSubSubscription/testdata/up_to_date.yaml create mode 100644 pkg/resource_customizations/pubsub.cnrm.cloud.google.com/PubSubSubscription/testdata/update_failed.yaml create mode 100644 pkg/resource_customizations/pubsub.cnrm.cloud.google.com/PubSubSubscription/testdata/update_in_progress.yaml create mode 100644 pkg/resource_customizations/pubsub.cnrm.cloud.google.com/PubSubTopic/health.lua create mode 100644 pkg/resource_customizations/pubsub.cnrm.cloud.google.com/PubSubTopic/health_test.yaml create mode 100644 pkg/resource_customizations/pubsub.cnrm.cloud.google.com/PubSubTopic/testdata/dependency_not_found.yaml create mode 100644 pkg/resource_customizations/pubsub.cnrm.cloud.google.com/PubSubTopic/testdata/dependency_not_ready.yaml create mode 100644 pkg/resource_customizations/pubsub.cnrm.cloud.google.com/PubSubTopic/testdata/up_to_date.yaml create mode 100644 pkg/resource_customizations/pubsub.cnrm.cloud.google.com/PubSubTopic/testdata/update_failed.yaml create mode 100644 pkg/resource_customizations/pubsub.cnrm.cloud.google.com/PubSubTopic/testdata/update_in_progress.yaml create mode 100644 pkg/resource_customizations/pxc.percona.com/PerconaXtraDBCluster/health.lua create mode 100644 pkg/resource_customizations/pxc.percona.com/PerconaXtraDBCluster/health_test.yaml create mode 100644 pkg/resource_customizations/pxc.percona.com/PerconaXtraDBCluster/testdata/error.yaml create mode 100644 pkg/resource_customizations/pxc.percona.com/PerconaXtraDBCluster/testdata/initializing.yaml create mode 100644 pkg/resource_customizations/pxc.percona.com/PerconaXtraDBCluster/testdata/paused.yaml create mode 100644 pkg/resource_customizations/pxc.percona.com/PerconaXtraDBCluster/testdata/ready.yaml create mode 100644 pkg/resource_customizations/pxc.percona.com/PerconaXtraDBCluster/testdata/stopping.yaml create mode 100644 pkg/resource_customizations/pxc.percona.com/PerconaXtraDBCluster/testdata/unknown.yaml create mode 100644 pkg/resource_customizations/resourcemanager.cnrm.cloud.google.com/Project/health.lua create mode 100644 pkg/resource_customizations/resourcemanager.cnrm.cloud.google.com/Project/health_test.yaml create mode 100644 pkg/resource_customizations/resourcemanager.cnrm.cloud.google.com/Project/testdata/dependency_not_found.yaml create mode 100644 pkg/resource_customizations/resourcemanager.cnrm.cloud.google.com/Project/testdata/dependency_not_ready.yaml create mode 100644 pkg/resource_customizations/resourcemanager.cnrm.cloud.google.com/Project/testdata/up_to_date.yaml create mode 100644 pkg/resource_customizations/resourcemanager.cnrm.cloud.google.com/Project/testdata/update_failed.yaml create mode 100644 pkg/resource_customizations/resourcemanager.cnrm.cloud.google.com/Project/testdata/update_in_progress.yaml create mode 100644 pkg/resource_customizations/rollouts.kruise.io/Rollout/health.lua create mode 100644 pkg/resource_customizations/rollouts.kruise.io/Rollout/health_test.yaml create mode 100644 pkg/resource_customizations/rollouts.kruise.io/Rollout/testdata/degraded.yaml create mode 100644 pkg/resource_customizations/rollouts.kruise.io/Rollout/testdata/healthy.yaml create mode 100644 pkg/resource_customizations/rollouts.kruise.io/Rollout/testdata/progressing.yaml create mode 100644 pkg/resource_customizations/rollouts.kruise.io/Rollout/testdata/suspended.yaml create mode 100644 
pkg/resource_customizations/route.openshift.io/Route/health.lua create mode 100644 pkg/resource_customizations/route.openshift.io/Route/health_test.yaml create mode 100644 pkg/resource_customizations/route.openshift.io/Route/testdata/degraded.yaml create mode 100644 pkg/resource_customizations/route.openshift.io/Route/testdata/healthy.yaml create mode 100644 pkg/resource_customizations/route.openshift.io/Route/testdata/progressing.yaml create mode 100644 pkg/resource_customizations/route53.aws.crossplane.io/ResourceRecordSet/health_test.yaml create mode 100644 pkg/resource_customizations/route53.aws.crossplane.io/ResourceRecordSet/heatlh.lua create mode 100644 pkg/resource_customizations/route53.aws.crossplane.io/ResourceRecordSet/testdata/degraded_reconcileError.yaml create mode 100644 pkg/resource_customizations/route53.aws.crossplane.io/ResourceRecordSet/testdata/healthy.yaml create mode 100644 pkg/resource_customizations/route53.aws.crossplane.io/ResourceRecordSet/testdata/progressing_creating.yaml create mode 100644 pkg/resource_customizations/route53.aws.crossplane.io/ResourceRecordSet/testdata/progressing_noStatus.yaml create mode 100644 pkg/resource_customizations/route53.aws.crossplane.io/ResourceRecordSet/testdata/suspended_reconcilePaused.yaml create mode 100644 pkg/resource_customizations/serving.knative.dev/Service/health.lua create mode 100644 pkg/resource_customizations/serving.knative.dev/Service/health_test.yaml create mode 100644 pkg/resource_customizations/serving.knative.dev/Service/testdata/degraded.yaml create mode 100644 pkg/resource_customizations/serving.knative.dev/Service/testdata/healthy.yaml create mode 100644 pkg/resource_customizations/serving.knative.dev/Service/testdata/progressing.yaml create mode 100644 pkg/resource_customizations/serving.kserve.io/InferenceService/health.lua create mode 100644 pkg/resource_customizations/serving.kserve.io/InferenceService/health_test.yaml create mode 100644 pkg/resource_customizations/serving.kserve.io/InferenceService/testdata/degraded.yaml create mode 100644 pkg/resource_customizations/serving.kserve.io/InferenceService/testdata/healthy.yaml create mode 100644 pkg/resource_customizations/serving.kserve.io/InferenceService/testdata/progressing.yaml create mode 100644 pkg/resource_customizations/snapshot.storage.k8s.io/VolumeSnapshot/health.lua create mode 100644 pkg/resource_customizations/snapshot.storage.k8s.io/VolumeSnapshot/health_test.yaml create mode 100644 pkg/resource_customizations/snapshot.storage.k8s.io/VolumeSnapshot/testdata/bad.yaml create mode 100644 pkg/resource_customizations/snapshot.storage.k8s.io/VolumeSnapshot/testdata/good.yaml create mode 100644 pkg/resource_customizations/snapshot.storage.k8s.io/VolumeSnapshot/testdata/initializing.yaml create mode 100644 pkg/resource_customizations/snapshot.storage.k8s.io/VolumeSnapshotContent/health.lua create mode 100644 pkg/resource_customizations/snapshot.storage.k8s.io/VolumeSnapshotContent/health_test.yaml create mode 100644 pkg/resource_customizations/snapshot.storage.k8s.io/VolumeSnapshotContent/testdata/bad.yaml create mode 100644 pkg/resource_customizations/snapshot.storage.k8s.io/VolumeSnapshotContent/testdata/good.yaml create mode 100644 pkg/resource_customizations/snapshot.storage.k8s.io/VolumeSnapshotContent/testdata/initializing.yaml create mode 100644 pkg/resource_customizations/sparkoperator.k8s.io/SparkApplication/health.lua create mode 100644 pkg/resource_customizations/sparkoperator.k8s.io/SparkApplication/health_test.yaml create mode 100644 
pkg/resource_customizations/sparkoperator.k8s.io/SparkApplication/testdata/degraded.yaml create mode 100644 pkg/resource_customizations/sparkoperator.k8s.io/SparkApplication/testdata/healthy.yaml create mode 100644 pkg/resource_customizations/sparkoperator.k8s.io/SparkApplication/testdata/healthy_dynamic_alloc.yaml create mode 100644 pkg/resource_customizations/sparkoperator.k8s.io/SparkApplication/testdata/healthy_dynamic_alloc_dstream.yaml create mode 100644 pkg/resource_customizations/sparkoperator.k8s.io/SparkApplication/testdata/healthy_dynamic_alloc_operator_api.yaml create mode 100644 pkg/resource_customizations/sparkoperator.k8s.io/SparkApplication/testdata/healthy_dynamic_alloc_without_spec_config.yaml create mode 100644 pkg/resource_customizations/sparkoperator.k8s.io/SparkApplication/testdata/progressing.yaml create mode 100644 pkg/resource_customizations/spot.io/SpotDeployment/health.lua create mode 100644 pkg/resource_customizations/spot.io/SpotDeployment/health_test.yaml create mode 100644 pkg/resource_customizations/spot.io/SpotDeployment/testdata/degraded_spotdeployment.yaml create mode 100644 pkg/resource_customizations/spot.io/SpotDeployment/testdata/healthy_spotdeployment.yaml create mode 100644 pkg/resource_customizations/spot.io/SpotDeployment/testdata/invalid_spec_spotdeployment.yaml create mode 100644 pkg/resource_customizations/sql.cnrm.cloud.google.com/SQLDatabase/health.lua create mode 100644 pkg/resource_customizations/sql.cnrm.cloud.google.com/SQLDatabase/health_test.yaml create mode 100644 pkg/resource_customizations/sql.cnrm.cloud.google.com/SQLDatabase/testdata/dependency_not_found.yaml create mode 100644 pkg/resource_customizations/sql.cnrm.cloud.google.com/SQLDatabase/testdata/dependency_not_ready.yaml create mode 100644 pkg/resource_customizations/sql.cnrm.cloud.google.com/SQLDatabase/testdata/up_to_date.yaml create mode 100644 pkg/resource_customizations/sql.cnrm.cloud.google.com/SQLDatabase/testdata/update_failed.yaml create mode 100644 pkg/resource_customizations/sql.cnrm.cloud.google.com/SQLDatabase/testdata/update_in_progress.yaml create mode 100644 pkg/resource_customizations/sql.cnrm.cloud.google.com/SQLInstance/health.lua create mode 100644 pkg/resource_customizations/sql.cnrm.cloud.google.com/SQLInstance/health_test.yaml create mode 100644 pkg/resource_customizations/sql.cnrm.cloud.google.com/SQLInstance/testdata/dependency_not_found.yaml create mode 100644 pkg/resource_customizations/sql.cnrm.cloud.google.com/SQLInstance/testdata/dependency_not_ready.yaml create mode 100644 pkg/resource_customizations/sql.cnrm.cloud.google.com/SQLInstance/testdata/up_to_date.yaml create mode 100644 pkg/resource_customizations/sql.cnrm.cloud.google.com/SQLInstance/testdata/update_failed.yaml create mode 100644 pkg/resource_customizations/sql.cnrm.cloud.google.com/SQLInstance/testdata/update_in_progress.yaml create mode 100644 pkg/resource_customizations/sql.cnrm.cloud.google.com/SQLUser/health_test.yaml create mode 100644 pkg/resource_customizations/sql.cnrm.cloud.google.com/SQLUser/testdata/dependency_not_found.yaml create mode 100644 pkg/resource_customizations/sql.cnrm.cloud.google.com/SQLUser/testdata/dependency_not_ready.yaml create mode 100644 pkg/resource_customizations/sql.cnrm.cloud.google.com/SQLUser/testdata/up_to_date.yaml create mode 100644 pkg/resource_customizations/sql.cnrm.cloud.google.com/SQLUser/testdata/update_failed.yaml create mode 100644 pkg/resource_customizations/sql.cnrm.cloud.google.com/SQLUser/testdata/update_in_progress.yaml create 
mode 100644 pkg/resource_customizations/stacks.crossplane.io/ClusterStackInstall/health.lua create mode 100644 pkg/resource_customizations/stacks.crossplane.io/ClusterStackInstall/health_test.yaml create mode 100644 pkg/resource_customizations/stacks.crossplane.io/ClusterStackInstall/testdata/being_created_stack.yaml create mode 100644 pkg/resource_customizations/stacks.crossplane.io/ClusterStackInstall/testdata/installed_stack.yaml create mode 100644 pkg/resource_customizations/stacks.crossplane.io/ClusterStackInstall/testdata/wait_stack.yaml create mode 100644 pkg/resource_customizations/storage.cnrm.cloud.google.com/StorageBucket/health.lua create mode 100644 pkg/resource_customizations/storage.cnrm.cloud.google.com/StorageBucket/health_test.yaml create mode 100644 pkg/resource_customizations/storage.cnrm.cloud.google.com/StorageBucket/testdata/dependency_not_found.yaml create mode 100644 pkg/resource_customizations/storage.cnrm.cloud.google.com/StorageBucket/testdata/dependency_not_ready.yaml create mode 100644 pkg/resource_customizations/storage.cnrm.cloud.google.com/StorageBucket/testdata/up_to_date.yaml create mode 100644 pkg/resource_customizations/storage.cnrm.cloud.google.com/StorageBucket/testdata/update_failed.yaml create mode 100644 pkg/resource_customizations/storage.cnrm.cloud.google.com/StorageBucket/testdata/update_in_progress.yaml create mode 100644 pkg/resource_customizations/storage.cnrm.cloud.google.com/StorageBucketAccessControl/health.lua create mode 100644 pkg/resource_customizations/storage.cnrm.cloud.google.com/StorageBucketAccessControl/health_test.yaml create mode 100644 pkg/resource_customizations/storage.cnrm.cloud.google.com/StorageBucketAccessControl/testdata/dependency_not_found.yaml create mode 100644 pkg/resource_customizations/storage.cnrm.cloud.google.com/StorageBucketAccessControl/testdata/dependency_not_ready.yaml create mode 100644 pkg/resource_customizations/storage.cnrm.cloud.google.com/StorageBucketAccessControl/testdata/up_to_date.yaml create mode 100644 pkg/resource_customizations/storage.cnrm.cloud.google.com/StorageBucketAccessControl/testdata/update_failed.yaml create mode 100644 pkg/resource_customizations/storage.cnrm.cloud.google.com/StorageBucketAccessControl/testdata/update_in_progress.yaml create mode 100644 pkg/resource_customizations/storage.cnrm.cloud.google.com/StorageDefaultObjectAccessControl/health.lua create mode 100644 pkg/resource_customizations/storage.cnrm.cloud.google.com/StorageDefaultObjectAccessControl/health_test.yaml create mode 100644 pkg/resource_customizations/storage.cnrm.cloud.google.com/StorageDefaultObjectAccessControl/testdata/dependency_not_found.yaml create mode 100644 pkg/resource_customizations/storage.cnrm.cloud.google.com/StorageDefaultObjectAccessControl/testdata/dependency_not_ready.yaml create mode 100644 pkg/resource_customizations/storage.cnrm.cloud.google.com/StorageDefaultObjectAccessControl/testdata/up_to_date.yaml create mode 100644 pkg/resource_customizations/storage.cnrm.cloud.google.com/StorageDefaultObjectAccessControl/testdata/update_failed.yaml create mode 100644 pkg/resource_customizations/storage.cnrm.cloud.google.com/StorageDefaultObjectAccessControl/testdata/update_in_progress.yaml create mode 100644 pkg/resource_customizations/tower.ansible.com/AnsibleJob/health.lua create mode 100644 pkg/resource_customizations/tower.ansible.com/AnsibleJob/health_test.yaml create mode 100644 pkg/resource_customizations/tower.ansible.com/AnsibleJob/testdata/degraded_canceled.yaml create mode 100644 
pkg/resource_customizations/tower.ansible.com/AnsibleJob/testdata/degraded_error.yaml create mode 100644 pkg/resource_customizations/tower.ansible.com/AnsibleJob/testdata/degraded_failed.yaml create mode 100644 pkg/resource_customizations/tower.ansible.com/AnsibleJob/testdata/healthy.yaml create mode 100644 pkg/resource_customizations/tower.ansible.com/AnsibleJob/testdata/progressing_new.yaml create mode 100644 pkg/resource_customizations/tower.ansible.com/AnsibleJob/testdata/progressing_noStatus.yaml create mode 100644 pkg/resource_customizations/tower.ansible.com/AnsibleJob/testdata/progressing_pending.yaml create mode 100644 pkg/resource_customizations/tower.ansible.com/AnsibleJob/testdata/progressing_running.yaml create mode 100644 pkg/resource_customizations/tower.ansible.com/AnsibleJob/testdata/progressing_waiting.yaml create mode 100644 pkg/resource_customizations/trident.netapp.io/TridentBackendConfig/health.lua create mode 100644 pkg/resource_customizations/trident.netapp.io/TridentBackendConfig/health_test.yaml create mode 100644 pkg/resource_customizations/trident.netapp.io/TridentBackendConfig/testdata/degraded.yaml create mode 100644 pkg/resource_customizations/trident.netapp.io/TridentBackendConfig/testdata/healthy.yaml create mode 100644 pkg/resource_customizations/trident.netapp.io/TridentBackendConfig/testdata/progressing.yaml create mode 100644 pkg/resource_customizations/trident.netapp.io/TridentOrchestrator/health.lua create mode 100644 pkg/resource_customizations/trident.netapp.io/TridentOrchestrator/health_test.yaml create mode 100644 pkg/resource_customizations/trident.netapp.io/TridentOrchestrator/testdata/degraded.yaml create mode 100644 pkg/resource_customizations/trident.netapp.io/TridentOrchestrator/testdata/healthy.yaml create mode 100644 pkg/resource_customizations/trident.netapp.io/TridentOrchestrator/testdata/progressing.yaml create mode 100644 pkg/resource_customizations/work.karmada.io/ClusterResourceBinding/health.lua create mode 100644 pkg/resource_customizations/work.karmada.io/ClusterResourceBinding/health_test.yaml create mode 100644 pkg/resource_customizations/work.karmada.io/ClusterResourceBinding/testdata/degraded_unapplied.yaml create mode 100644 pkg/resource_customizations/work.karmada.io/ClusterResourceBinding/testdata/degraded_unhealth.yaml create mode 100644 pkg/resource_customizations/work.karmada.io/ClusterResourceBinding/testdata/degraded_unknown.yaml create mode 100644 pkg/resource_customizations/work.karmada.io/ClusterResourceBinding/testdata/health.yaml create mode 100644 pkg/resource_customizations/work.karmada.io/ClusterResourceBinding/testdata/health_unknown.yaml create mode 100644 pkg/resource_customizations/work.karmada.io/ClusterResourceBinding/testdata/progressing.yaml create mode 100644 pkg/resource_customizations/work.karmada.io/ClusterResourceBinding/testdata/progressing_aggregatedStatus.yaml create mode 100644 pkg/resource_customizations/work.karmada.io/ClusterResourceBinding/testdata/progressing_cluster.yaml create mode 100644 pkg/resource_customizations/work.karmada.io/ResourceBinding/health.lua create mode 100644 pkg/resource_customizations/work.karmada.io/ResourceBinding/health_test.yaml create mode 100644 pkg/resource_customizations/work.karmada.io/ResourceBinding/testdata/degraded_unapplied.yaml create mode 100644 pkg/resource_customizations/work.karmada.io/ResourceBinding/testdata/degraded_unhealth.yaml create mode 100644 pkg/resource_customizations/work.karmada.io/ResourceBinding/testdata/degraded_unknown.yaml create 
mode 100644 pkg/resource_customizations/work.karmada.io/ResourceBinding/testdata/health.yaml create mode 100644 pkg/resource_customizations/work.karmada.io/ResourceBinding/testdata/health_unknown.yaml create mode 100644 pkg/resource_customizations/work.karmada.io/ResourceBinding/testdata/progressing.yaml create mode 100644 pkg/resource_customizations/work.karmada.io/ResourceBinding/testdata/progressing_aggregatedStatus.yaml create mode 100644 pkg/resource_customizations/work.karmada.io/ResourceBinding/testdata/progressing_cluster.yaml create mode 100644 pkg/resource_customizations/zookeeper.pravega.io/ZookeeperCluster/health.lua create mode 100644 pkg/resource_customizations/zookeeper.pravega.io/ZookeeperCluster/health_test.yaml create mode 100644 pkg/resource_customizations/zookeeper.pravega.io/ZookeeperCluster/testdata/healthy.yaml create mode 100644 pkg/resource_customizations/zookeeper.pravega.io/ZookeeperCluster/testdata/progressing.yaml diff --git a/pkg/resource_customizations b/pkg/resource_customizations deleted file mode 120000 index 9ac2ee6..0000000 --- a/pkg/resource_customizations +++ /dev/null @@ -1 +0,0 @@ -../argo-cd/resource_customizations \ No newline at end of file diff --git a/pkg/resource_customizations/apps.kruise.io/AdvancedCronJob/health.lua b/pkg/resource_customizations/apps.kruise.io/AdvancedCronJob/health.lua new file mode 100644 index 0000000..1e68d86 --- /dev/null +++ b/pkg/resource_customizations/apps.kruise.io/AdvancedCronJob/health.lua @@ -0,0 +1,36 @@ +hs = { status = "Progressing", message = "AdvancedCronJobs has active jobs" } +-- Extract lastScheduleTime and convert to time objects +lastScheduleTime = nil + +if obj.status.lastScheduleTime ~= nil then + local year, month, day, hour, min, sec = string.match(obj.status.lastScheduleTime, "(%d+)-(%d+)-(%d+)T(%d+):(%d+):(%d+)Z") + lastScheduleTime = os.time({year=year, month=month, day=day, hour=hour, min=min, sec=sec}) +end + + +if lastScheduleTime == nil and obj.spec.paused == true then + hs.status = "Suspended" + hs.message = "AdvancedCronJob is Paused" + return hs +end + +-- AdvancedCronJobs are progressing if they have any object in the "active" state +if obj.status.active ~= nil and #obj.status.active > 0 then + hs.status = "Progressing" + hs.message = "AdvancedCronJobs has active jobs" + return hs +end +-- AdvancedCronJobs are Degraded if they don't have lastScheduleTime +if lastScheduleTime == nil then + hs.status = "Degraded" + hs.message = "AdvancedCronJobs has not run successfully" + return hs +end +-- AdvancedCronJobs are healthy if they have lastScheduleTime +if lastScheduleTime ~= nil then + hs.status = "Healthy" + hs.message = "AdvancedCronJobs has run successfully" + return hs +end + +return hs diff --git a/pkg/resource_customizations/apps.kruise.io/AdvancedCronJob/health_test.yaml b/pkg/resource_customizations/apps.kruise.io/AdvancedCronJob/health_test.yaml new file mode 100644 index 0000000..939c701 --- /dev/null +++ b/pkg/resource_customizations/apps.kruise.io/AdvancedCronJob/health_test.yaml @@ -0,0 +1,17 @@ +tests: + - healthStatus: + status: Healthy + message: AdvancedCronJobs has run successfully + inputPath: testdata/lastScheduleTime.yaml + - healthStatus: + status: Degraded + message: AdvancedCronJobs has not run successfully + inputPath: testdata/notScheduled.yaml + - healthStatus: + status: Progressing + message: AdvancedCronJobs has active jobs + inputPath: testdata/activeJobs.yaml + - healthStatus: + status: Suspended + message: AdvancedCronJob is Paused + inputPath: 
testdata/suspended.yaml diff --git a/pkg/resource_customizations/apps.kruise.io/AdvancedCronJob/testdata/activeJobs.yaml b/pkg/resource_customizations/apps.kruise.io/AdvancedCronJob/testdata/activeJobs.yaml new file mode 100644 index 0000000..5748143 --- /dev/null +++ b/pkg/resource_customizations/apps.kruise.io/AdvancedCronJob/testdata/activeJobs.yaml @@ -0,0 +1,30 @@ +apiVersion: apps.kruise.io/v1alpha1 +kind: AdvancedCronJob +metadata: + name: acj-test +spec: + schedule: "*/1 * * * *" + template: + broadcastJobTemplate: + spec: + template: + spec: + containers: + - name: pi + image: perl + command: ["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"] + restartPolicy: Never + completionPolicy: + type: Always + ttlSecondsAfterFinished: 30 + +status: + active: + - apiVersion: apps.kruise.io/v1alpha1 + kind: BroadcastJob + name: acj-test-1694882400 + namespace: default + resourceVersion: '4012' + uid: 2b08a429-a43b-4382-8e5d-3db0c72b5b13 + lastScheduleTime: '2023-09-16T16:40:00Z' + type: BroadcastJob diff --git a/pkg/resource_customizations/apps.kruise.io/AdvancedCronJob/testdata/lastScheduleTime.yaml b/pkg/resource_customizations/apps.kruise.io/AdvancedCronJob/testdata/lastScheduleTime.yaml new file mode 100644 index 0000000..bf48bdb --- /dev/null +++ b/pkg/resource_customizations/apps.kruise.io/AdvancedCronJob/testdata/lastScheduleTime.yaml @@ -0,0 +1,23 @@ +apiVersion: apps.kruise.io/v1alpha1 +kind: AdvancedCronJob +metadata: + name: acj-test +spec: + schedule: "*/1 * * * *" + template: + broadcastJobTemplate: + spec: + template: + spec: + containers: + - name: pi + image: perl + command: ["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"] + restartPolicy: Never + completionPolicy: + type: Always + ttlSecondsAfterFinished: 30 + +status: + lastScheduleTime: "2023-09-16T16:29:00Z" + type: BroadcastJob diff --git a/pkg/resource_customizations/apps.kruise.io/AdvancedCronJob/testdata/notScheduled.yaml b/pkg/resource_customizations/apps.kruise.io/AdvancedCronJob/testdata/notScheduled.yaml new file mode 100644 index 0000000..cc8a9dd --- /dev/null +++ b/pkg/resource_customizations/apps.kruise.io/AdvancedCronJob/testdata/notScheduled.yaml @@ -0,0 +1,22 @@ +apiVersion: apps.kruise.io/v1alpha1 +kind: AdvancedCronJob +metadata: + name: acj-test +spec: + schedule: "*/1 * * * *" + template: + broadcastJobTemplate: + spec: + template: + spec: + containers: + - name: pi + image: perl + command: ["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"] + restartPolicy: Never + completionPolicy: + type: Always + ttlSecondsAfterFinished: 30 + +status: + lastScheduleTime: null diff --git a/pkg/resource_customizations/apps.kruise.io/AdvancedCronJob/testdata/suspended.yaml b/pkg/resource_customizations/apps.kruise.io/AdvancedCronJob/testdata/suspended.yaml new file mode 100644 index 0000000..dc79f1b --- /dev/null +++ b/pkg/resource_customizations/apps.kruise.io/AdvancedCronJob/testdata/suspended.yaml @@ -0,0 +1,23 @@ +apiVersion: apps.kruise.io/v1alpha1 +kind: AdvancedCronJob +metadata: + name: acj-test +spec: + schedule: "*/1 * * * *" + template: + broadcastJobTemplate: + spec: + template: + spec: + containers: + - name: pi + image: perl + command: ["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"] + restartPolicy: Never + completionPolicy: + type: Always + ttlSecondsAfterFinished: 30 + paused: true + +status: + type: BroadcastJob diff --git a/pkg/resource_customizations/apps.kruise.io/BroadcastJob/health.lua b/pkg/resource_customizations/apps.kruise.io/BroadcastJob/health.lua new file mode 100644 index 
0000000..3b20ca8 --- /dev/null +++ b/pkg/resource_customizations/apps.kruise.io/BroadcastJob/health.lua @@ -0,0 +1,32 @@ +hs={ status = "Progressing", message = "BroadcastJob is still running" } + +if obj.status ~= nil then + +-- BroadcastJobs are Healthy if the desired and succeeded counts are equal + if obj.status.desired == obj.status.succeeded and obj.status.phase == "completed" then + hs.status = "Healthy" + hs.message = "BroadcastJob is completed successfully" + return hs + end +-- BroadcastJobs are Progressing if active is not equal to 0 + if obj.status.active ~= 0 and obj.status.phase == "running" then + hs.status = "Progressing" + hs.message = "BroadcastJob is still running" + return hs + end +-- BroadcastJobs are Degraded if failed is not equal to 0 + if obj.status.failed ~= 0 and obj.status.phase == "failed" then + hs.status = "Degraded" + hs.message = "BroadcastJob failed" + return hs + end + + if obj.status.phase == "paused" and obj.spec.paused == true then + hs.status = "Suspended" + hs.message = "BroadcastJob is Paused" + return hs + end + +end + +return hs diff --git a/pkg/resource_customizations/apps.kruise.io/BroadcastJob/health_test.yaml b/pkg/resource_customizations/apps.kruise.io/BroadcastJob/health_test.yaml new file mode 100644 index 0000000..e3e16e2 --- /dev/null +++ b/pkg/resource_customizations/apps.kruise.io/BroadcastJob/health_test.yaml @@ -0,0 +1,17 @@ +tests: + - healthStatus: + status: Healthy + message: "BroadcastJob is completed successfully" + inputPath: testdata/succeeded.yaml + - healthStatus: + status: Degraded + message: "BroadcastJob failed" + inputPath: testdata/failed.yaml + - healthStatus: + status: Progressing + message: "BroadcastJob is still running" + inputPath: testdata/running.yaml + - healthStatus: + status: Suspended + message: "BroadcastJob is Paused" + inputPath: testdata/suspended.yaml diff --git a/pkg/resource_customizations/apps.kruise.io/BroadcastJob/testdata/failed.yaml b/pkg/resource_customizations/apps.kruise.io/BroadcastJob/testdata/failed.yaml new file mode 100644 index 0000000..88b85ca --- /dev/null +++ b/pkg/resource_customizations/apps.kruise.io/BroadcastJob/testdata/failed.yaml @@ -0,0 +1,31 @@ +apiVersion: apps.kruise.io/v1alpha1 +kind: BroadcastJob +metadata: + name: failed-job +spec: + template: + spec: + containers: + - name: guestbook + image: openkruise/guestbook:v3 + command: ["exit", "1"] # a dummy command to fail + restartPolicy: Never + completionPolicy: + type: Always + ttlSecondsAfterFinished: 60 # the job will be deleted after 60 seconds + +status: + active: 0 + completionTime: '2023-09-17T14:31:38Z' + conditions: + - lastProbeTime: '2023-09-17T14:31:38Z' + lastTransitionTime: '2023-09-17T14:31:38Z' + message: failure policy is FailurePolicyTypeFailFast and failed pod is found + reason: Failed + status: 'True' + type: Failed + desired: 1 + failed: 1 + phase: failed + startTime: '2023-09-17T14:31:32Z' + succeeded: 0 diff --git a/pkg/resource_customizations/apps.kruise.io/BroadcastJob/testdata/running.yaml b/pkg/resource_customizations/apps.kruise.io/BroadcastJob/testdata/running.yaml new file mode 100644 index 0000000..f679fa3 --- /dev/null +++ b/pkg/resource_customizations/apps.kruise.io/BroadcastJob/testdata/running.yaml @@ -0,0 +1,22 @@ +apiVersion: apps.kruise.io/v1alpha1 +kind: BroadcastJob +metadata: + name: download-image +spec: + template: + spec: + containers: + - name: guestbook + image: openkruise/guestbook:v3 + command: ["echo", "started"] # a dummy command to do nothing + restartPolicy: Never + 
completionPolicy: + type: Always + ttlSecondsAfterFinished: 60 # the job will be deleted after 60 seconds +status: + active: 1 + desired: 1 + failed: 0 + phase: running + startTime: '2023-09-17T14:43:30Z' + succeeded: 0 diff --git a/pkg/resource_customizations/apps.kruise.io/BroadcastJob/testdata/succeeded.yaml b/pkg/resource_customizations/apps.kruise.io/BroadcastJob/testdata/succeeded.yaml new file mode 100644 index 0000000..61746b2 --- /dev/null +++ b/pkg/resource_customizations/apps.kruise.io/BroadcastJob/testdata/succeeded.yaml @@ -0,0 +1,31 @@ +apiVersion: apps.kruise.io/v1alpha1 +kind: BroadcastJob +metadata: + name: download-image +spec: + template: + spec: + containers: + - name: guestbook + image: openkruise/guestbook:v3 + command: ["echo", "started"] # a dummy command to do nothing + restartPolicy: Never + completionPolicy: + type: Always + ttlSecondsAfterFinished: 60 # the job will be deleted after 60 seconds +status: + active: 0 + completionTime: '2023-09-17T14:35:14Z' + conditions: + - lastProbeTime: '2023-09-17T14:35:14Z' + lastTransitionTime: '2023-09-17T14:35:14Z' + message: Job completed, 1 pods succeeded, 0 pods failed + reason: Complete + status: 'True' + type: Complete + desired: 1 + failed: 0 + phase: completed + startTime: '2023-09-17T14:35:07Z' + succeeded: 1 + diff --git a/pkg/resource_customizations/apps.kruise.io/BroadcastJob/testdata/suspended.yaml b/pkg/resource_customizations/apps.kruise.io/BroadcastJob/testdata/suspended.yaml new file mode 100644 index 0000000..60a9b58 --- /dev/null +++ b/pkg/resource_customizations/apps.kruise.io/BroadcastJob/testdata/suspended.yaml @@ -0,0 +1,31 @@ +apiVersion: apps.kruise.io/v1alpha1 +kind: BroadcastJob +metadata: + name: download-image +spec: + template: + spec: + containers: + - name: guestbook + image: openkruise/guestbook:v3 + command: ["echo", "started"] # a dummy command to do nothing + restartPolicy: Never + paused: true + completionPolicy: + type: Always + ttlSecondsAfterFinished: 60 # the job will be deleted after 60 seconds +status: + active: 0 + completionTime: '2023-09-17T14:35:14Z' + conditions: + - lastProbeTime: '2023-09-17T14:35:14Z' + lastTransitionTime: '2023-09-17T14:35:14Z' + message: Job completed, 1 pods succeeded, 0 pods failed + reason: Complete + status: 'True' + type: Complete + desired: 1 + failed: 0 + phase: paused + startTime: '2023-09-17T14:35:07Z' + succeeded: 0 diff --git a/pkg/resource_customizations/apps.kruise.io/CloneSet/health.lua b/pkg/resource_customizations/apps.kruise.io/CloneSet/health.lua new file mode 100644 index 0000000..197ab75 --- /dev/null +++ b/pkg/resource_customizations/apps.kruise.io/CloneSet/health.lua @@ -0,0 +1,33 @@ +hs={ status = "Progressing", message = "Waiting for initialization" } + +if obj.status ~= nil then + + if obj.metadata.generation == obj.status.observedGeneration then + + if obj.spec.updateStrategy.paused == true or not obj.status.updatedAvailableReplicas then + hs.status = "Suspended" + hs.message = "Cloneset is paused" + return hs + elseif obj.spec.updateStrategy.partition ~= 0 and obj.metadata.generation > 1 then + if obj.status.updatedReplicas >= obj.status.expectedUpdatedReplicas then + hs.status = "Suspended" + hs.message = "Cloneset needs manual intervention" + return hs + end + + elseif obj.status.updatedAvailableReplicas == obj.status.replicas then + hs.status = "Healthy" + hs.message = "All Cloneset workloads are ready and updated" + return hs + + else + if obj.status.updatedAvailableReplicas ~= obj.status.replicas then + hs.status = "Degraded" + hs.message = "Some replicas are not ready or available" + return hs + end + end + end +end + +return hs diff --git a/pkg/resource_customizations/apps.kruise.io/CloneSet/health_test.yaml b/pkg/resource_customizations/apps.kruise.io/CloneSet/health_test.yaml new file mode 100644 index 0000000..e740eca --- /dev/null +++ b/pkg/resource_customizations/apps.kruise.io/CloneSet/health_test.yaml @@ -0,0 +1,21 @@ +tests: + - healthStatus: + status: Healthy + message: "All Cloneset workloads are ready and updated" + inputPath: testdata/healthy.yaml + - healthStatus: + status: Degraded + message: "Some replicas are not ready or available" + inputPath: testdata/degraded.yaml + - healthStatus: + status: Progressing + message: "Waiting for initialization" + inputPath: testdata/unknown.yaml + - healthStatus: + status: Suspended + message: "Cloneset is paused" + inputPath: testdata/suspended.yaml + - healthStatus: + status: Suspended + message: "Cloneset needs manual intervention" + inputPath: testdata/partition_suspended.yaml diff --git a/pkg/resource_customizations/apps.kruise.io/CloneSet/testdata/degraded.yaml b/pkg/resource_customizations/apps.kruise.io/CloneSet/testdata/degraded.yaml new file mode 100644 index 0000000..36e9a0d --- /dev/null +++ b/pkg/resource_customizations/apps.kruise.io/CloneSet/testdata/degraded.yaml @@ -0,0 +1,35 @@ +apiVersion: apps.kruise.io/v1alpha1 +kind: CloneSet +metadata: + name: cloneset-test + namespace: kruise + generation: 1 + labels: + app: sample +spec: + replicas: 2 + selector: + matchLabels: + app: sample + template: + metadata: + labels: + app: sample + spec: + containers: + - name: nginx + image: nginx:alpine + updateStrategy: + paused: false + +status: + observedGeneration: 1 + replicas: 2 + updatedReadyReplicas: 1 + updatedAvailableReplicas: 1 + conditions: + - lastTransitionTime: "2021-09-21T22:35:31Z" + message: Deployment has minimum availability. + reason: MinimumReplicasAvailable + status: 'True' + type: FailedScale diff --git a/pkg/resource_customizations/apps.kruise.io/CloneSet/testdata/healthy.yaml b/pkg/resource_customizations/apps.kruise.io/CloneSet/testdata/healthy.yaml new file mode 100644 index 0000000..8a19353 --- /dev/null +++ b/pkg/resource_customizations/apps.kruise.io/CloneSet/testdata/healthy.yaml @@ -0,0 +1,36 @@ +apiVersion: apps.kruise.io/v1alpha1 +kind: CloneSet +metadata: + name: cloneset-test + namespace: kruise + generation: 1 + labels: + app: sample +spec: + replicas: 1 + selector: + matchLabels: + app: sample + template: + metadata: + labels: + app: sample + spec: + containers: + - name: nginx + image: nginx:alpine + updateStrategy: + paused: false + + +status: + observedGeneration: 1 + replicas: 2 + updatedReadyReplicas: 2 + updatedAvailableReplicas: 2 + conditions: + - lastTransitionTime: "2021-09-21T22:35:31Z" + message: Deployment has minimum availability. 
+ reason: MinimumReplicasAvailable + status: 'True' + type: FailedScale diff --git a/pkg/resource_customizations/apps.kruise.io/CloneSet/testdata/partition_suspended.yaml b/pkg/resource_customizations/apps.kruise.io/CloneSet/testdata/partition_suspended.yaml new file mode 100644 index 0000000..674c522 --- /dev/null +++ b/pkg/resource_customizations/apps.kruise.io/CloneSet/testdata/partition_suspended.yaml @@ -0,0 +1,31 @@ +apiVersion: apps.kruise.io/v1alpha1 +kind: CloneSet +metadata: + name: cloneset-test + namespace: kruise + generation: 2 + labels: + app: sample +spec: + replicas: 5 + selector: + matchLabels: + app: sample + template: + metadata: + labels: + app: sample + spec: + containers: + - name: nginx + image: nginx:alpine + updateStrategy: + partition: 3 + +status: + observedGeneration: 2 + replicas: 5 + expectedUpdatedReplicas: 2 + updatedReadyReplicas: 1 + updatedAvailableReplicas: 1 + updatedReplicas: 3 diff --git a/pkg/resource_customizations/apps.kruise.io/CloneSet/testdata/suspended.yaml b/pkg/resource_customizations/apps.kruise.io/CloneSet/testdata/suspended.yaml new file mode 100644 index 0000000..9edfaca --- /dev/null +++ b/pkg/resource_customizations/apps.kruise.io/CloneSet/testdata/suspended.yaml @@ -0,0 +1,35 @@ +apiVersion: apps.kruise.io/v1alpha1 +kind: CloneSet +metadata: + name: cloneset-test + namespace: kruise + generation: 2 + labels: + app: sample +spec: + replicas: 1 + selector: + matchLabels: + app: sample + template: + metadata: + labels: + app: sample + spec: + containers: + - name: nginx + image: nginx:alpine + updateStrategy: + paused: true + +status: + observedGeneration: 2 + replicas: 2 + updatedReadyReplicas: 2 + updatedAvailableReplicas: 2 + conditions: + - lastTransitionTime: "2021-09-21T22:35:31Z" + message: Deployment has minimum availability. 
+ reason: MinimumReplicasAvailable + status: 'True' + type: FailedScale diff --git a/pkg/resource_customizations/apps.kruise.io/CloneSet/testdata/unknown.yaml b/pkg/resource_customizations/apps.kruise.io/CloneSet/testdata/unknown.yaml new file mode 100644 index 0000000..c1ccdb2 --- /dev/null +++ b/pkg/resource_customizations/apps.kruise.io/CloneSet/testdata/unknown.yaml @@ -0,0 +1,5 @@ +apiVersion: apps.kruise.io/v1alpha1 +kind: CloneSet +metadata: + name: cloneset-test + namespace: kruise diff --git a/pkg/resource_customizations/apps.kruise.io/DaemonSet/health.lua b/pkg/resource_customizations/apps.kruise.io/DaemonSet/health.lua new file mode 100644 index 0000000..7705bcc --- /dev/null +++ b/pkg/resource_customizations/apps.kruise.io/DaemonSet/health.lua @@ -0,0 +1,35 @@ +hs={ status = "Progressing", message = "Waiting for initialization" } + +if obj.status ~= nil then + + if obj.metadata.generation == obj.status.observedGeneration then + + if obj.spec.updateStrategy.rollingUpdate.paused == true or not obj.status.updatedNumberScheduled then + hs.status = "Suspended" + hs.message = "Daemonset is paused" + return hs + elseif obj.spec.updateStrategy.rollingUpdate.partition ~= 0 and obj.metadata.generation > 1 then + if obj.status.updatedNumberScheduled > (obj.status.desiredNumberScheduled - obj.spec.updateStrategy.rollingUpdate.partition) then + hs.status = "Suspended" + hs.message = "Daemonset needs manual intervention" + return hs + end + + elseif (obj.status.updatedNumberScheduled == obj.status.desiredNumberScheduled) and (obj.status.numberAvailable == obj.status.desiredNumberScheduled) then + hs.status = "Healthy" + hs.message = "All Daemonset workloads are ready and updated" + return hs + + else + if (obj.status.updatedNumberScheduled == obj.status.desiredNumberScheduled) and (obj.status.numberUnavailable == obj.status.desiredNumberScheduled) then + hs.status = "Degraded" + hs.message = "Some pods are not ready or available" + return hs + end + end + + end + +end + +return hs diff --git a/pkg/resource_customizations/apps.kruise.io/DaemonSet/health_test.yaml b/pkg/resource_customizations/apps.kruise.io/DaemonSet/health_test.yaml new file mode 100644 index 0000000..0a8c829 --- /dev/null +++ b/pkg/resource_customizations/apps.kruise.io/DaemonSet/health_test.yaml @@ -0,0 +1,21 @@ +tests: + - healthStatus: + status: Healthy + message: "All Daemonset workloads are ready and updated" + inputPath: testdata/healthy.yaml + - healthStatus: + status: Degraded + message: "Some pods are not ready or available" + inputPath: testdata/degraded.yaml + - healthStatus: + status: Progressing + message: "Waiting for initialization" + inputPath: testdata/unknown.yaml + - healthStatus: + status: Suspended + message: "Daemonset is paused" + inputPath: testdata/suspended.yaml + - healthStatus: + status: Suspended + message: "Daemonset needs manual intervention" + inputPath: testdata/partition_suspended.yaml diff --git a/pkg/resource_customizations/apps.kruise.io/DaemonSet/testdata/degraded.yaml b/pkg/resource_customizations/apps.kruise.io/DaemonSet/testdata/degraded.yaml new file mode 100644 index 0000000..ed8cbc0 --- /dev/null +++ b/pkg/resource_customizations/apps.kruise.io/DaemonSet/testdata/degraded.yaml @@ -0,0 +1,34 @@ +apiVersion: apps.kruise.io/v1alpha1 +kind: DaemonSet +metadata: + name: daemonset-test + namespace: kruise + generation: 1 + labels: + app: sample +spec: + selector: + matchLabels: + app: sample + template: + metadata: + labels: + app: sample + spec: + containers: + - name: nginx + image: 
nginx:alpine + updateStrategy: + rollingUpdate: + partition: 0 + paused: false + +status: + currentNumberScheduled: 1 + daemonSetHash: 5dffcdfcd7 + desiredNumberScheduled: 1 + numberUnavailable: 1 + numberMisscheduled: 0 + numberReady: 0 + observedGeneration: 1 + updatedNumberScheduled: 1 diff --git a/pkg/resource_customizations/apps.kruise.io/DaemonSet/testdata/healthy.yaml b/pkg/resource_customizations/apps.kruise.io/DaemonSet/testdata/healthy.yaml new file mode 100644 index 0000000..6224ebf --- /dev/null +++ b/pkg/resource_customizations/apps.kruise.io/DaemonSet/testdata/healthy.yaml @@ -0,0 +1,34 @@ +apiVersion: apps.kruise.io/v1alpha1 +kind: DaemonSet +metadata: + name: daemonset-test + namespace: kruise + generation: 1 + labels: + app: sample +spec: + selector: + matchLabels: + app: sample + template: + metadata: + labels: + app: sample + spec: + containers: + - name: nginx + image: nginx:alpine + updateStrategy: + rollingUpdate: + partition: 0 + paused: false + +status: + currentNumberScheduled: 1 + daemonSetHash: 5dffcdfcd7 + desiredNumberScheduled: 1 + numberAvailable: 1 + numberMisscheduled: 0 + numberReady: 1 + observedGeneration: 1 + updatedNumberScheduled: 1 diff --git a/pkg/resource_customizations/apps.kruise.io/DaemonSet/testdata/partition_suspended.yaml b/pkg/resource_customizations/apps.kruise.io/DaemonSet/testdata/partition_suspended.yaml new file mode 100644 index 0000000..4c0819c --- /dev/null +++ b/pkg/resource_customizations/apps.kruise.io/DaemonSet/testdata/partition_suspended.yaml @@ -0,0 +1,33 @@ +apiVersion: apps.kruise.io/v1alpha1 +kind: DaemonSet +metadata: + name: daemonset-test + namespace: kruise + generation: 6 + labels: + app: sample +spec: + selector: + matchLabels: + app: sample + template: + metadata: + labels: + app: sample + spec: + containers: + - name: nginx + image: nginx:alpine + updateStrategy: + rollingUpdate: + partition: 4 + +status: + currentNumberScheduled: 1 + daemonSetHash: 5f8cdcdc65 + desiredNumberScheduled: 10 + numberAvailable: 10 + numberMisscheduled: 0 + numberReady: 10 + observedGeneration: 6 + updatedNumberScheduled: 7 diff --git a/pkg/resource_customizations/apps.kruise.io/DaemonSet/testdata/suspended.yaml b/pkg/resource_customizations/apps.kruise.io/DaemonSet/testdata/suspended.yaml new file mode 100644 index 0000000..fb705f5 --- /dev/null +++ b/pkg/resource_customizations/apps.kruise.io/DaemonSet/testdata/suspended.yaml @@ -0,0 +1,33 @@ +apiVersion: apps.kruise.io/v1alpha1 +kind: DaemonSet +metadata: + name: daemonset-test + namespace: kruise + generation: 1 + labels: + app: sample +spec: + selector: + matchLabels: + app: sample + template: + metadata: + labels: + app: sample + spec: + containers: + - name: nginx + image: nginx:alpine + updateStrategy: + rollingUpdate: + paused: true + +status: + currentNumberScheduled: 1 + daemonSetHash: 5dffcdfcd7 + desiredNumberScheduled: 1 + numberAvailable: 1 + numberMisscheduled: 0 + numberReady: 1 + observedGeneration: 1 + updatedNumberScheduled: 1 diff --git a/pkg/resource_customizations/apps.kruise.io/DaemonSet/testdata/unknown.yaml b/pkg/resource_customizations/apps.kruise.io/DaemonSet/testdata/unknown.yaml new file mode 100644 index 0000000..aa5791c --- /dev/null +++ b/pkg/resource_customizations/apps.kruise.io/DaemonSet/testdata/unknown.yaml @@ -0,0 +1,5 @@ +apiVersion: apps.kruise.io/v1alpha1 +kind: DaemonSet +metadata: + name: daemonset-test + namespace: kruise diff --git a/pkg/resource_customizations/apps.kruise.io/StatefulSet/health.lua 
b/pkg/resource_customizations/apps.kruise.io/StatefulSet/health.lua new file mode 100644 index 0000000..4734045 --- /dev/null +++ b/pkg/resource_customizations/apps.kruise.io/StatefulSet/health.lua @@ -0,0 +1,35 @@ +hs={ status = "Progressing", message = "Waiting for initialization" } + +if obj.status ~= nil then + + if obj.metadata.generation == obj.status.observedGeneration then + + if obj.spec.updateStrategy.rollingUpdate.paused == true or not obj.status.updatedAvailableReplicas then + hs.status = "Suspended" + hs.message = "Statefulset is paused" + return hs + elseif obj.spec.updateStrategy.rollingUpdate.partition ~= 0 and obj.metadata.generation > 1 then + if obj.status.updatedReplicas > (obj.status.replicas - obj.spec.updateStrategy.rollingUpdate.partition) then + hs.status = "Suspended" + hs.message = "Statefulset needs manual intervention" + return hs + end + + elseif obj.status.updatedAvailableReplicas == obj.status.replicas then + hs.status = "Healthy" + hs.message = "All Statefulset workloads are ready and updated" + return hs + + else + if obj.status.updatedAvailableReplicas ~= obj.status.replicas then + hs.status = "Degraded" + hs.message = "Some replicas are not ready or available" + return hs + end + end + + end + +end + +return hs diff --git a/pkg/resource_customizations/apps.kruise.io/StatefulSet/health_test.yaml b/pkg/resource_customizations/apps.kruise.io/StatefulSet/health_test.yaml new file mode 100644 index 0000000..6672b9f --- /dev/null +++ b/pkg/resource_customizations/apps.kruise.io/StatefulSet/health_test.yaml @@ -0,0 +1,21 @@ +tests: + - healthStatus: + status: Healthy + message: "All Statefulset workloads are ready and updated" + inputPath: testdata/healthy.yaml + - healthStatus: + status: Degraded + message: "Some replicas are not ready or available" + inputPath: testdata/degraded.yaml + - healthStatus: + status: Progressing + message: "Waiting for initialization" + inputPath: testdata/unknown.yaml + - healthStatus: + status: Suspended + message: "Statefulset is paused" + inputPath: testdata/suspended.yaml + - healthStatus: + status: Suspended + message: "Statefulset needs manual intervention" + inputPath: testdata/partition_suspended.yaml diff --git a/pkg/resource_customizations/apps.kruise.io/StatefulSet/testdata/degraded.yaml b/pkg/resource_customizations/apps.kruise.io/StatefulSet/testdata/degraded.yaml new file mode 100644 index 0000000..88e5891 --- /dev/null +++ b/pkg/resource_customizations/apps.kruise.io/StatefulSet/testdata/degraded.yaml @@ -0,0 +1,42 @@ +apiVersion: apps.kruise.io/v1beta1 +kind: StatefulSet +metadata: + name: statefulset-test + namespace: kruise + generation: 5 + labels: + app: sample +spec: + replicas: 2 + selector: + matchLabels: + app: sample + template: + metadata: + labels: + app: sample + spec: + containers: + - name: nginx + image: nginx:alpine + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + minReadySeconds: 0 + paused: false + partition: 0 + podUpdatePolicy: ReCreate + type: RollingUpdate + +status: + observedGeneration: 5 + replicas: 2 + updatedAvailableReplicas: 1 + updatedReadyReplicas: 1 + conditions: + - lastTransitionTime: "2021-09-21T22:35:31Z" + message: Deployment has minimum availability. 
+ reason: MinimumReplicasAvailable + status: 'True' + type: FailedCreatePod + diff --git a/pkg/resource_customizations/apps.kruise.io/StatefulSet/testdata/healthy.yaml b/pkg/resource_customizations/apps.kruise.io/StatefulSet/testdata/healthy.yaml new file mode 100644 index 0000000..793de25 --- /dev/null +++ b/pkg/resource_customizations/apps.kruise.io/StatefulSet/testdata/healthy.yaml @@ -0,0 +1,41 @@ +apiVersion: apps.kruise.io/v1beta1 +kind: StatefulSet +metadata: + name: statefulset-test + namespace: kruise + generation: 2 + labels: + app: sample +spec: + replicas: 2 + selector: + matchLabels: + app: sample + template: + metadata: + labels: + app: sample + spec: + containers: + - name: nginx + image: nginx:alpine + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + minReadySeconds: 0 + paused: false + partition: 0 + podUpdatePolicy: ReCreate + type: RollingUpdate + +status: + observedGeneration: 2 + replicas: 2 + updatedAvailableReplicas: 2 + updatedReadyReplicas: 2 + conditions: + - lastTransitionTime: "2021-09-21T22:35:31Z" + message: Deployment has minimum availability. + reason: MinimumReplicasAvailable + status: 'False' + type: FailedCreatePod diff --git a/pkg/resource_customizations/apps.kruise.io/StatefulSet/testdata/partition_suspended.yaml b/pkg/resource_customizations/apps.kruise.io/StatefulSet/testdata/partition_suspended.yaml new file mode 100644 index 0000000..b09a772 --- /dev/null +++ b/pkg/resource_customizations/apps.kruise.io/StatefulSet/testdata/partition_suspended.yaml @@ -0,0 +1,36 @@ +apiVersion: apps.kruise.io/v1beta1 +kind: StatefulSet +metadata: + name: statefulset-test + namespace: kruise + generation: 3 + labels: + app: sample +spec: + replicas: 10 + selector: + matchLabels: + app: sample + template: + metadata: + labels: + app: sample + spec: + containers: + - image: nginx:mainline + updateStrategy: + rollingUpdate: + partition: 4 + +status: + availableReplicas: 10 + currentReplicas: 4 + currentRevision: statefulset-test-d4d4fb5bd + labelSelector: app=sample + observedGeneration: 3 + readyReplicas: 10 + replicas: 10 + updateRevision: statefulset-test-56dfb978d4 + updatedAvailableReplicas: 7 + updatedReadyReplicas: 7 + updatedReplicas: 7 diff --git a/pkg/resource_customizations/apps.kruise.io/StatefulSet/testdata/suspended.yaml b/pkg/resource_customizations/apps.kruise.io/StatefulSet/testdata/suspended.yaml new file mode 100644 index 0000000..42dae9c --- /dev/null +++ b/pkg/resource_customizations/apps.kruise.io/StatefulSet/testdata/suspended.yaml @@ -0,0 +1,36 @@ +apiVersion: apps.kruise.io/v1beta1 +kind: StatefulSet +metadata: + name: statefulset-test + namespace: kruise + generation: 2 + labels: + app: sample +spec: + replicas: 2 + selector: + matchLabels: + app: sample + template: + metadata: + labels: + app: sample + spec: + containers: + - name: nginx + image: nginx:alpine + updateStrategy: + rollingUpdate: + paused: true + +status: + observedGeneration: 2 + replicas: 2 + updatedAvailableReplicas: 2 + updatedReadyReplicas: 2 + conditions: + - lastTransitionTime: "2021-09-21T22:35:31Z" + message: Deployment has minimum availability. 
+ reason: MinimumReplicasAvailable + status: 'False' + type: FailedCreatePod diff --git a/pkg/resource_customizations/apps.kruise.io/StatefulSet/testdata/unknown.yaml b/pkg/resource_customizations/apps.kruise.io/StatefulSet/testdata/unknown.yaml new file mode 100644 index 0000000..67d28de --- /dev/null +++ b/pkg/resource_customizations/apps.kruise.io/StatefulSet/testdata/unknown.yaml @@ -0,0 +1,5 @@ +apiVersion: apps.kruise.io/v1beta1 +kind: StatefulSet +metadata: + name: statefulset-test + namespace: kruise diff --git a/pkg/resource_customizations/apps.openshift.io/DeploymentConfig/health.lua b/pkg/resource_customizations/apps.openshift.io/DeploymentConfig/health.lua new file mode 100644 index 0000000..0e3faaf --- /dev/null +++ b/pkg/resource_customizations/apps.openshift.io/DeploymentConfig/health.lua @@ -0,0 +1,27 @@ +local health_check = {} +if obj.status ~= nil then + if obj.status.conditions ~= nil and obj.status.replicas ~= nil then + local numTrue = 0 + for i, condition in pairs(obj.status.conditions) do + if (condition.type == "Available" or (condition.type == "Progressing" and condition.reason == "NewReplicationControllerAvailable")) and condition.status == "True" then + numTrue = numTrue + 1 + end + end + if numTrue == 2 or obj.status.replicas == 0 then + health_check.status = "Healthy" + health_check.message = "replication controller successfully rolled out" + return health_check + elseif numTrue == 1 then + health_check.status = "Progressing" + health_check.message = "replication controller is waiting for pods to run" + return health_check + else + health_check.status = "Degraded" + health_check.message = "Deployment config is degraded" + return health_check + end + end +end +health_check.status = "Progressing" +health_check.message = "replication controller is waiting for pods to run" +return health_check \ No newline at end of file diff --git a/pkg/resource_customizations/apps.openshift.io/DeploymentConfig/health_test.yaml b/pkg/resource_customizations/apps.openshift.io/DeploymentConfig/health_test.yaml new file mode 100644 index 0000000..cd38c4b --- /dev/null +++ b/pkg/resource_customizations/apps.openshift.io/DeploymentConfig/health_test.yaml @@ -0,0 +1,21 @@ +tests: +- healthStatus: + status: Progressing + message: "replication controller is waiting for pods to run" + inputPath: testdata/progressing.yaml +- healthStatus: + status: Progressing + message: "replication controller is waiting for pods to run" + inputPath: testdata/progressing_rc_updated.yaml +- healthStatus: + status: Degraded + message: "Deployment config is degraded" + inputPath: testdata/degraded.yaml +- healthStatus: + status: Healthy + message: "replication controller successfully rolled out" + inputPath: testdata/healthy.yaml +- healthStatus: + status: Healthy + message: "replication controller successfully rolled out" + inputPath: testdata/healthy_zero_replicas.yaml \ No newline at end of file diff --git a/pkg/resource_customizations/apps.openshift.io/DeploymentConfig/testdata/degraded.yaml b/pkg/resource_customizations/apps.openshift.io/DeploymentConfig/testdata/degraded.yaml new file mode 100644 index 0000000..31379fa --- /dev/null +++ b/pkg/resource_customizations/apps.openshift.io/DeploymentConfig/testdata/degraded.yaml @@ -0,0 +1,156 @@ +kind: DeploymentConfig +apiVersion: apps.openshift.io/v1 +metadata: + name: example + namespace: default + uid: ba99eac7-ed9c-4154-bda1-69d25a90b278 + resourceVersion: '412743' + generation: 1 + creationTimestamp: '2021-08-25T23:48:11Z' + managedFields: + - 
manager: Mozilla + operation: Update + apiVersion: apps.openshift.io/v1 + time: '2021-08-25T23:48:11Z' + fieldsType: FieldsV1 + fieldsV1: + 'f:spec': + 'f:replicas': {} + 'f:selector': + .: {} + 'f:app': {} + 'f:strategy': + 'f:activeDeadlineSeconds': {} + 'f:rollingParams': + .: {} + 'f:intervalSeconds': {} + 'f:maxSurge': {} + 'f:maxUnavailable': {} + 'f:timeoutSeconds': {} + 'f:updatePeriodSeconds': {} + 'f:type': {} + 'f:template': + .: {} + 'f:metadata': + .: {} + 'f:creationTimestamp': {} + 'f:labels': + .: {} + 'f:app': {} + 'f:spec': + .: {} + 'f:containers': + .: {} + 'k:{"name":"httpd"}': + .: {} + 'f:image': {} + 'f:imagePullPolicy': {} + 'f:name': {} + 'f:ports': + .: {} + 'k:{"containerPort":8080,"protocol":"TCP"}': + .: {} + 'f:containerPort': {} + 'f:protocol': {} + 'f:resources': {} + 'f:terminationMessagePath': {} + 'f:terminationMessagePolicy': {} + 'f:dnsPolicy': {} + 'f:restartPolicy': {} + 'f:schedulerName': {} + 'f:securityContext': {} + 'f:terminationGracePeriodSeconds': {} + 'f:triggers': {} + - manager: openshift-controller-manager + operation: Update + apiVersion: apps.openshift.io/v1 + time: '2021-08-25T23:48:11Z' + fieldsType: FieldsV1 + fieldsV1: + 'f:status': + 'f:conditions': + .: {} + 'k:{"type":"Available"}': + .: {} + 'f:lastTransitionTime': {} + 'f:lastUpdateTime': {} + 'f:message': {} + 'f:status': {} + 'f:type': {} + 'k:{"type":"Progressing"}': + .: {} + 'f:lastTransitionTime': {} + 'f:lastUpdateTime': {} + 'f:message': {} + 'f:status': {} + 'f:type': {} + 'f:details': + .: {} + 'f:causes': {} + 'f:message': {} + 'f:latestVersion': {} + 'f:observedGeneration': {} +spec: + strategy: + type: Rolling + rollingParams: + updatePeriodSeconds: 1 + intervalSeconds: 1 + timeoutSeconds: 600 + maxUnavailable: 25% + maxSurge: 25% + resources: {} + activeDeadlineSeconds: 21600 + triggers: + - type: ConfigChange + replicas: 3 + revisionHistoryLimit: 10 + test: false + selector: + app: httpd + template: + metadata: + creationTimestamp: null + labels: + app: httpd + spec: + containers: + - name: httpd + image: >- + image-registry.openshift-image-registry.svc:5000/openshift/httpd:latest + ports: + - containerPort: 8080 + protocol: TCP + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + imagePullPolicy: Always + restartPolicy: Always + terminationGracePeriodSeconds: 30 + dnsPolicy: ClusterFirst + securityContext: {} + schedulerName: default-scheduler +status: + latestVersion: 1 + observedGeneration: 1 + replicas: 10 + updatedReplicas: 10 + availableReplicas: 0 + unavailableReplicas: 10 + details: + message: config change + causes: + - type: ConfigChange + conditions: + - type: Available + status: 'False' + lastUpdateTime: '2021-08-25T23:48:11Z' + lastTransitionTime: '2021-08-25T23:48:11Z' + message: Deployment config does not have minimum availability. 
+ - type: Progressing + status: Unknown + lastUpdateTime: '2021-08-25T23:48:11Z' + lastTransitionTime: '2021-08-25T23:48:11Z' + message: >- + replication controller "example-1" is waiting for pod "example-1-deploy" + to run \ No newline at end of file diff --git a/pkg/resource_customizations/apps.openshift.io/DeploymentConfig/testdata/healthy.yaml b/pkg/resource_customizations/apps.openshift.io/DeploymentConfig/testdata/healthy.yaml new file mode 100644 index 0000000..266f655 --- /dev/null +++ b/pkg/resource_customizations/apps.openshift.io/DeploymentConfig/testdata/healthy.yaml @@ -0,0 +1,66 @@ +kind: DeploymentConfig +apiVersion: apps.openshift.io/v1 +metadata: + name: example + namespace: default +spec: + strategy: + type: Rolling + rollingParams: + updatePeriodSeconds: 1 + intervalSeconds: 1 + timeoutSeconds: 600 + maxUnavailable: 25% + maxSurge: 25% + resources: {} + activeDeadlineSeconds: 21600 + triggers: + - type: ConfigChange + replicas: 3 + revisionHistoryLimit: 10 + test: false + selector: + app: httpd + template: + metadata: + creationTimestamp: null + labels: + app: httpd + spec: + containers: + - name: httpd + image: >- + image-registry.openshift-image-registry.svc:5000/openshift/httpd:latest + ports: + - containerPort: 8080 + protocol: TCP + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + imagePullPolicy: Always + restartPolicy: Always + terminationGracePeriodSeconds: 30 + dnsPolicy: ClusterFirst + securityContext: {} + schedulerName: default-scheduler +status: + observedGeneration: 1 + details: + message: config change + causes: + - type: ConfigChange + availableReplicas: 3 + conditions: + - type: Available + status: 'True' + lastUpdateTime: '2021-08-25T23:48:29Z' + lastTransitionTime: '2021-08-25T23:48:29Z' + message: Deployment config has minimum availability. 
+ - type: Progressing + status: 'True' + lastUpdateTime: '2021-08-25T23:48:29Z' + lastTransitionTime: '2021-08-25T23:48:15Z' + reason: NewReplicationControllerAvailable + message: replication controller "example-1" successfully rolled out + replicas: 3 + readyReplicas: 3 \ No newline at end of file diff --git a/pkg/resource_customizations/apps.openshift.io/DeploymentConfig/testdata/healthy_zero_replicas.yaml b/pkg/resource_customizations/apps.openshift.io/DeploymentConfig/testdata/healthy_zero_replicas.yaml new file mode 100644 index 0000000..c0837a3 --- /dev/null +++ b/pkg/resource_customizations/apps.openshift.io/DeploymentConfig/testdata/healthy_zero_replicas.yaml @@ -0,0 +1,68 @@ +kind: DeploymentConfig +apiVersion: apps.openshift.io/v1 +metadata: + name: example + namespace: default +spec: + strategy: + type: Rolling + rollingParams: + updatePeriodSeconds: 1 + intervalSeconds: 1 + timeoutSeconds: 600 + maxUnavailable: 25% + maxSurge: 25% + resources: {} + activeDeadlineSeconds: 21600 + triggers: + - type: ConfigChange + replicas: 3 + revisionHistoryLimit: 10 + test: false + selector: + app: httpd + template: + metadata: + creationTimestamp: null + labels: + app: httpd + spec: + containers: + - name: httpd + image: >- + image-registry.openshift-image-registry.svc:5000/openshift/httpd:latest + ports: + - containerPort: 8080 + protocol: TCP + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + imagePullPolicy: Always + restartPolicy: Always + terminationGracePeriodSeconds: 30 + dnsPolicy: ClusterFirst + securityContext: {} + schedulerName: default-scheduler +status: + availableReplicas: 0 + conditions: + - lastTransitionTime: '2022-02-02T12:22:22Z' + lastUpdateTime: '2022-02-02T12:23:53Z' + message: replication controller "jenkins-1" successfully rolled out + reason: NewReplicationControllerAvailable + status: 'True' + type: Progressing + - lastTransitionTime: '2022-02-02T14:11:11Z' + lastUpdateTime: '2022-02-02T14:11:11Z' + message: Deployment config does not have minimum availability. 
+ status: 'False' + type: Available + details: + causes: + - type: ConfigChange + message: config change + latestVersion: 1 + observedGeneration: 5 + replicas: 0 + unavailableReplicas: 0 + updatedReplicas: 0 \ No newline at end of file diff --git a/pkg/resource_customizations/apps.openshift.io/DeploymentConfig/testdata/progressing.yaml b/pkg/resource_customizations/apps.openshift.io/DeploymentConfig/testdata/progressing.yaml new file mode 100644 index 0000000..6f83257 --- /dev/null +++ b/pkg/resource_customizations/apps.openshift.io/DeploymentConfig/testdata/progressing.yaml @@ -0,0 +1,163 @@ +kind: DeploymentConfig +apiVersion: apps.openshift.io/v1 +metadata: + name: example + namespace: default + uid: 365e7b5a-a72e-4fdf-be9d-cd5590d956bb + resourceVersion: '88832' + generation: 1 + creationTimestamp: '2021-08-26T04:37:06Z' + managedFields: + - manager: Mozilla + operation: Update + apiVersion: apps.openshift.io/v1 + time: '2021-08-26T04:37:06Z' + fieldsType: FieldsV1 + fieldsV1: + 'f:spec': + 'f:replicas': {} + 'f:revisionHistoryLimit': {} + 'f:selector': + .: {} + 'f:app': {} + 'f:strategy': + 'f:activeDeadlineSeconds': {} + 'f:rollingParams': + .: {} + 'f:intervalSeconds': {} + 'f:maxSurge': {} + 'f:maxUnavailable': {} + 'f:timeoutSeconds': {} + 'f:updatePeriodSeconds': {} + 'f:type': {} + 'f:template': + .: {} + 'f:metadata': + .: {} + 'f:creationTimestamp': {} + 'f:labels': + .: {} + 'f:app': {} + 'f:spec': + .: {} + 'f:containers': + .: {} + 'k:{"name":"httpd"}': + .: {} + 'f:image': {} + 'f:imagePullPolicy': {} + 'f:name': {} + 'f:ports': + .: {} + 'k:{"containerPort":8080,"protocol":"TCP"}': + .: {} + 'f:containerPort': {} + 'f:protocol': {} + 'f:resources': {} + 'f:terminationMessagePath': {} + 'f:terminationMessagePolicy': {} + 'f:dnsPolicy': {} + 'f:restartPolicy': {} + 'f:schedulerName': {} + 'f:securityContext': {} + 'f:terminationGracePeriodSeconds': {} + 'f:triggers': {} + - manager: openshift-controller-manager + operation: Update + apiVersion: apps.openshift.io/v1 + time: '2021-08-26T04:37:11Z' + fieldsType: FieldsV1 + fieldsV1: + 'f:status': + 'f:updatedReplicas': {} + 'f:readyReplicas': {} + 'f:conditions': + .: {} + 'k:{"type":"Available"}': + .: {} + 'f:lastTransitionTime': {} + 'f:lastUpdateTime': {} + 'f:message': {} + 'f:status': {} + 'f:type': {} + 'k:{"type":"Progressing"}': + .: {} + 'f:lastTransitionTime': {} + 'f:lastUpdateTime': {} + 'f:message': {} + 'f:status': {} + 'f:type': {} + 'f:details': + .: {} + 'f:causes': {} + 'f:message': {} + 'f:replicas': {} + 'f:availableReplicas': {} + 'f:observedGeneration': {} + 'f:unavailableReplicas': {} + 'f:latestVersion': {} +spec: + strategy: + type: Rolling + rollingParams: + updatePeriodSeconds: 1 + intervalSeconds: 1 + timeoutSeconds: 600 + maxUnavailable: 25% + maxSurge: 25% + resources: {} + activeDeadlineSeconds: 21600 + triggers: + - type: ConfigChange + replicas: 20 + revisionHistoryLimit: 10 + test: false + selector: + app: httpd + template: + metadata: + creationTimestamp: null + labels: + app: httpd + spec: + containers: + - name: httpd + image: >- + image-registry.openshift-image-registry.svc:5000/openshift/httpd:latest + ports: + - containerPort: 8080 + protocol: TCP + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + imagePullPolicy: Always + restartPolicy: Always + terminationGracePeriodSeconds: 30 + dnsPolicy: ClusterFirst + securityContext: {} + schedulerName: default-scheduler +status: + observedGeneration: 1 + details: + message: config change + causes: 
+ - type: ConfigChange + availableReplicas: 20 + unavailableReplicas: 0 + latestVersion: 1 + updatedReplicas: 20 + conditions: + - type: Progressing + status: Unknown + lastUpdateTime: '2021-08-26T04:37:06Z' + lastTransitionTime: '2021-08-26T04:37:06Z' + message: >- + replication controller "example-1" is waiting for pod "example-1-deploy" + to run + - type: Available + status: 'True' + lastUpdateTime: '2021-08-26T04:37:12Z' + lastTransitionTime: '2021-08-26T04:37:12Z' + message: Deployment config has minimum availability. + replicas: 20 + readyReplicas: 20 diff --git a/pkg/resource_customizations/apps.openshift.io/DeploymentConfig/testdata/progressing_rc_updated.yaml b/pkg/resource_customizations/apps.openshift.io/DeploymentConfig/testdata/progressing_rc_updated.yaml new file mode 100644 index 0000000..8495344 --- /dev/null +++ b/pkg/resource_customizations/apps.openshift.io/DeploymentConfig/testdata/progressing_rc_updated.yaml @@ -0,0 +1,66 @@ +kind: DeploymentConfig +apiVersion: apps.openshift.io/v1 +metadata: + name: example + namespace: default +spec: + strategy: + type: Rolling + rollingParams: + updatePeriodSeconds: 1 + intervalSeconds: 1 + timeoutSeconds: 600 + maxUnavailable: 25% + maxSurge: 25% + resources: {} + activeDeadlineSeconds: 21600 + triggers: + - type: ConfigChange + replicas: 3 + revisionHistoryLimit: 10 + test: false + selector: + app: httpd + template: + metadata: + creationTimestamp: null + labels: + app: httpd + spec: + containers: + - name: httpd + image: >- + image-registry.openshift-image-registry.svc:5000/openshift/httpd:latest + ports: + - containerPort: 8080 + protocol: TCP + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + imagePullPolicy: Always + restartPolicy: Always + terminationGracePeriodSeconds: 30 + dnsPolicy: ClusterFirst + securityContext: {} + schedulerName: default-scheduler +status: + observedGeneration: 1 + details: + message: config change + causes: + - type: ConfigChange + availableReplicas: 3 + conditions: + - type: Available + status: 'True' + lastUpdateTime: '2021-08-25T23:48:29Z' + lastTransitionTime: '2021-08-25T23:48:29Z' + message: Deployment config has minimum availability. 
+ - type: Progressing + status: 'True' + lastUpdateTime: '2021-08-25T23:48:29Z' + lastTransitionTime: '2021-08-25T23:48:15Z' + reason: ReplicationControllerUpdated + message: replication controller "example-1" is progressing + replicas: 3 + readyReplicas: 3 \ No newline at end of file diff --git a/pkg/resource_customizations/apps/DaemonSet/actions/action_test.yaml b/pkg/resource_customizations/apps/DaemonSet/actions/action_test.yaml new file mode 100644 index 0000000..4298ac4 --- /dev/null +++ b/pkg/resource_customizations/apps/DaemonSet/actions/action_test.yaml @@ -0,0 +1,4 @@ +actionTests: +- action: restart + inputPath: testdata/daemonset.yaml + expectedOutputPath: testdata/daemonset-restarted.yaml diff --git a/pkg/resource_customizations/apps/DaemonSet/actions/discovery.lua b/pkg/resource_customizations/apps/DaemonSet/actions/discovery.lua new file mode 100644 index 0000000..dc7f104 --- /dev/null +++ b/pkg/resource_customizations/apps/DaemonSet/actions/discovery.lua @@ -0,0 +1,3 @@ +local actions = {} +actions["restart"] = {} +return actions diff --git a/pkg/resource_customizations/apps/DaemonSet/actions/restart/action.lua b/pkg/resource_customizations/apps/DaemonSet/actions/restart/action.lua new file mode 100644 index 0000000..457c767 --- /dev/null +++ b/pkg/resource_customizations/apps/DaemonSet/actions/restart/action.lua @@ -0,0 +1,9 @@ +local os = require("os") +if obj.spec.template.metadata == nil then + obj.spec.template.metadata = {} +end +if obj.spec.template.metadata.annotations == nil then + obj.spec.template.metadata.annotations = {} +end +obj.spec.template.metadata.annotations["kubectl.kubernetes.io/restartedAt"] = os.date("!%Y-%m-%dT%XZ") +return obj diff --git a/pkg/resource_customizations/apps/DaemonSet/actions/testdata/daemonset-restarted.yaml b/pkg/resource_customizations/apps/DaemonSet/actions/testdata/daemonset-restarted.yaml new file mode 100644 index 0000000..e6ff036 --- /dev/null +++ b/pkg/resource_customizations/apps/DaemonSet/actions/testdata/daemonset-restarted.yaml @@ -0,0 +1,50 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + annotations: + deprecated.daemonset.template.generation: "3" + creationTimestamp: "2019-09-13T08:52:50Z" + generation: 3 + labels: + app.kubernetes.io/instance: extensions + name: daemonset + namespace: statefulset + resourceVersion: "7472656" + selfLink: /apis/apps/v1/namespaces/statefulset/daemonsets/daemonset + uid: de04d075-d603-11e9-9e69-42010aa8005f +spec: + revisionHistoryLimit: 10 + selector: + matchLabels: + name: daemonset + template: + metadata: + annotations: + kubectl.kubernetes.io/restartedAt: "0001-01-01T00:00:00Z" + labels: + name: daemonset + spec: + containers: + - image: registry.k8s.io/nginx-slim:0.8 + imagePullPolicy: IfNotPresent + name: nginx + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + dnsPolicy: ClusterFirst + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + terminationGracePeriodSeconds: 30 + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate +status: + currentNumberScheduled: 4 + desiredNumberScheduled: 4 + numberAvailable: 4 + numberMisscheduled: 0 + numberReady: 4 + observedGeneration: 3 + updatedNumberScheduled: 4 diff --git a/pkg/resource_customizations/apps/DaemonSet/actions/testdata/daemonset.yaml b/pkg/resource_customizations/apps/DaemonSet/actions/testdata/daemonset.yaml new file mode 100644 index 0000000..d293188 --- /dev/null +++ 
b/pkg/resource_customizations/apps/DaemonSet/actions/testdata/daemonset.yaml @@ -0,0 +1,48 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + annotations: + deprecated.daemonset.template.generation: "3" + creationTimestamp: "2019-09-13T08:52:50Z" + generation: 3 + labels: + app.kubernetes.io/instance: extensions + name: daemonset + namespace: statefulset + resourceVersion: "7472656" + selfLink: /apis/apps/v1/namespaces/statefulset/daemonsets/daemonset + uid: de04d075-d603-11e9-9e69-42010aa8005f +spec: + revisionHistoryLimit: 10 + selector: + matchLabels: + name: daemonset + template: + metadata: + labels: + name: daemonset + spec: + containers: + - image: registry.k8s.io/nginx-slim:0.8 + imagePullPolicy: IfNotPresent + name: nginx + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + dnsPolicy: ClusterFirst + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + terminationGracePeriodSeconds: 30 + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate +status: + currentNumberScheduled: 4 + desiredNumberScheduled: 4 + numberAvailable: 4 + numberMisscheduled: 0 + numberReady: 4 + observedGeneration: 3 + updatedNumberScheduled: 4 diff --git a/pkg/resource_customizations/apps/Deployment/actions/action_test.yaml b/pkg/resource_customizations/apps/Deployment/actions/action_test.yaml new file mode 100644 index 0000000..14538c8 --- /dev/null +++ b/pkg/resource_customizations/apps/Deployment/actions/action_test.yaml @@ -0,0 +1,10 @@ +actionTests: +- action: restart + inputPath: testdata/deployment.yaml + expectedOutputPath: testdata/deployment-restarted.yaml +- action: pause + inputPath: testdata/deployment.yaml + expectedOutputPath: testdata/deployment-pause.yaml +- action: resume + inputPath: testdata/deployment-pause.yaml + expectedOutputPath: testdata/deployment-resume.yaml \ No newline at end of file diff --git a/pkg/resource_customizations/apps/Deployment/actions/discovery.lua b/pkg/resource_customizations/apps/Deployment/actions/discovery.lua new file mode 100644 index 0000000..d090d58 --- /dev/null +++ b/pkg/resource_customizations/apps/Deployment/actions/discovery.lua @@ -0,0 +1,10 @@ +local actions = {} +actions["restart"] = {} + +local paused = false +if obj.spec.paused ~= nil then + paused = obj.spec.paused +end +actions["pause"] = {["disabled"] = paused} +actions["resume"] = {["disabled"] = not(paused)} +return actions diff --git a/pkg/resource_customizations/apps/Deployment/actions/pause/action.lua b/pkg/resource_customizations/apps/Deployment/actions/pause/action.lua new file mode 100644 index 0000000..ba3e108 --- /dev/null +++ b/pkg/resource_customizations/apps/Deployment/actions/pause/action.lua @@ -0,0 +1,2 @@ +obj.spec.paused = true +return obj \ No newline at end of file diff --git a/pkg/resource_customizations/apps/Deployment/actions/restart/action.lua b/pkg/resource_customizations/apps/Deployment/actions/restart/action.lua new file mode 100644 index 0000000..457c767 --- /dev/null +++ b/pkg/resource_customizations/apps/Deployment/actions/restart/action.lua @@ -0,0 +1,9 @@ +local os = require("os") +if obj.spec.template.metadata == nil then + obj.spec.template.metadata = {} +end +if obj.spec.template.metadata.annotations == nil then + obj.spec.template.metadata.annotations = {} +end +obj.spec.template.metadata.annotations["kubectl.kubernetes.io/restartedAt"] = os.date("!%Y-%m-%dT%XZ") +return obj diff --git a/pkg/resource_customizations/apps/Deployment/actions/resume/action.lua 
b/pkg/resource_customizations/apps/Deployment/actions/resume/action.lua new file mode 100644 index 0000000..94cff65 --- /dev/null +++ b/pkg/resource_customizations/apps/Deployment/actions/resume/action.lua @@ -0,0 +1,2 @@ +obj.spec.paused = nil +return obj \ No newline at end of file diff --git a/pkg/resource_customizations/apps/Deployment/actions/testdata/deployment-pause.yaml b/pkg/resource_customizations/apps/Deployment/actions/testdata/deployment-pause.yaml new file mode 100644 index 0000000..3ddbbe3 --- /dev/null +++ b/pkg/resource_customizations/apps/Deployment/actions/testdata/deployment-pause.yaml @@ -0,0 +1,59 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + deployment.kubernetes.io/revision: "1" + creationTimestamp: "2021-09-21T22:35:20Z" + name: nginx-deploy + namespace: default + generation: 2 +spec: + paused: true + progressDeadlineSeconds: 600 + replicas: 3 + revisionHistoryLimit: 10 + selector: + matchLabels: + app: nginx + strategy: + rollingUpdate: + maxSurge: 25% + maxUnavailable: 25% + type: RollingUpdate + template: + metadata: + creationTimestamp: null + labels: + app: nginx + spec: + containers: + - image: nginx:latest + imagePullPolicy: Always + name: nginx + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + dnsPolicy: ClusterFirst + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + terminationGracePeriodSeconds: 30 +status: + availableReplicas: 3 + conditions: + - lastTransitionTime: "2021-09-21T22:35:31Z" + lastUpdateTime: "2021-09-21T22:35:31Z" + message: Deployment has minimum availability. + reason: MinimumReplicasAvailable + status: "True" + type: Available + - lastTransitionTime: "2021-09-21T22:36:25Z" + lastUpdateTime: "2021-09-21T22:36:25Z" + message: Deployment is paused + reason: DeploymentPaused + status: Unknown + type: Progressing + observedGeneration: 2 + readyReplicas: 3 + replicas: 3 + updatedReplicas: 3 \ No newline at end of file diff --git a/pkg/resource_customizations/apps/Deployment/actions/testdata/deployment-restarted.yaml b/pkg/resource_customizations/apps/Deployment/actions/testdata/deployment-restarted.yaml new file mode 100644 index 0000000..373bd06 --- /dev/null +++ b/pkg/resource_customizations/apps/Deployment/actions/testdata/deployment-restarted.yaml @@ -0,0 +1,63 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + deployment.kubernetes.io/revision: "1" + creationTimestamp: "2019-09-12T01:33:53Z" + generation: 1 + name: nginx-deploy + namespace: default + resourceVersion: "6897444" + selfLink: /apis/apps/v1/namespaces/default/deployments/nginx-deploy + uid: 61689d6d-d4fd-11e9-9e69-42010aa8005f +spec: + progressDeadlineSeconds: 600 + replicas: 3 + revisionHistoryLimit: 10 + selector: + matchLabels: + app: nginx + strategy: + rollingUpdate: + maxSurge: 25% + maxUnavailable: 25% + type: RollingUpdate + template: + metadata: + labels: + app: nginx + annotations: + kubectl.kubernetes.io/restartedAt: "0001-01-01T00:00:00Z" + spec: + containers: + - image: nginx:latest + imagePullPolicy: Always + name: nginx + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + dnsPolicy: ClusterFirst + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + terminationGracePeriodSeconds: 30 +status: + availableReplicas: 2 + conditions: + - lastTransitionTime: "2019-09-12T01:33:53Z" + lastUpdateTime: "2019-09-12T01:33:53Z" + message: Deployment does not have minimum 
availability. + reason: MinimumReplicasUnavailable + status: "False" + type: Available + - lastTransitionTime: "2019-09-12T01:33:53Z" + lastUpdateTime: "2019-09-12T01:34:05Z" + message: ReplicaSet "nginx-deploy-9cb4784bd" is progressing. + reason: ReplicaSetUpdated + status: "True" + type: Progressing + observedGeneration: 1 + readyReplicas: 2 + replicas: 3 + unavailableReplicas: 1 + updatedReplicas: 3 diff --git a/pkg/resource_customizations/apps/Deployment/actions/testdata/deployment-resume.yaml b/pkg/resource_customizations/apps/Deployment/actions/testdata/deployment-resume.yaml new file mode 100644 index 0000000..8ccb8dc --- /dev/null +++ b/pkg/resource_customizations/apps/Deployment/actions/testdata/deployment-resume.yaml @@ -0,0 +1,58 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + deployment.kubernetes.io/revision: "1" + creationTimestamp: "2021-09-21T22:35:20Z" + generation: 3 + name: nginx-deploy + namespace: default +spec: + progressDeadlineSeconds: 600 + replicas: 3 + revisionHistoryLimit: 10 + selector: + matchLabels: + app: nginx + strategy: + rollingUpdate: + maxSurge: 25% + maxUnavailable: 25% + type: RollingUpdate + template: + metadata: + creationTimestamp: null + labels: + app: nginx + spec: + containers: + - image: nginx:latest + imagePullPolicy: Always + name: nginx + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + dnsPolicy: ClusterFirst + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + terminationGracePeriodSeconds: 30 +status: + availableReplicas: 3 + conditions: + - lastTransitionTime: "2021-09-21T22:35:31Z" + lastUpdateTime: "2021-09-21T22:35:31Z" + message: Deployment has minimum availability. + reason: MinimumReplicasAvailable + status: "True" + type: Available + - lastTransitionTime: "2021-09-21T22:38:10Z" + lastUpdateTime: "2021-09-21T22:38:10Z" + message: ReplicaSet "nginx-deploy-55649fd747" has successfully progressed. 
+ reason: NewReplicaSetAvailable + status: "True" + type: Progressing + observedGeneration: 3 + readyReplicas: 3 + replicas: 3 + updatedReplicas: 3 \ No newline at end of file diff --git a/pkg/resource_customizations/apps/Deployment/actions/testdata/deployment.yaml b/pkg/resource_customizations/apps/Deployment/actions/testdata/deployment.yaml new file mode 100644 index 0000000..6d41db1 --- /dev/null +++ b/pkg/resource_customizations/apps/Deployment/actions/testdata/deployment.yaml @@ -0,0 +1,61 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + deployment.kubernetes.io/revision: "1" + creationTimestamp: "2019-09-12T01:33:53Z" + generation: 1 + name: nginx-deploy + namespace: default + resourceVersion: "6897444" + selfLink: /apis/apps/v1/namespaces/default/deployments/nginx-deploy + uid: 61689d6d-d4fd-11e9-9e69-42010aa8005f +spec: + progressDeadlineSeconds: 600 + replicas: 3 + revisionHistoryLimit: 10 + selector: + matchLabels: + app: nginx + strategy: + rollingUpdate: + maxSurge: 25% + maxUnavailable: 25% + type: RollingUpdate + template: + metadata: + labels: + app: nginx + spec: + containers: + - image: nginx:latest + imagePullPolicy: Always + name: nginx + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + dnsPolicy: ClusterFirst + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + terminationGracePeriodSeconds: 30 +status: + availableReplicas: 2 + conditions: + - lastTransitionTime: "2019-09-12T01:33:53Z" + lastUpdateTime: "2019-09-12T01:33:53Z" + message: Deployment does not have minimum availability. + reason: MinimumReplicasUnavailable + status: "False" + type: Available + - lastTransitionTime: "2019-09-12T01:33:53Z" + lastUpdateTime: "2019-09-12T01:34:05Z" + message: ReplicaSet "nginx-deploy-9cb4784bd" is progressing. 
+ reason: ReplicaSetUpdated + status: "True" + type: Progressing + observedGeneration: 1 + readyReplicas: 2 + replicas: 3 + unavailableReplicas: 1 + updatedReplicas: 3 diff --git a/pkg/resource_customizations/apps/StatefulSet/actions/action_test.yaml b/pkg/resource_customizations/apps/StatefulSet/actions/action_test.yaml new file mode 100644 index 0000000..bab15aa --- /dev/null +++ b/pkg/resource_customizations/apps/StatefulSet/actions/action_test.yaml @@ -0,0 +1,4 @@ +actionTests: +- action: restart + inputPath: testdata/statefulset.yaml + expectedOutputPath: testdata/statefulset-restarted.yaml diff --git a/pkg/resource_customizations/apps/StatefulSet/actions/discovery.lua b/pkg/resource_customizations/apps/StatefulSet/actions/discovery.lua new file mode 100644 index 0000000..dc7f104 --- /dev/null +++ b/pkg/resource_customizations/apps/StatefulSet/actions/discovery.lua @@ -0,0 +1,3 @@ +local actions = {} +actions["restart"] = {} +return actions diff --git a/pkg/resource_customizations/apps/StatefulSet/actions/restart/action.lua b/pkg/resource_customizations/apps/StatefulSet/actions/restart/action.lua new file mode 100644 index 0000000..457c767 --- /dev/null +++ b/pkg/resource_customizations/apps/StatefulSet/actions/restart/action.lua @@ -0,0 +1,9 @@ +local os = require("os") +if obj.spec.template.metadata == nil then + obj.spec.template.metadata = {} +end +if obj.spec.template.metadata.annotations == nil then + obj.spec.template.metadata.annotations = {} +end +obj.spec.template.metadata.annotations["kubectl.kubernetes.io/restartedAt"] = os.date("!%Y-%m-%dT%XZ") +return obj diff --git a/pkg/resource_customizations/apps/StatefulSet/actions/testdata/statefulset-restarted.yaml b/pkg/resource_customizations/apps/StatefulSet/actions/testdata/statefulset-restarted.yaml new file mode 100644 index 0000000..44f902f --- /dev/null +++ b/pkg/resource_customizations/apps/StatefulSet/actions/testdata/statefulset-restarted.yaml @@ -0,0 +1,52 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + creationTimestamp: "2019-09-13T08:52:54Z" + generation: 2 + labels: + app.kubernetes.io/instance: extensions + name: statefulset + namespace: statefulset + resourceVersion: "7471813" + selfLink: /apis/apps/v1/namespaces/statefulset/statefulsets/statefulset + uid: dfe8fadf-d603-11e9-9e69-42010aa8005f +spec: + podManagementPolicy: OrderedReady + replicas: 3 + revisionHistoryLimit: 10 + selector: + matchLabels: + app: statefulset + serviceName: statefulset + template: + metadata: + labels: + app: statefulset + annotations: + kubectl.kubernetes.io/restartedAt: "0001-01-01T00:00:00Z" + spec: + containers: + - image: registry.k8s.io/nginx-slim:0.8 + imagePullPolicy: IfNotPresent + name: nginx + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + dnsPolicy: ClusterFirst + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + terminationGracePeriodSeconds: 30 + updateStrategy: + rollingUpdate: + partition: 0 + type: RollingUpdate +status: + collisionCount: 0 + currentReplicas: 3 + currentRevision: statefulset-85b7f767c6 + observedGeneration: 2 + readyReplicas: 3 + replicas: 3 + updateRevision: statefulset-85b7f767c6 + updatedReplicas: 3 diff --git a/pkg/resource_customizations/apps/StatefulSet/actions/testdata/statefulset.yaml b/pkg/resource_customizations/apps/StatefulSet/actions/testdata/statefulset.yaml new file mode 100644 index 0000000..7804814 --- /dev/null +++ b/pkg/resource_customizations/apps/StatefulSet/actions/testdata/statefulset.yaml @@ 
-0,0 +1,50 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + creationTimestamp: "2019-09-13T08:52:54Z" + generation: 2 + labels: + app.kubernetes.io/instance: extensions + name: statefulset + namespace: statefulset + resourceVersion: "7471813" + selfLink: /apis/apps/v1/namespaces/statefulset/statefulsets/statefulset + uid: dfe8fadf-d603-11e9-9e69-42010aa8005f +spec: + podManagementPolicy: OrderedReady + replicas: 3 + revisionHistoryLimit: 10 + selector: + matchLabels: + app: statefulset + serviceName: statefulset + template: + metadata: + labels: + app: statefulset + spec: + containers: + - image: registry.k8s.io/nginx-slim:0.8 + imagePullPolicy: IfNotPresent + name: nginx + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + dnsPolicy: ClusterFirst + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + terminationGracePeriodSeconds: 30 + updateStrategy: + rollingUpdate: + partition: 0 + type: RollingUpdate +status: + collisionCount: 0 + currentReplicas: 3 + currentRevision: statefulset-85b7f767c6 + observedGeneration: 2 + readyReplicas: 3 + replicas: 3 + updateRevision: statefulset-85b7f767c6 + updatedReplicas: 3 diff --git a/pkg/resource_customizations/argoproj.io/AnalysisRun/actions/action_test.yaml b/pkg/resource_customizations/argoproj.io/AnalysisRun/actions/action_test.yaml new file mode 100644 index 0000000..e99cfeb --- /dev/null +++ b/pkg/resource_customizations/argoproj.io/AnalysisRun/actions/action_test.yaml @@ -0,0 +1,13 @@ +discoveryTests: +- inputPath: testdata/runningAnalysisRun.yaml + result: + - name: terminate + disabled: false +- inputPath: testdata/failedAnalysisRun.yaml + result: + - name: terminate + disabled: true +actionTests: +- action: terminate + inputPath: testdata/runningAnalysisRun.yaml + expectedOutputPath: testdata/runningAnalysisRun_terminated.yaml diff --git a/pkg/resource_customizations/argoproj.io/AnalysisRun/actions/discovery.lua b/pkg/resource_customizations/argoproj.io/AnalysisRun/actions/discovery.lua new file mode 100644 index 0000000..b5c2ef8 --- /dev/null +++ b/pkg/resource_customizations/argoproj.io/AnalysisRun/actions/discovery.lua @@ -0,0 +1,8 @@ +local actions = {} +actions["terminate"] = {["disabled"] = (obj.spec.terminate or + obj.status.phase == "Successful" or + obj.status.phase == "Failed" or + obj.status.phase == "Error" or + obj.status.phase == "Inconclusive" +)} +return actions diff --git a/pkg/resource_customizations/argoproj.io/AnalysisRun/actions/terminate/action.lua b/pkg/resource_customizations/argoproj.io/AnalysisRun/actions/terminate/action.lua new file mode 100644 index 0000000..9ab153d --- /dev/null +++ b/pkg/resource_customizations/argoproj.io/AnalysisRun/actions/terminate/action.lua @@ -0,0 +1,2 @@ +obj.spec.terminate = true +return obj diff --git a/pkg/resource_customizations/argoproj.io/AnalysisRun/actions/testdata/failedAnalysisRun.yaml b/pkg/resource_customizations/argoproj.io/AnalysisRun/actions/testdata/failedAnalysisRun.yaml new file mode 100644 index 0000000..c094f38 --- /dev/null +++ b/pkg/resource_customizations/argoproj.io/AnalysisRun/actions/testdata/failedAnalysisRun.yaml @@ -0,0 +1,31 @@ +apiVersion: argoproj.io/v1alpha1 +kind: AnalysisRun +metadata: + name: canary-demo-analysis-template-6c6bb7cf6f-9k5rj + namespace: default +spec: + analysisSpec: + metrics: + - failureCondition: len(result) > 0 + interval: 10 + name: memory-usage + provider: + prometheus: + address: 'http://prometheus-operator-prometheus.prometheus-operator:9090' + 
query: > + sum(rate(nginx_ingress_controller_requests{ingress="canary-demo-preview",status!~"[4-5].*"}[2m])) + / + sum(rate(nginx_ingress_controller_requests{ingress="canary-demo-preview"}[2m])) + successCondition: len(result) > 0 +status: + metricResults: + - count: 1 + failed: 1 + measurements: + - finishedAt: '2019-10-28T18:23:23Z' + startedAt: '2019-10-28T18:23:23Z' + phase: Failed + value: '[0.9768211920529802]' + name: memory-usage + phase: Failed + phase: Failed diff --git a/pkg/resource_customizations/argoproj.io/AnalysisRun/actions/testdata/runningAnalysisRun.yaml b/pkg/resource_customizations/argoproj.io/AnalysisRun/actions/testdata/runningAnalysisRun.yaml new file mode 100644 index 0000000..36848b7 --- /dev/null +++ b/pkg/resource_customizations/argoproj.io/AnalysisRun/actions/testdata/runningAnalysisRun.yaml @@ -0,0 +1,35 @@ +apiVersion: argoproj.io/v1alpha1 +kind: AnalysisRun +metadata: + name: canary-demo-analysis-template-6c6bb7cf6f-5bpxj + namespace: default +spec: + analysisSpec: + metrics: + - failureCondition: len(result) == 0 + interval: 10 + name: memory-usage + provider: + prometheus: + address: 'http://prometheus-operator-prometheus.prometheus-operator:9090' + query: > + sum(rate(nginx_ingress_controller_requests{ingress="canary-demo-preview",status!~"[4-5].*"}[2m])) + / + sum(rate(nginx_ingress_controller_requests{ingress="canary-demo-preview"}[2m])) + successCondition: len(result) > 0 +status: + metricResults: + - count: 2 + measurements: + - finishedAt: '2019-10-28T18:22:05Z' + startedAt: '2019-10-28T18:22:05Z' + phase: Successful + value: '[0.9721293199554069]' + - finishedAt: '2019-10-28T18:22:15Z' + startedAt: '2019-10-28T18:22:15Z' + phase: Successful + value: '[0.9721293199554069]' + name: memory-usage + phase: Running + successful: 2 + phase: Running diff --git a/pkg/resource_customizations/argoproj.io/AnalysisRun/actions/testdata/runningAnalysisRun_terminated.yaml b/pkg/resource_customizations/argoproj.io/AnalysisRun/actions/testdata/runningAnalysisRun_terminated.yaml new file mode 100644 index 0000000..1c133cf --- /dev/null +++ b/pkg/resource_customizations/argoproj.io/AnalysisRun/actions/testdata/runningAnalysisRun_terminated.yaml @@ -0,0 +1,36 @@ +apiVersion: argoproj.io/v1alpha1 +kind: AnalysisRun +metadata: + name: canary-demo-analysis-template-6c6bb7cf6f-5bpxj + namespace: default +spec: + terminate: true + analysisSpec: + metrics: + - failureCondition: len(result) == 0 + interval: 10 + name: memory-usage + provider: + prometheus: + address: 'http://prometheus-operator-prometheus.prometheus-operator:9090' + query: > + sum(rate(nginx_ingress_controller_requests{ingress="canary-demo-preview",status!~"[4-5].*"}[2m])) + / + sum(rate(nginx_ingress_controller_requests{ingress="canary-demo-preview"}[2m])) + successCondition: len(result) > 0 +status: + metricResults: + - count: 2 + measurements: + - finishedAt: '2019-10-28T18:22:05Z' + startedAt: '2019-10-28T18:22:05Z' + phase: Successful + value: '[0.9721293199554069]' + - finishedAt: '2019-10-28T18:22:15Z' + startedAt: '2019-10-28T18:22:15Z' + phase: Successful + value: '[0.9721293199554069]' + name: memory-usage + phase: Running + successful: 2 + phase: Running diff --git a/pkg/resource_customizations/argoproj.io/AnalysisRun/health.lua b/pkg/resource_customizations/argoproj.io/AnalysisRun/health.lua new file mode 100644 index 0000000..9329b13 --- /dev/null +++ b/pkg/resource_customizations/argoproj.io/AnalysisRun/health.lua @@ -0,0 +1,40 @@ +local hs = {} + +function messageOrDefault(field, default) + if 
field ~= nil then + return field + end + return default + end + +if obj.status ~= nil then + if obj.status.phase == "Pending" then + hs.status = "Progressing" + hs.message = "Analysis run is running" + end + if obj.status.phase == "Running" then + hs.status = "Progressing" + hs.message = "Analysis run is running" + end + if obj.status.phase == "Successful" then + hs.status = "Healthy" + hs.message = messageOrDefault(obj.status.message, "Analysis run completed successfully") + end + if obj.status.phase == "Failed" then + hs.status = "Degraded" + hs.message = messageOrDefault(obj.status.message, "Analysis run failed") + end + if obj.status.phase == "Error" then + hs.status = "Degraded" + hs.message = messageOrDefault(obj.status.message, "Analysis run had an error") + end + if obj.status.phase == "Inconclusive" then + hs.status = "Unknown" + hs.message = messageOrDefault(obj.status.message, "Analysis run was inconclusive") + end + return hs +end + +hs.status = "Progressing" +hs.message = "Waiting for analysis run to finish: status has not been reconciled." +return hs \ No newline at end of file diff --git a/pkg/resource_customizations/argoproj.io/AnalysisRun/health_test.yaml b/pkg/resource_customizations/argoproj.io/AnalysisRun/health_test.yaml new file mode 100644 index 0000000..762139a --- /dev/null +++ b/pkg/resource_customizations/argoproj.io/AnalysisRun/health_test.yaml @@ -0,0 +1,45 @@ +tests: +- healthStatus: + status: Progressing + message: "Analysis run is running" + inputPath: testdata/pendingAnalysisRun.yaml +- healthStatus: + status: Progressing + message: "Waiting for analysis run to finish: status has not been reconciled." + inputPath: testdata/noStatusAnalysisRun.yaml +- healthStatus: + status: Progressing + message: "Analysis run is running" + inputPath: testdata/runningAnalysisRun.yaml +- healthStatus: + status: Healthy + message: "Analysis run completed successfully" + inputPath: testdata/successfulAnalysisRun.yaml +- healthStatus: + status: Degraded + message: "Analysis run failed" + inputPath: testdata/failedAnalysisRun.yaml +- healthStatus: + status: Degraded + message: "Status Message: Assessed as Failed" + inputPath: testdata/failedAnalysisRunWithStatusMessage.yaml +- healthStatus: + status: Degraded + message: "Analysis run had an error" + inputPath: testdata/errorAnalysisRun.yaml +- healthStatus: + status: Degraded + message: "Status Message: Assessed as Error" + inputPath: testdata/errorAnalysisRunWithStatusMessage.yaml +- healthStatus: + status: Unknown + message: "Analysis run was inconclusive" + inputPath: testdata/inconclusiveAnalysisRun.yaml +- healthStatus: + status: Unknown + message: "Status Message: Assessed as Inconclusive" + inputPath: testdata/inconclusiveAnalysisRunWithStatusMessage.yaml +- healthStatus: + status: Healthy + message: "run terminated" + inputPath: testdata/terminatedAnalysisRun.yaml
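The health chunks in this patch all follow the same contract: the controller binds the manifest to a global named obj, runs the chunk, and reads the status table it returns. A minimal stand-alone sketch of that contract, assuming plain Lua 5.1 and the AnalysisRun health.lua above saved locally as health.lua:

  obj = { status = { phase = "Failed" } }  -- hypothetical AnalysisRun stub with a failed phase
  local hs = dofile("health.lua")          -- the chunk ends with 'return hs'
  print(hs.status, hs.message)             -- prints: Degraded  Analysis run failed

diff --git a/pkg/resource_customizations/argoproj.io/AnalysisRun/testdata/errorAnalysisRun.yaml b/pkg/resource_customizations/argoproj.io/AnalysisRun/testdata/errorAnalysisRun.yaml new file mode 100644 index 0000000..0609656 --- /dev/null +++ b/pkg/resource_customizations/argoproj.io/AnalysisRun/testdata/errorAnalysisRun.yaml @@ -0,0 +1,47 @@ +apiVersion: argoproj.io/v1alpha1 +kind: AnalysisRun +metadata: + name: canary-demo-analysis-template-6c6bb7cf6f-btpgc + namespace: default +spec: + analysisSpec: + metrics: + - failureCondition: result < 92 + interval: 10 + name: memory-usage + provider: + prometheus: + address: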
'http://prometheus-operator-prometheus.prometheus-operator:9090' + query: > + sum(rate(nginx_ingress_controller_requests{ingress="canary-demo-preview",status!~"[4-5].*"}[2m])) + / + sum(rate(nginx_ingress_controller_requests{ingress="canary-demo-preview"}[2m])) + successCondition: result > 95 +status: + metricResults: + - consecutiveError: 5 + error: 5 + measurements: + - finishedAt: '2019-10-28T18:13:01Z' + startedAt: '2019-10-28T18:13:01Z' + phase: Error + value: '[0.9832775919732442]' + - finishedAt: '2019-10-28T18:13:11Z' + startedAt: '2019-10-28T18:13:11Z' + phase: Error + value: '[0.9832775919732442]' + - finishedAt: '2019-10-28T18:13:21Z' + startedAt: '2019-10-28T18:13:21Z' + phase: Error + value: '[0.9722530521642618]' + - finishedAt: '2019-10-28T18:13:31Z' + startedAt: '2019-10-28T18:13:31Z' + phase: Error + value: '[0.9722530521642618]' + - finishedAt: '2019-10-28T18:13:41Z' + startedAt: '2019-10-28T18:13:41Z' + phase: Error + value: '[0.9722530521642618]' + name: memory-usage + phase: Error + phase: Error diff --git a/pkg/resource_customizations/argoproj.io/AnalysisRun/testdata/errorAnalysisRunWithStatusMessage.yaml b/pkg/resource_customizations/argoproj.io/AnalysisRun/testdata/errorAnalysisRunWithStatusMessage.yaml new file mode 100644 index 0000000..e290ff0 --- /dev/null +++ b/pkg/resource_customizations/argoproj.io/AnalysisRun/testdata/errorAnalysisRunWithStatusMessage.yaml @@ -0,0 +1,48 @@ +apiVersion: argoproj.io/v1alpha1 +kind: AnalysisRun +metadata: + name: canary-demo-analysis-template-6c6bb7cf6f-btpgc + namespace: default +spec: + analysisSpec: + metrics: + - failureCondition: result < 92 + interval: 10 + name: memory-usage + provider: + prometheus: + address: 'http://prometheus-operator-prometheus.prometheus-operator:9090' + query: > + sum(rate(nginx_ingress_controller_requests{ingress="canary-demo-preview",status!~"[4-5].*"}[2m])) + / + sum(rate(nginx_ingress_controller_requests{ingress="canary-demo-preview"}[2m])) + successCondition: result > 95 +status: + message: "Status Message: Assessed as Error" + metricResults: + - consecutiveError: 5 + error: 5 + measurements: + - finishedAt: '2019-10-28T18:13:01Z' + startedAt: '2019-10-28T18:13:01Z' + phase: Error + value: '[0.9832775919732442]' + - finishedAt: '2019-10-28T18:13:11Z' + startedAt: '2019-10-28T18:13:11Z' + phase: Error + value: '[0.9832775919732442]' + - finishedAt: '2019-10-28T18:13:21Z' + startedAt: '2019-10-28T18:13:21Z' + phase: Error + value: '[0.9722530521642618]' + - finishedAt: '2019-10-28T18:13:31Z' + startedAt: '2019-10-28T18:13:31Z' + phase: Error + value: '[0.9722530521642618]' + - finishedAt: '2019-10-28T18:13:41Z' + startedAt: '2019-10-28T18:13:41Z' + phase: Error + value: '[0.9722530521642618]' + name: memory-usage + phase: Error + phase: Error diff --git a/pkg/resource_customizations/argoproj.io/AnalysisRun/testdata/failedAnalysisRun.yaml b/pkg/resource_customizations/argoproj.io/AnalysisRun/testdata/failedAnalysisRun.yaml new file mode 100644 index 0000000..c094f38 --- /dev/null +++ b/pkg/resource_customizations/argoproj.io/AnalysisRun/testdata/failedAnalysisRun.yaml @@ -0,0 +1,31 @@ +apiVersion: argoproj.io/v1alpha1 +kind: AnalysisRun +metadata: + name: canary-demo-analysis-template-6c6bb7cf6f-9k5rj + namespace: default +spec: + analysisSpec: + metrics: + - failureCondition: len(result) > 0 + interval: 10 + name: memory-usage + provider: + prometheus: + address: 'http://prometheus-operator-prometheus.prometheus-operator:9090' + query: > + 
sum(rate(nginx_ingress_controller_requests{ingress="canary-demo-preview",status!~"[4-5].*"}[2m])) + / + sum(rate(nginx_ingress_controller_requests{ingress="canary-demo-preview"}[2m])) + successCondition: len(result) > 0 +status: + metricResults: + - count: 1 + failed: 1 + measurements: + - finishedAt: '2019-10-28T18:23:23Z' + startedAt: '2019-10-28T18:23:23Z' + phase: Failed + value: '[0.9768211920529802]' + name: memory-usage + phase: Failed + phase: Failed diff --git a/pkg/resource_customizations/argoproj.io/AnalysisRun/testdata/failedAnalysisRunWithStatusMessage.yaml b/pkg/resource_customizations/argoproj.io/AnalysisRun/testdata/failedAnalysisRunWithStatusMessage.yaml new file mode 100644 index 0000000..84c9ff8 --- /dev/null +++ b/pkg/resource_customizations/argoproj.io/AnalysisRun/testdata/failedAnalysisRunWithStatusMessage.yaml @@ -0,0 +1,32 @@ +apiVersion: argoproj.io/v1alpha1 +kind: AnalysisRun +metadata: + name: canary-demo-analysis-template-6c6bb7cf6f-9k5rj + namespace: default +spec: + analysisSpec: + metrics: + - failureCondition: len(result) > 0 + interval: 10 + name: memory-usage + provider: + prometheus: + address: 'http://prometheus-operator-prometheus.prometheus-operator:9090' + query: > + sum(rate(nginx_ingress_controller_requests{ingress="canary-demo-preview",status!~"[4-5].*"}[2m])) + / + sum(rate(nginx_ingress_controller_requests{ingress="canary-demo-preview"}[2m])) + successCondition: len(result) > 0 +status: + message: "Status Message: Assessed as Failed" + metricResults: + - count: 1 + failed: 1 + measurements: + - finishedAt: '2019-10-28T18:23:23Z' + startedAt: '2019-10-28T18:23:23Z' + phase: Failed + value: '[0.9768211920529802]' + name: memory-usage + phase: Failed + phase: Failed diff --git a/pkg/resource_customizations/argoproj.io/AnalysisRun/testdata/inconclusiveAnalysisRun.yaml b/pkg/resource_customizations/argoproj.io/AnalysisRun/testdata/inconclusiveAnalysisRun.yaml new file mode 100644 index 0000000..00a9c02 --- /dev/null +++ b/pkg/resource_customizations/argoproj.io/AnalysisRun/testdata/inconclusiveAnalysisRun.yaml @@ -0,0 +1,31 @@ +apiVersion: argoproj.io/v1alpha1 +kind: AnalysisRun +metadata: + name: canary-demo-analysis-template-6c6bb7cf6f-ddvn8 + namespace: default +spec: + analysisSpec: + metrics: + - failureCondition: len(result) == 0 + interval: 10 + name: memory-usage + provider: + prometheus: + address: 'http://prometheus-operator-prometheus.prometheus-operator:9090' + query: > + sum(rate(nginx_ingress_controller_requests{ingress="canary-demo-preview",status!~"[4-5].*"}[2m])) + / + sum(rate(nginx_ingress_controller_requests{ingress="canary-demo-preview"}[2m])) + successCondition: len(result) == 0 +status: + metricResults: + - count: 1 + inconclusive: 1 + measurements: + - finishedAt: '2019-10-28T18:24:31Z' + startedAt: '2019-10-28T18:24:31Z' + phase: Inconclusive + value: '[0.9744444444444443]' + name: memory-usage + phase: Inconclusive + phase: Inconclusive diff --git a/pkg/resource_customizations/argoproj.io/AnalysisRun/testdata/inconclusiveAnalysisRunWithStatusMessage.yaml b/pkg/resource_customizations/argoproj.io/AnalysisRun/testdata/inconclusiveAnalysisRunWithStatusMessage.yaml new file mode 100644 index 0000000..43add8a --- /dev/null +++ b/pkg/resource_customizations/argoproj.io/AnalysisRun/testdata/inconclusiveAnalysisRunWithStatusMessage.yaml @@ -0,0 +1,32 @@ +apiVersion: argoproj.io/v1alpha1 +kind: AnalysisRun +metadata: + name: canary-demo-analysis-template-6c6bb7cf6f-ddvn8 + namespace: default +spec: + analysisSpec: + metrics: + - 
failureCondition: len(result) == 0 + interval: 10 + name: memory-usage + provider: + prometheus: + address: 'http://prometheus-operator-prometheus.prometheus-operator:9090' + query: > + sum(rate(nginx_ingress_controller_requests{ingress="canary-demo-preview",status!~"[4-5].*"}[2m])) + / + sum(rate(nginx_ingress_controller_requests{ingress="canary-demo-preview"}[2m])) + successCondition: len(result) == 0 +status: + message: "Status Message: Assessed as Inconclusive" + metricResults: + - count: 1 + inconclusive: 1 + measurements: + - finishedAt: '2019-10-28T18:24:31Z' + startedAt: '2019-10-28T18:24:31Z' + phase: Inconclusive + value: '[0.9744444444444443]' + name: memory-usage + phase: Inconclusive + phase: Inconclusive diff --git a/pkg/resource_customizations/argoproj.io/AnalysisRun/testdata/noStatusAnalysisRun.yaml b/pkg/resource_customizations/argoproj.io/AnalysisRun/testdata/noStatusAnalysisRun.yaml new file mode 100644 index 0000000..c443493 --- /dev/null +++ b/pkg/resource_customizations/argoproj.io/AnalysisRun/testdata/noStatusAnalysisRun.yaml @@ -0,0 +1,19 @@ +apiVersion: argoproj.io/v1alpha1 +kind: AnalysisRun +metadata: + name: canary-demo-analysis-template-6c6bb7cf6f-9k5rj + namespace: default +spec: + analysisSpec: + metrics: + - failureCondition: len(result) > 0 + interval: 10 + name: memory-usage + provider: + prometheus: + address: 'http://prometheus-operator-prometheus.prometheus-operator:9090' + query: > + sum(rate(nginx_ingress_controller_requests{ingress="canary-demo-preview",status!~"[4-5].*"}[2m])) + / + sum(rate(nginx_ingress_controller_requests{ingress="canary-demo-preview"}[2m])) + successCondition: len(result) > 0 \ No newline at end of file diff --git a/pkg/resource_customizations/argoproj.io/AnalysisRun/testdata/pendingAnalysisRun.yaml b/pkg/resource_customizations/argoproj.io/AnalysisRun/testdata/pendingAnalysisRun.yaml new file mode 100644 index 0000000..55f4840 --- /dev/null +++ b/pkg/resource_customizations/argoproj.io/AnalysisRun/testdata/pendingAnalysisRun.yaml @@ -0,0 +1,17 @@ +apiVersion: argoproj.io/v1alpha1 +kind: AnalysisRun +metadata: + name: analysis-template +spec: + metrics: + - name: memory-usage + interval: 10 + successCondition: result > 95 + failureCondition: result < 92 + provider: + prometheus: + address: http://prometheus-operator-prometheus.prometheus-operator:9090 + query: | + sum(rate(nginx_ingress_controller_requests{ingress="canary-demo-preview",status!~"[4-5].*"}[2m])) / sum(rate(nginx_ingress_controller_requests{ingress="canary-demo-preview"}[2m])) +status: + phase: Pending \ No newline at end of file diff --git a/pkg/resource_customizations/argoproj.io/AnalysisRun/testdata/runningAnalysisRun.yaml b/pkg/resource_customizations/argoproj.io/AnalysisRun/testdata/runningAnalysisRun.yaml new file mode 100644 index 0000000..36848b7 --- /dev/null +++ b/pkg/resource_customizations/argoproj.io/AnalysisRun/testdata/runningAnalysisRun.yaml @@ -0,0 +1,35 @@ +apiVersion: argoproj.io/v1alpha1 +kind: AnalysisRun +metadata: + name: canary-demo-analysis-template-6c6bb7cf6f-5bpxj + namespace: default +spec: + analysisSpec: + metrics: + - failureCondition: len(result) == 0 + interval: 10 + name: memory-usage + provider: + prometheus: + address: 'http://prometheus-operator-prometheus.prometheus-operator:9090' + query: > + sum(rate(nginx_ingress_controller_requests{ingress="canary-demo-preview",status!~"[4-5].*"}[2m])) + / + sum(rate(nginx_ingress_controller_requests{ingress="canary-demo-preview"}[2m])) + successCondition: len(result) > 0 +status: + 
metricResults: + - count: 2 + measurements: + - finishedAt: '2019-10-28T18:22:05Z' + startedAt: '2019-10-28T18:22:05Z' + phase: Successful + value: '[0.9721293199554069]' + - finishedAt: '2019-10-28T18:22:15Z' + startedAt: '2019-10-28T18:22:15Z' + phase: Successful + value: '[0.9721293199554069]' + name: memory-usage + phase: Running + successful: 2 + phase: Running diff --git a/pkg/resource_customizations/argoproj.io/AnalysisRun/testdata/successfulAnalysisRun.yaml b/pkg/resource_customizations/argoproj.io/AnalysisRun/testdata/successfulAnalysisRun.yaml new file mode 100644 index 0000000..0ebcde1 --- /dev/null +++ b/pkg/resource_customizations/argoproj.io/AnalysisRun/testdata/successfulAnalysisRun.yaml @@ -0,0 +1,30 @@ +apiVersion: argoproj.io/v1alpha1 +kind: AnalysisRun +metadata: + name: canary-demo-analysis-template-6c6bb7cf6f-zvcmx + namespace: default +spec: + analysisSpec: + metrics: + - failureCondition: len(result) == 0 + name: memory-usage + provider: + prometheus: + address: 'http://prometheus-operator-prometheus.prometheus-operator:9090' + query: > + sum(rate(nginx_ingress_controller_requests{ingress="canary-demo-preview",status!~"[4-5].*"}[2m])) + / + sum(rate(nginx_ingress_controller_requests{ingress="canary-demo-preview"}[2m])) + successCondition: len(result) > 0 +status: + metricResults: + - count: 1 + measurements: + - finishedAt: '2019-10-28T18:20:37Z' + startedAt: '2019-10-28T18:20:37Z' + phase: Successful + value: '[0.965324384787472]' + name: memory-usage + phase: Successful + successful: 1 + phase: Successful diff --git a/pkg/resource_customizations/argoproj.io/AnalysisRun/testdata/terminatedAnalysisRun.yaml b/pkg/resource_customizations/argoproj.io/AnalysisRun/testdata/terminatedAnalysisRun.yaml new file mode 100644 index 0000000..b7e8350 --- /dev/null +++ b/pkg/resource_customizations/argoproj.io/AnalysisRun/testdata/terminatedAnalysisRun.yaml @@ -0,0 +1,71 @@ +apiVersion: argoproj.io/v1alpha1 +kind: AnalysisRun +metadata: + annotations: + rollout.argoproj.io/revision: '2' + creationTimestamp: '2020-11-06T18:39:45Z' + generation: 4 + labels: + rollout-type: Step + rollouts-pod-template-hash: ff68867ff + step-index: '0' + name: rollout-canary-ff68867ff-2-0 + namespace: default + ownerReferences: + - apiVersion: argoproj.io/v1alpha1 + blockOwnerDeletion: true + controller: true + kind: Rollout + name: rollout-canary + uid: 0223237a-0dc1-45f6-881c-fe1873b1771f + resourceVersion: '1381' + selfLink: >- + /apis/argoproj.io/v1alpha1/namespaces/default/analysisruns/rollout-canary-ff68867ff-2-0 + uid: 863da27d-df03-41d2-a528-cc2f1ec25358 +spec: + args: + - name: exit-code + value: '0' + - name: duration + value: 1h + metrics: + - name: sleep-job + provider: + job: + metadata: + creationTimestamp: null + spec: + backoffLimit: 0 + template: + metadata: + creationTimestamp: null + spec: + containers: + - args: + - 'sleep {{args.duration}} && exit {{args.exit-code}}' + command: + - sh + - '-c' + - '-x' + image: 'nginx:1.19-alpine' + name: sleep-job + resources: {} + restartPolicy: Never + terminate: true +status: + message: run terminated + metricResults: + - count: 1 + measurements: + - finishedAt: '2020-11-06T18:42:58Z' + message: metric terminated + metadata: + job-name: 863da27d-df03-41d2-a528-cc2f1ec25358.sleep-job.1 + phase: Successful + startedAt: '2020-11-06T18:39:45Z' + message: metric terminated + name: sleep-job + phase: Successful + successful: 1 + phase: Successful + startedAt: '2020-11-06T18:39:45Z' diff --git 
a/pkg/resource_customizations/argoproj.io/ApplicationSet/health.lua b/pkg/resource_customizations/argoproj.io/ApplicationSet/health.lua new file mode 100644 index 0000000..3a0cd19 --- /dev/null +++ b/pkg/resource_customizations/argoproj.io/ApplicationSet/health.lua @@ -0,0 +1,24 @@ +local hs = {} + +if obj.status ~= nil then + if obj.status.conditions ~= nil then + for i, condition in pairs(obj.status.conditions) do + if condition.type == "ErrorOccurred" and condition.status == "True" then + hs.status = "Degraded" + hs.message = condition.message + return hs + end + if condition.type == "ResourcesUpToDate" and condition.status == "True" then + hs.status = "Healthy" + hs.message = condition.message + return hs + end + end + end +end + +-- Conditions were introduced in ApplicationSet v0.3. To give v0.2 users a good experience, we default to "Healthy". +-- Once v0.3 is more generally adopted, we'll default to "Progressing" instead. +hs.status = "Healthy" +hs.message = "" +return hs diff --git a/pkg/resource_customizations/argoproj.io/ApplicationSet/health_test.yaml b/pkg/resource_customizations/argoproj.io/ApplicationSet/health_test.yaml new file mode 100644 index 0000000..8c8aadc --- /dev/null +++ b/pkg/resource_customizations/argoproj.io/ApplicationSet/health_test.yaml @@ -0,0 +1,13 @@ +tests: + - healthStatus: + status: Healthy + message: "ApplicationSet up to date" + inputPath: testdata/healthyApplicationSet.yaml + - healthStatus: + status: Degraded + message: "found less than two generators, Merge requires two or more" + inputPath: testdata/errorApplicationSetWithStatusMessage.yaml + - healthStatus: + status: Healthy + message: "" + inputPath: testdata/noStatusApplicationSet.yaml diff --git a/pkg/resource_customizations/argoproj.io/ApplicationSet/testdata/errorApplicationSetWithStatusMessage.yaml b/pkg/resource_customizations/argoproj.io/ApplicationSet/testdata/errorApplicationSetWithStatusMessage.yaml new file mode 100644 index 0000000..11df7d2 --- /dev/null +++ b/pkg/resource_customizations/argoproj.io/ApplicationSet/testdata/errorApplicationSetWithStatusMessage.yaml @@ -0,0 +1,40 @@ +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: cluster-git + namespace: argocd +spec: + generators: + - merge: + generators: [] + mergeKeys: + - server + template: + metadata: + name: '{{name}}' + spec: + destination: + namespace: default + server: '{{server}}' + project: default + source: + path: helm-guestbook + repoURL: https://github.com/argoproj/argocd-example-apps/ + targetRevision: HEAD +status: + conditions: + - lastTransitionTime: "2021-11-12T14:28:01Z" + message: found less than two generators, Merge requires two or more + reason: ApplicationGenerationFromParamsError + status: "True" + type: ErrorOccurred + - lastTransitionTime: "2021-11-12T14:28:01Z" + message: found less than two generators, Merge requires two or more + reason: ErrorOccurred + status: "False" + type: ParametersGenerated + - lastTransitionTime: "2021-11-12T14:28:01Z" + message: found less than two generators, Merge requires two or more + reason: ApplicationGenerationFromParamsError + status: "False" + type: ResourcesUpToDate diff --git a/pkg/resource_customizations/argoproj.io/ApplicationSet/testdata/healthyApplicationSet.yaml b/pkg/resource_customizations/argoproj.io/ApplicationSet/testdata/healthyApplicationSet.yaml new file mode 100644 index 0000000..87f4086 --- /dev/null +++ b/pkg/resource_customizations/argoproj.io/ApplicationSet/testdata/healthyApplicationSet.yaml @@ -0,0 +1,60 @@ +apiVersion: 
argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: cluster-git + namespace: argocd +spec: + generators: + - merge: + generators: + - clusters: + values: + kafka: "true" + redis: "false" + - clusters: + selector: + matchLabels: + use-kafka: "false" + values: + kafka: "false" + - list: + elements: + - name: minikube + values.redis: "true" + mergeKeys: + - name + template: + metadata: + name: '{{name}}' + spec: + destination: + namespace: default + server: '{{server}}' + project: default + source: + helm: + parameters: + - name: kafka + value: '{{values.kafka}}' + - name: redis + value: '{{values.redis}}' + path: helm-guestbook + repoURL: https://github.com/argoproj/argocd-example-apps/ + targetRevision: HEAD +status: + conditions: + - lastTransitionTime: "2021-11-12T18:40:00Z" + message: Successfully generated parameters for all Applications + reason: ApplicationSetUpToDate + status: "False" + type: ErrorOccurred + - lastTransitionTime: "2021-11-12T18:40:00Z" + message: Successfully generated parameters for all Applications + reason: ParametersGenerated + status: "True" + type: ParametersGenerated + - lastTransitionTime: "2021-11-12T18:40:00Z" + message: ApplicationSet up to date + reason: ApplicationSetUpToDate + status: "True" + type: ResourcesUpToDate diff --git a/pkg/resource_customizations/argoproj.io/ApplicationSet/testdata/noStatusApplicationSet.yaml b/pkg/resource_customizations/argoproj.io/ApplicationSet/testdata/noStatusApplicationSet.yaml new file mode 100644 index 0000000..45edc63 --- /dev/null +++ b/pkg/resource_customizations/argoproj.io/ApplicationSet/testdata/noStatusApplicationSet.yaml @@ -0,0 +1,43 @@ +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: cluster-git + namespace: argocd +spec: + generators: + - merge: + generators: + - clusters: + values: + kafka: "true" + redis: "false" + - clusters: + selector: + matchLabels: + use-kafka: "false" + values: + kafka: "false" + - list: + elements: + - name: minikube + values.redis: "true" + mergeKeys: + - name + template: + metadata: + name: '{{name}}' + spec: + destination: + namespace: default + server: '{{server}}' + project: default + source: + helm: + parameters: + - name: kafka + value: '{{values.kafka}}' + - name: redis + value: '{{values.redis}}' + path: helm-guestbook + repoURL: https://github.com/argoproj/argocd-example-apps/ + targetRevision: HEAD diff --git a/pkg/resource_customizations/argoproj.io/CronWorkflow/actions/action_test.yaml b/pkg/resource_customizations/argoproj.io/CronWorkflow/actions/action_test.yaml new file mode 100644 index 0000000..7fce1c3 --- /dev/null +++ b/pkg/resource_customizations/argoproj.io/CronWorkflow/actions/action_test.yaml @@ -0,0 +1,7 @@ +actionTests: +- action: create-workflow + inputPath: testdata/cronworkflow.yaml + expectedOutputPath: testdata/workflow.yaml +- action: create-workflow + inputPath: testdata/cronworkflow-without-label.yaml + expectedOutputPath: testdata/workflow-without-label.yaml diff --git a/pkg/resource_customizations/argoproj.io/CronWorkflow/actions/create-workflow/action.lua b/pkg/resource_customizations/argoproj.io/CronWorkflow/actions/create-workflow/action.lua new file mode 100644 index 0000000..c41da30 --- /dev/null +++ b/pkg/resource_customizations/argoproj.io/CronWorkflow/actions/create-workflow/action.lua @@ -0,0 +1,82 @@ +local os = require("os") + +-- This action constructs a Workflow resource from a CronWorkflow resource, to enable creating a CronWorkflow instance +-- on demand. 
+-- It returns an array with a single member - a table with the operation to perform (create) and the Workflow resource. +-- It mimics, declaratively, the output of the "argo submit --from=CronWorkflow/" command. + +-- This code is written to mimic what the Argo Workflows API server does to create a Workflow from a CronWorkflow. +-- https://github.com/argoproj/argo-workflows/blob/873a58de7dd9dad76d5577b8c4294a58b52849b8/workflow/common/convert.go#L12 + +-- The deepCopy helper below is ChatGPT-generated code. +-- Since empty tables are treated as empty arrays, the resulting k8s resource might be invalid (arrays instead of maps). +-- So empty tables are not cloned to the target object. +function deepCopy(object) + local lookup_table = {} + local function _copy(obj) + if type(obj) ~= "table" then + return obj + elseif lookup_table[obj] then + return lookup_table[obj] + elseif next(obj) == nil then + return nil + else + local new_table = {} + lookup_table[obj] = new_table + for key, value in pairs(obj) do + new_table[_copy(key)] = _copy(value) + end + return setmetatable(new_table, getmetatable(obj)) + end + end + return _copy(object) +end + +local workflow = {} +workflow.apiVersion = "argoproj.io/v1alpha1" +workflow.kind = "Workflow" + +workflow.metadata = {} +workflow.metadata.name = obj.metadata.name .. "-" .. os.date("!%Y%m%d%H%M") +workflow.metadata.namespace = obj.metadata.namespace +workflow.metadata.labels = {} +workflow.metadata.annotations = {} +if (obj.spec.workflowMetadata ~= nil) then + if (obj.spec.workflowMetadata.labels ~= nil) then + workflow.metadata.labels = deepCopy(obj.spec.workflowMetadata.labels) + end + if (obj.spec.workflowMetadata.annotations ~= nil) then + workflow.metadata.annotations = deepCopy(obj.spec.workflowMetadata.annotations) + end +end +workflow.metadata.labels["workflows.argoproj.io/cron-workflow"] = obj.metadata.name +if (obj.metadata.labels ~= nil and obj.metadata.labels["workflows.argoproj.io/controller-instanceid"] ~= nil) then + workflow.metadata.labels["workflows.argoproj.io/controller-instanceid"] = obj.metadata.labels["workflows.argoproj.io/controller-instanceid"] +end +workflow.metadata.annotations["workflows.argoproj.io/scheduled-time"] = os.date("!%Y-%m-%dT%H:%M:%SZ") -- RFC 3339 UTC timestamp + +workflow.finalizers = {} +-- add all finalizers from obj.spec.workflowMetadata.finalizers +if (obj.spec.workflowMetadata ~= nil and obj.spec.workflowMetadata.finalizers ~= nil) then + for i, finalizer in ipairs(obj.spec.workflowMetadata.finalizers) do + workflow.finalizers[i] = finalizer + end +end + +local ownerRef = {} +ownerRef.apiVersion = obj.apiVersion +ownerRef.kind = obj.kind +ownerRef.name = obj.metadata.name +ownerRef.uid = obj.metadata.uid +workflow.metadata.ownerReferences = {} +workflow.metadata.ownerReferences[1] = ownerRef + +workflow.spec = deepCopy(obj.spec.workflowSpec) + +local impactedResource = {} +impactedResource.operation = "create" +impactedResource.resource = workflow +local result = {} +result[1] = impactedResource + +return result diff --git a/pkg/resource_customizations/argoproj.io/CronWorkflow/actions/discovery.lua b/pkg/resource_customizations/argoproj.io/CronWorkflow/actions/discovery.lua new file mode 100644 index 0000000..9a76d96 --- /dev/null +++ b/pkg/resource_customizations/argoproj.io/CronWorkflow/actions/discovery.lua @@ -0,0 +1,6 @@ +local actions = {} +actions["create-workflow"] = { + ["iconClass"] = "fa fa-fw fa-play", + ["displayName"] = "Create Workflow" +} +return actions \ No newline at end of file
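The empty-table caveat noted in the create-workflow comments above is easiest to see in isolation. A minimal sketch, assuming plain Lua 5.1 and the deepCopy function from create-workflow/action.lua in scope:

  local src = { metadata = {}, labels = { app = "demo" } }  -- illustrative input only
  local dst = deepCopy(src)
  print(dst.labels.app)  -- demo
  print(dst.metadata)    -- nil: an empty table is dropped rather than copied, so it
                         -- can never serialize as an empty array where a map belongs

For the generated Workflow this is safe, since omitting a map field reads the same as setting it to an empty map.

diff --git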
a/pkg/resource_customizations/argoproj.io/CronWorkflow/actions/testdata/cronworkflow-without-label.yaml b/pkg/resource_customizations/argoproj.io/CronWorkflow/actions/testdata/cronworkflow-without-label.yaml new file mode 100644 index 0000000..a9f9e2e --- /dev/null +++ b/pkg/resource_customizations/argoproj.io/CronWorkflow/actions/testdata/cronworkflow-without-label.yaml @@ -0,0 +1,31 @@ +apiVersion: argoproj.io/v1alpha1 +kind: CronWorkflow +metadata: + annotations: + cronworkflows.argoproj.io/last-used-schedule: CRON_TZ=America/Los_Angeles * * * * * + name: hello-world + namespace: default +spec: + concurrencyPolicy: Replace + failedJobsHistoryLimit: 4 + schedule: '* * * * *' + startingDeadlineSeconds: 0 + successfulJobsHistoryLimit: 4 + suspend: true + timezone: America/Los_Angeles + workflowSpec: + entrypoint: whalesay + templates: + - container: + args: + - "\U0001F553 hello world. Scheduled on: {{workflow.scheduledTime}}" + command: + - cowsay + image: 'docker/whalesay:latest' + name: whalesay + workflowMetadata: + labels: + example: test + annotations: + another-example: another-test + finalizers: [test-finalizer] diff --git a/pkg/resource_customizations/argoproj.io/CronWorkflow/actions/testdata/cronworkflow.yaml b/pkg/resource_customizations/argoproj.io/CronWorkflow/actions/testdata/cronworkflow.yaml new file mode 100644 index 0000000..2a2c7d1 --- /dev/null +++ b/pkg/resource_customizations/argoproj.io/CronWorkflow/actions/testdata/cronworkflow.yaml @@ -0,0 +1,34 @@ +apiVersion: argoproj.io/v1alpha1 +kind: CronWorkflow +metadata: + annotations: + cronworkflows.argoproj.io/last-used-schedule: CRON_TZ=America/Los_Angeles * * * * * + labels: + workflows.argoproj.io/controller-instanceid: test-instance + app.kubernetes.io/instance: test + name: hello-world + namespace: default +spec: + concurrencyPolicy: Replace + failedJobsHistoryLimit: 4 + schedule: '* * * * *' + startingDeadlineSeconds: 0 + successfulJobsHistoryLimit: 4 + suspend: true + timezone: America/Los_Angeles + workflowSpec: + entrypoint: whalesay + templates: + - container: + args: + - "\U0001F553 hello world. Scheduled on: {{workflow.scheduledTime}}" + command: + - cowsay + image: 'docker/whalesay:latest' + name: whalesay + workflowMetadata: + labels: + example: test + annotations: + another-example: another-test + finalizers: [test-finalizer] diff --git a/pkg/resource_customizations/argoproj.io/CronWorkflow/actions/testdata/workflow-without-label.yaml b/pkg/resource_customizations/argoproj.io/CronWorkflow/actions/testdata/workflow-without-label.yaml new file mode 100644 index 0000000..1d20bc0 --- /dev/null +++ b/pkg/resource_customizations/argoproj.io/CronWorkflow/actions/testdata/workflow-without-label.yaml @@ -0,0 +1,26 @@ +- k8sOperation: create + unstructuredObj: + apiVersion: argoproj.io/v1alpha1 + kind: Workflow + metadata: + annotations: + another-example: another-test + labels: + example: test + name: hello-world-202306221736 + namespace: default + ownerReferences: + - apiVersion: argoproj.io/v1alpha1 + kind: CronWorkflow + name: hello-world + finalizers: [test-finalizer] + spec: + entrypoint: whalesay + templates: + - container: + args: + - "\U0001F553 hello world. 
Scheduled on: {{workflow.scheduledTime}}" + command: + - cowsay + image: 'docker/whalesay:latest' + name: whalesay diff --git a/pkg/resource_customizations/argoproj.io/CronWorkflow/actions/testdata/workflow.yaml b/pkg/resource_customizations/argoproj.io/CronWorkflow/actions/testdata/workflow.yaml new file mode 100644 index 0000000..9f231db --- /dev/null +++ b/pkg/resource_customizations/argoproj.io/CronWorkflow/actions/testdata/workflow.yaml @@ -0,0 +1,28 @@ +- k8sOperation: create + unstructuredObj: + apiVersion: argoproj.io/v1alpha1 + kind: Workflow + metadata: + annotations: + another-example: another-test + labels: + workflows.argoproj.io/cron-workflow: hello-world + workflows.argoproj.io/controller-instanceid: test-instance + example: test + name: hello-world-202306221736 + namespace: default + ownerReferences: + - apiVersion: argoproj.io/v1alpha1 + kind: CronWorkflow + name: hello-world + finalizers: [test-finalizer] + spec: + entrypoint: whalesay + templates: + - container: + args: + - "\U0001F553 hello world. Scheduled on: {{workflow.scheduledTime}}" + command: + - cowsay + image: 'docker/whalesay:latest' + name: whalesay diff --git a/pkg/resource_customizations/argoproj.io/CronWorkflow/health.lua b/pkg/resource_customizations/argoproj.io/CronWorkflow/health.lua new file mode 100644 index 0000000..0a441df --- /dev/null +++ b/pkg/resource_customizations/argoproj.io/CronWorkflow/health.lua @@ -0,0 +1,26 @@ +local hs = {} + +if obj.status ~= nil then + if obj.status.conditions ~= nil then + for i, condition in pairs(obj.status.conditions) do + if condition.type == "SpecError" and condition.status == "True" then + hs.status = "Degraded" + hs.message = condition.message + return hs + elseif condition.type == "SubmissionError" and condition.status == "True" then + hs.status = "Degraded" + hs.message = condition.message + return hs + elseif condition.type == "Completed" and condition.status == "True" then + hs.status = "Healthy" + hs.message = condition.message + return hs + end + end + end +end + +hs.status = "Healthy" +hs.message = "CronWorkflow is healthy." + +return hs diff --git a/pkg/resource_customizations/argoproj.io/CronWorkflow/health_test.yaml b/pkg/resource_customizations/argoproj.io/CronWorkflow/health_test.yaml new file mode 100644 index 0000000..d95d066 --- /dev/null +++ b/pkg/resource_customizations/argoproj.io/CronWorkflow/health_test.yaml @@ -0,0 +1,21 @@ +tests: + - healthStatus: + status: Healthy + message: "we are done" + inputPath: testdata/healthyCronWorkflow.yaml + - healthStatus: + status: Degraded + message: "something is wrong with the spec" + inputPath: testdata/degradedCronWorkflowWithSpecError.yaml + - healthStatus: + status: Degraded + message: "there is a submission error" + inputPath: testdata/degradedCronWorkflowWithSubmissionError.yaml + - healthStatus: + status: Healthy + message: "CronWorkflow is healthy." + inputPath: testdata/noConditionsCronWorkflow.yaml + - healthStatus: + status: Healthy + message: "CronWorkflow is healthy." 
+ inputPath: testdata/withConditionButHealthyCronWorkflow.yaml diff --git a/pkg/resource_customizations/argoproj.io/CronWorkflow/testdata/degradedCronWorkflowWithSpecError.yaml b/pkg/resource_customizations/argoproj.io/CronWorkflow/testdata/degradedCronWorkflowWithSpecError.yaml new file mode 100644 index 0000000..d3911cd --- /dev/null +++ b/pkg/resource_customizations/argoproj.io/CronWorkflow/testdata/degradedCronWorkflowWithSpecError.yaml @@ -0,0 +1,13 @@ +apiVersion: argoproj.io/v1alpha1 +kind: CronWorkflow +metadata: + name: test-cron-wf + namespace: argocd +spec: + entrypoint: sampleEntryPoint +status: + conditions: + - lastTransitionTime: "2021-11-12T14:28:01Z" + message: something is wrong with the spec + status: "True" + type: SpecError diff --git a/pkg/resource_customizations/argoproj.io/CronWorkflow/testdata/degradedCronWorkflowWithSubmissionError.yaml b/pkg/resource_customizations/argoproj.io/CronWorkflow/testdata/degradedCronWorkflowWithSubmissionError.yaml new file mode 100644 index 0000000..0dce68e --- /dev/null +++ b/pkg/resource_customizations/argoproj.io/CronWorkflow/testdata/degradedCronWorkflowWithSubmissionError.yaml @@ -0,0 +1,13 @@ +apiVersion: argoproj.io/v1alpha1 +kind: CronWorkflow +metadata: + name: test-cron-wf + namespace: argocd +spec: + entrypoint: sampleEntryPoint +status: + conditions: + - lastTransitionTime: "2021-11-12T14:28:01Z" + message: there is a submission error + status: "True" + type: SubmissionError diff --git a/pkg/resource_customizations/argoproj.io/CronWorkflow/testdata/healthyCronWorkflow.yaml b/pkg/resource_customizations/argoproj.io/CronWorkflow/testdata/healthyCronWorkflow.yaml new file mode 100644 index 0000000..04a81ec --- /dev/null +++ b/pkg/resource_customizations/argoproj.io/CronWorkflow/testdata/healthyCronWorkflow.yaml @@ -0,0 +1,13 @@ +apiVersion: argoproj.io/v1alpha1 +kind: CronWorkflow +metadata: + name: test-cron-wf + namespace: argocd +spec: + entrypoint: sampleEntryPoint +status: + conditions: + - lastTransitionTime: "2021-11-12T14:28:01Z" + message: we are done + status: "True" + type: Completed diff --git a/pkg/resource_customizations/argoproj.io/CronWorkflow/testdata/noConditionsCronWorkflow.yaml b/pkg/resource_customizations/argoproj.io/CronWorkflow/testdata/noConditionsCronWorkflow.yaml new file mode 100644 index 0000000..4ec8b25 --- /dev/null +++ b/pkg/resource_customizations/argoproj.io/CronWorkflow/testdata/noConditionsCronWorkflow.yaml @@ -0,0 +1,7 @@ +apiVersion: argoproj.io/v1alpha1 +kind: CronWorkflow +metadata: + name: test-cron-wf + namespace: argocd +spec: + entrypoint: sampleEntryPoint diff --git a/pkg/resource_customizations/argoproj.io/CronWorkflow/testdata/withConditionButHealthyCronWorkflow.yaml b/pkg/resource_customizations/argoproj.io/CronWorkflow/testdata/withConditionButHealthyCronWorkflow.yaml new file mode 100644 index 0000000..99ada4d --- /dev/null +++ b/pkg/resource_customizations/argoproj.io/CronWorkflow/testdata/withConditionButHealthyCronWorkflow.yaml @@ -0,0 +1,13 @@ +apiVersion: argoproj.io/v1alpha1 +kind: CronWorkflow +metadata: + name: test-cron-wf + namespace: argocd +spec: + entrypoint: sampleEntryPoint +status: + conditions: + - lastTransitionTime: "2021-11-12T14:28:01Z" + message: this status may be outdated and we may still be progressing + status: "False" + type: SpecError diff --git a/pkg/resource_customizations/argoproj.io/EventBus/health.lua b/pkg/resource_customizations/argoproj.io/EventBus/health.lua new file mode 100644 index 0000000..9e97eab --- /dev/null +++ 
b/pkg/resource_customizations/argoproj.io/EventBus/health.lua @@ -0,0 +1,21 @@ +local hs={ status = "Progressing", message = "Waiting for initialization" } + +if obj.status ~= nil then + if obj.status.conditions ~= nil then + for _, condition in ipairs(obj.status.conditions) do + if condition.type == "Deployed" and condition.status == "False" then + hs.status = "Degraded" + hs.message = condition.message or condition.reason + return hs + end + if condition.type == "Deployed" and condition.status == "True" then + hs.status = "Healthy" + hs.message = condition.message or condition.reason + return hs + end + end + end +end + + +return hs diff --git a/pkg/resource_customizations/argoproj.io/EventBus/health_test.yaml b/pkg/resource_customizations/argoproj.io/EventBus/health_test.yaml new file mode 100644 index 0000000..7babe7f --- /dev/null +++ b/pkg/resource_customizations/argoproj.io/EventBus/health_test.yaml @@ -0,0 +1,9 @@ +tests: + - healthStatus: + status: Healthy + message: "JetStream is deployed" + inputPath: testdata/healthy.yaml + - healthStatus: + status: Degraded + message: 'failed to get jetstream version, err: unsupported version "iwillfail", supported versions: "2.9.5,latest"' + inputPath: testdata/degraded.yaml \ No newline at end of file diff --git a/pkg/resource_customizations/argoproj.io/EventBus/testdata/degraded.yaml b/pkg/resource_customizations/argoproj.io/EventBus/testdata/degraded.yaml new file mode 100644 index 0000000..4ffd1cf --- /dev/null +++ b/pkg/resource_customizations/argoproj.io/EventBus/testdata/degraded.yaml @@ -0,0 +1,21 @@ +apiVersion: argoproj.io/v1alpha1 +kind: EventBus +metadata: + name: test + namespace: eventbus-test +spec: + jetstream: + replicas: 3 + version: iwillfail +status: + conditions: + - lastTransitionTime: null + status: 'True' + type: Configured + - lastTransitionTime: null + message: >- + failed to get jetstream version, err: unsupported version "iwillfail", + supported versions: "2.9.5,latest" + reason: JetStreamStatefulSetFailed + status: 'False' + type: Deployed \ No newline at end of file diff --git a/pkg/resource_customizations/argoproj.io/EventBus/testdata/healthy.yaml b/pkg/resource_customizations/argoproj.io/EventBus/testdata/healthy.yaml new file mode 100644 index 0000000..8db1455 --- /dev/null +++ b/pkg/resource_customizations/argoproj.io/EventBus/testdata/healthy.yaml @@ -0,0 +1,19 @@ +apiVersion: argoproj.io/v1alpha1 +kind: EventBus +metadata: + name: test + namespace: eventbus-test +spec: + jetstream: + replicas: 3 + version: latest +status: + conditions: + - lastTransitionTime: '2022-12-30T11:44:15Z' + status: 'True' + type: Configured + - lastTransitionTime: '2022-12-30T11:44:15Z' + message: JetStream is deployed + reason: Succeeded + status: 'True' + type: Deployed \ No newline at end of file diff --git a/pkg/resource_customizations/argoproj.io/Experiment/health.lua b/pkg/resource_customizations/argoproj.io/Experiment/health.lua new file mode 100644 index 0000000..ad11caf --- /dev/null +++ b/pkg/resource_customizations/argoproj.io/Experiment/health.lua @@ -0,0 +1,28 @@ +local hs = {} +if obj.status ~= nil then + if obj.status.phase == "Pending" then + hs.status = "Progressing" + hs.message = "Experiment is pending" + end + if obj.status.phase == "Running" then + hs.status = "Progressing" + hs.message = "Experiment is running" + end + if obj.status.phase == "Successful" then + hs.status = "Healthy" + hs.message = "Experiment is successful" + end + if obj.status.phase == "Failed" then + hs.status = "Degraded" + hs.message = 
"Experiment has failed" + end + if obj.status.phase == "Error" then + hs.status = "Degraded" + hs.message = "Experiment had an error" + end + return hs +end + +hs.status = "Progressing" +hs.message = "Waiting for experiment to finish: status has not been reconciled." +return hs \ No newline at end of file diff --git a/pkg/resource_customizations/argoproj.io/Experiment/health_test.yaml b/pkg/resource_customizations/argoproj.io/Experiment/health_test.yaml new file mode 100644 index 0000000..1d14ec9 --- /dev/null +++ b/pkg/resource_customizations/argoproj.io/Experiment/health_test.yaml @@ -0,0 +1,25 @@ +tests: +- healthStatus: + status: Progressing + message: "Experiment is pending" + inputPath: testdata/pendingExperiment.yaml +- healthStatus: + status: Progressing + message: "Waiting for experiment to finish: status has not been reconciled." + inputPath: testdata/noStatusExperiment.yaml +- healthStatus: + status: Progressing + message: "Experiment is running" + inputPath: testdata/runningExperiment.yaml +- healthStatus: + status: Healthy + message: "Experiment is successful" + inputPath: testdata/successfulExperiment.yaml +- healthStatus: + status: Degraded + message: "Experiment has failed" + inputPath: testdata/failedExperiment.yaml +- healthStatus: + status: Degraded + message: "Experiment had an error" + inputPath: testdata/errorExperiment.yaml \ No newline at end of file diff --git a/pkg/resource_customizations/argoproj.io/Experiment/testdata/errorExperiment.yaml b/pkg/resource_customizations/argoproj.io/Experiment/testdata/errorExperiment.yaml new file mode 100644 index 0000000..3b1e8e9 --- /dev/null +++ b/pkg/resource_customizations/argoproj.io/Experiment/testdata/errorExperiment.yaml @@ -0,0 +1,49 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Experiment +metadata: + name: experiment-error-template-missing + namespace: jesse-test +spec: + analyses: + - name: does-not-exist + templateName: does-not-exist + templates: + - name: baseline + selector: + matchLabels: + app: rollouts-demo + template: + metadata: + labels: + app: rollouts-demo + spec: + containers: + - image: argoproj/rollouts-demo:blue + name: rollouts-demo +status: + analysisRuns: + - analysisRun: "" + message: 'AnalysisTemplate verification failed for analysis ''does-not-exist'': + analysistemplate.argoproj.io "does-not-exist" not found' + name: does-not-exist + phase: Error + availableAt: "2019-10-27T23:13:10Z" + conditions: + - lastTransitionTime: "2019-10-27T23:13:07Z" + lastUpdateTime: "2019-10-28T05:59:33Z" + message: Experiment "experiment-error-template-missing" is running. 
+ reason: NewReplicaSetAvailable + status: "True" + type: Progressing + message: 'AnalysisTemplate verification failed for analysis ''does-not-exist'': + analysistemplate.argoproj.io "does-not-exist" not found' + running: true + phase: Error + templateStatuses: + - availableReplicas: 0 + lastTransitionTime: "2019-10-28T05:59:33Z" + name: baseline + readyReplicas: 0 + replicas: 0 + status: Successful + updatedReplicas: 0 \ No newline at end of file diff --git a/pkg/resource_customizations/argoproj.io/Experiment/testdata/failedExperiment.yaml b/pkg/resource_customizations/argoproj.io/Experiment/testdata/failedExperiment.yaml new file mode 100644 index 0000000..ddc1977 --- /dev/null +++ b/pkg/resource_customizations/argoproj.io/Experiment/testdata/failedExperiment.yaml @@ -0,0 +1,54 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Experiment +metadata: + name: example-experiment + namespace: default +spec: + analyses: + - name: test + templateName: analysis-template + duration: 60 + templates: + - name: baseline + selector: + matchLabels: + app: rollouts-demo + color: blue + template: + metadata: + labels: + app: rollouts-demo + color: blue + spec: + containers: + - image: 'argoproj/rollouts-demo:blue' + name: guestbook +status: + analysisRuns: + - analysisRun: example-experiment-test-57vl8 + name: test + phase: Failed + availableAt: '2019-10-28T20:58:00Z' + conditions: + - lastTransitionTime: '2019-10-28T20:57:58Z' + lastUpdateTime: '2019-10-28T20:58:01Z' + message: Experiment "example-experiment" is running. + reason: NewReplicaSetAvailable + status: 'True' + type: Progressing + phase: Failed + templateStatuses: + - availableReplicas: 0 + lastTransitionTime: '2019-10-28T20:58:01Z' + name: baseline + readyReplicas: 0 + replicas: 0 + status: Successful + updatedReplicas: 0 + - availableReplicas: 0 + lastTransitionTime: '2019-10-28T20:58:01Z' + name: canary + readyReplicas: 0 + replicas: 0 + status: Successful + updatedReplicas: 0 diff --git a/pkg/resource_customizations/argoproj.io/Experiment/testdata/noStatusExperiment.yaml b/pkg/resource_customizations/argoproj.io/Experiment/testdata/noStatusExperiment.yaml new file mode 100644 index 0000000..7508f1f --- /dev/null +++ b/pkg/resource_customizations/argoproj.io/Experiment/testdata/noStatusExperiment.yaml @@ -0,0 +1,33 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Experiment +metadata: + annotations: + kubectl.kubernetes.io/last-applied-configuration: > + {"apiVersion":"argoproj.io/v1alpha1","kind":"Experiment","metadata":{"annotations":{},"labels":{"app.kubernetes.io/instance":"rollouts-canary"},"name":"example-experiment","namespace":"argo-rollouts"},"spec":{"duration":60,"templates":[{"name":"baseline","selector":{"matchLabels":{"app":"rollouts-demo","color":"blue"}},"template":{"metadata":{"labels":{"app":"rollouts-demo","color":"blue"}},"spec":{"containers":[{"image":"argoproj/rollouts-demo:blue","name":"guestbook"}]}}},{"name":"canary","selector":{"matchLabels":{"app":"rollouts-demo","color":"yellow"}},"template":{"metadata":{"labels":{"app":"rollouts-demo","color":"yellow"}},"spec":{"containers":[{"image":"argoproj/rollouts-demo:yellow","name":"guestbook"}]}}}]}} + creationTimestamp: '2019-10-28T20:13:28Z' + generation: 1 + labels: + app.kubernetes.io/instance: rollouts-canary + name: example-experiment + namespace: argo-rollouts + resourceVersion: '28562006' + selfLink: >- + /apis/argoproj.io/v1alpha1/namespaces/argo-rollouts/experiments/example-experiment + uid: 67792f8a-f9bf-11e9-a15b-42010aa80033 +spec: + duration: 60 + templates: + - name:
baseline + selector: + matchLabels: + app: rollouts-demo + color: blue + template: + metadata: + labels: + app: rollouts-demo + color: blue + spec: + containers: + - image: 'argoproj/rollouts-demo:blue' + name: guestbook diff --git a/pkg/resource_customizations/argoproj.io/Experiment/testdata/pendingExperiment.yaml b/pkg/resource_customizations/argoproj.io/Experiment/testdata/pendingExperiment.yaml new file mode 100644 index 0000000..9b74c70 --- /dev/null +++ b/pkg/resource_customizations/argoproj.io/Experiment/testdata/pendingExperiment.yaml @@ -0,0 +1,47 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Experiment +metadata: + name: experiment-with-analysis-5hm74 + namespace: default +spec: + analyses: + - name: job + templateName: job + duration: 3600 + templates: + - name: baseline + selector: + matchLabels: + app: rollouts-demo + template: + metadata: + labels: + app: rollouts-demo + spec: + containers: + - image: argoproj/rollouts-demo:blue + name: rollouts-demo +status: + analysisRuns: + - analysisRun: experiment-with-analysis-5hm74-job-h4bgb + name: job + phase: Running + availableAt: "2019-10-21T03:40:28Z" + conditions: + - lastTransitionTime: "2019-10-21T03:40:28Z" + lastUpdateTime: "2019-10-21T03:40:28Z" + message: Experiment "experiment-with-analysis-5hm74" has successfully ran and + completed. + reason: ExperimentCompleted + status: "False" + type: Progressing + running: false + phase: Pending + templateStatuses: + - availableReplicas: 0 + lastTransitionTime: "2019-10-28T20:22:01Z" + name: baseline + readyReplicas: 0 + replicas: 0 + status: Progressing + updatedReplicas: 0 \ No newline at end of file diff --git a/pkg/resource_customizations/argoproj.io/Experiment/testdata/runningExperiment.yaml b/pkg/resource_customizations/argoproj.io/Experiment/testdata/runningExperiment.yaml new file mode 100644 index 0000000..4cbe8ab --- /dev/null +++ b/pkg/resource_customizations/argoproj.io/Experiment/testdata/runningExperiment.yaml @@ -0,0 +1,40 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Experiment +metadata: + name: example-experiment + namespace: argo-rollouts +spec: + duration: 60 + templates: + - name: baseline + selector: + matchLabels: + app: rollouts-demo + color: blue + template: + metadata: + labels: + app: rollouts-demo + color: blue + spec: + containers: + - image: 'argoproj/rollouts-demo:blue' + name: guestbook +status: + availableAt: '2019-10-28T20:15:02Z' + conditions: + - lastTransitionTime: '2019-10-28T20:14:59Z' + lastUpdateTime: '2019-10-28T20:15:02Z' + message: Experiment "example-experiment" is running.
+ reason: NewReplicaSetAvailable + phase: 'True' + type: Progressing + phase: Running + templateStatuses: + - availableReplicas: 1 + lastTransitionTime: '2019-10-28T20:15:02Z' + name: baseline + readyReplicas: 1 + replicas: 1 + phase: Running + updatedReplicas: 1 \ No newline at end of file diff --git a/pkg/resource_customizations/argoproj.io/Experiment/testdata/successfulExperiment.yaml b/pkg/resource_customizations/argoproj.io/Experiment/testdata/successfulExperiment.yaml new file mode 100644 index 0000000..5ccc177 --- /dev/null +++ b/pkg/resource_customizations/argoproj.io/Experiment/testdata/successfulExperiment.yaml @@ -0,0 +1,61 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Experiment +metadata: + name: example-experiment + namespace: argo-rollouts +spec: + duration: 60 + templates: + - name: baseline + selector: + matchLabels: + app: rollouts-demo + color: blue + template: + metadata: + labels: + app: rollouts-demo + color: blue + spec: + containers: + - image: 'argoproj/rollouts-demo:blue' + name: guestbook + - name: canary + selector: + matchLabels: + app: rollouts-demo + color: yellow + template: + metadata: + labels: + app: rollouts-demo + color: yellow + spec: + containers: + - image: 'argoproj/rollouts-demo:yellow' + name: guestbook +status: + availableAt: '2019-10-28T20:15:02Z' + conditions: + - lastTransitionTime: '2019-10-28T20:20:54Z' + lastUpdateTime: '2019-10-28T20:20:54Z' + message: Experiment "example-experiment" has successfully ran and completed. + reason: ExperimentCompleted + phase: 'False' + type: Progressing + phase: Successful + templateStatuses: + - availableReplicas: 1 + lastTransitionTime: '2019-10-28T20:15:02Z' + name: baseline + readyReplicas: 1 + replicas: 1 + phase: Successful + updatedReplicas: 1 + - availableReplicas: 1 + lastTransitionTime: '2019-10-28T20:15:01Z' + name: canary + readyReplicas: 1 + replicas: 1 + phase: Successful + updatedReplicas: 1 diff --git a/pkg/resource_customizations/argoproj.io/Rollout/actions/abort/action.lua b/pkg/resource_customizations/argoproj.io/Rollout/actions/abort/action.lua new file mode 100644 index 0000000..d0177ea --- /dev/null +++ b/pkg/resource_customizations/argoproj.io/Rollout/actions/abort/action.lua @@ -0,0 +1,2 @@ +obj.status.abort = true +return obj diff --git a/pkg/resource_customizations/argoproj.io/Rollout/actions/action_test.yaml b/pkg/resource_customizations/argoproj.io/Rollout/actions/action_test.yaml new file mode 100644 index 0000000..f3c3057 --- /dev/null +++ b/pkg/resource_customizations/argoproj.io/Rollout/actions/action_test.yaml @@ -0,0 +1,131 @@ +discoveryTests: +- inputPath: testdata/pre_v0.6_paused_rollout.yaml + result: + - name: resume + disabled: false + - name: restart + disabled: false + - name: abort + disabled: false + - name: retry + disabled: true + - name: promote-full + disabled: true +- inputPath: testdata/pre_v0.6_not_paused_rollout.yaml + result: + - name: restart + disabled: false + - name: resume + disabled: true + - name: abort + disabled: false + - name: retry + disabled: true + - name: promote-full + disabled: true +- inputPath: testdata/pre_v0.6_nil_paused_rollout.yaml + result: + - name: restart + disabled: false + - name: resume + disabled: true + - name: abort + disabled: false + - name: retry + disabled: true + - name: promote-full + disabled: true +- inputPath: testdata/has_pause_condition_rollout.yaml + result: + - name: restart + disabled: false + - name: resume + disabled: false + - name: abort + disabled: false + - name: retry + disabled: true + - name: 
promote-full
+    disabled: false
+- inputPath: testdata/no_pause_condition_rollout.yaml
+  result:
+  - name: restart
+    disabled: false
+  - name: resume
+    disabled: true
+  - name: abort
+    disabled: false
+  - name: retry
+    disabled: true
+  - name: promote-full
+    disabled: false
+- inputPath: testdata/healthy_rollout.yaml
+  result:
+  - name: restart
+    disabled: false
+  - name: resume
+    disabled: true
+  - name: abort
+    disabled: true
+  - name: retry
+    disabled: true
+  - name: promote-full
+    disabled: true
+- inputPath: testdata/v0.9_aborted_rollout.yaml
+  result:
+  - name: restart
+    disabled: false
+  - name: resume
+    disabled: true
+  - name: abort
+    disabled: true
+  - name: retry
+    disabled: false
+  - name: promote-full
+    disabled: false
+- inputPath: testdata/v0.9_aborted_bg_rollout.yaml
+  result:
+  - name: restart
+    disabled: false
+  - name: resume
+    disabled: true
+  - name: abort
+    disabled: true
+  - name: retry
+    disabled: false
+  - name: promote-full
+    disabled: true
+- inputPath: testdata/aborted_bg_rollout.yaml
+  result:
+  - name: restart
+    disabled: false
+  - name: resume
+    disabled: true
+  - name: abort
+    disabled: true
+  - name: retry
+    disabled: false
+  - name: promote-full
+    disabled: false
+actionTests:
+- action: resume
+  inputPath: testdata/pre_v0.6_paused_rollout.yaml
+  expectedOutputPath: testdata/pre_v0.6_not_paused_rollout.yaml
+- action: resume
+  inputPath: testdata/has_pause_condition_rollout.yaml
+  expectedOutputPath: testdata/no_pause_condition_rollout.yaml
+- action: abort
+  inputPath: testdata/has_pause_condition_rollout.yaml
+  expectedOutputPath: testdata/has_pause_condition_rollout_aborted.yaml
+- action: restart
+  inputPath: testdata/rollout_not_restarted.yaml
+  expectedOutputPath: testdata/rollout_restarted.yaml
+- action: retry
+  inputPath: testdata/v0.9_aborted_rollout.yaml
+  expectedOutputPath: testdata/retried_rollout.yaml
+- action: promote-full
+  inputPath: testdata/v0.9_aborted_rollout.yaml
+  expectedOutputPath: testdata/v0.9_promote-full_rollout.yaml
+- action: promote-full
+  inputPath: testdata/aborted_rollout.yaml
+  expectedOutputPath: testdata/promote-full_rollout.yaml
diff --git a/pkg/resource_customizations/argoproj.io/Rollout/actions/discovery.lua b/pkg/resource_customizations/argoproj.io/Rollout/actions/discovery.lua
new file mode 100644
index 0000000..86a5307
--- /dev/null
+++ b/pkg/resource_customizations/argoproj.io/Rollout/actions/discovery.lua
@@ -0,0 +1,28 @@
+local actions = {}
+actions["restart"] = {["disabled"] = false}
+
+local paused = false
+if obj.status ~= nil and obj.status.pauseConditions ~= nil then
+  paused = table.getn(obj.status.pauseConditions) > 0
+elseif obj.spec.paused ~= nil then
+  paused = obj.spec.paused
+end
+actions["resume"] = {["disabled"] = not(paused)}
+
+local fullyPromoted = obj.status.currentPodHash == obj.status.stableRS
+actions["abort"] = {["disabled"] = fullyPromoted or obj.status.abort}
+actions["retry"] = {["disabled"] = fullyPromoted or not(obj.status.abort)}
+
+actions["promote-full"] = {["disabled"] = true}
+if obj.status ~= nil and not(fullyPromoted) then
+  local generation = tonumber(obj.status.observedGeneration)
+  if generation == nil or generation > obj.metadata.generation then
+    -- rollouts v0.9 - full promotion only supported for canary
+    actions["promote-full"] = {["disabled"] = obj.spec.strategy.blueGreen ~= nil}
+  else
+    -- rollouts v0.10+
+    actions["promote-full"]["disabled"] = false
+  end
+end
+
+return actions
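+
+-- Summary of the gating above:
+--   restart:      always enabled
+--   resume:       enabled only while paused (status.pauseConditions, falling back to spec.paused)
+--   abort:        enabled while an update is in progress (not yet fully promoted) and not already aborted
+--   retry:        enabled only after an abort, before full promotion
+--   promote-full: canary-only on v0.9 (hash-style observedGeneration); both strategies on v0.10+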
diff --git a/pkg/resource_customizations/argoproj.io/Rollout/actions/promote-full/action.lua b/pkg/resource_customizations/argoproj.io/Rollout/actions/promote-full/action.lua
new file mode 100644
index 0000000..9080599
--- /dev/null
+++ b/pkg/resource_customizations/argoproj.io/Rollout/actions/promote-full/action.lua
@@ -0,0 +1,14 @@
+if obj.status ~= nil then
+  local generation = tonumber(obj.status.observedGeneration)
+  if generation == nil or generation > obj.metadata.generation then
+    -- rollouts v0.9 and below
+    obj.status.abort = nil
+    if obj.spec.strategy.canary.steps ~= nil then
+      obj.status.currentStepIndex = table.getn(obj.spec.strategy.canary.steps)
+    end
+  else
+    -- rollouts v0.10+
+    obj.status.promoteFull = true
+  end
+end
+return obj
diff --git a/pkg/resource_customizations/argoproj.io/Rollout/actions/restart/action.lua b/pkg/resource_customizations/argoproj.io/Rollout/actions/restart/action.lua
new file mode 100644
index 0000000..7f9a9ec
--- /dev/null
+++ b/pkg/resource_customizations/argoproj.io/Rollout/actions/restart/action.lua
@@ -0,0 +1,3 @@
+local os = require("os")
+obj.spec.restartAt = os.date("!%Y-%m-%dT%XZ")
+return obj
diff --git a/pkg/resource_customizations/argoproj.io/Rollout/actions/resume/action.lua b/pkg/resource_customizations/argoproj.io/Rollout/actions/resume/action.lua
new file mode 100644
index 0000000..9aa2aa2
--- /dev/null
+++ b/pkg/resource_customizations/argoproj.io/Rollout/actions/resume/action.lua
@@ -0,0 +1,9 @@
+if obj.status.pauseConditions ~= nil and table.getn(obj.status.pauseConditions) > 0 then
+  obj.status.pauseConditions = nil
+end
+
+if obj.spec.paused ~= nil and obj.spec.paused then
+  obj.spec.paused = false
+end
+
+return obj
\ No newline at end of file
diff --git a/pkg/resource_customizations/argoproj.io/Rollout/actions/retry/action.lua b/pkg/resource_customizations/argoproj.io/Rollout/actions/retry/action.lua
new file mode 100644
index 0000000..844e596
--- /dev/null
+++ b/pkg/resource_customizations/argoproj.io/Rollout/actions/retry/action.lua
@@ -0,0 +1,2 @@
+obj.status.abort = nil
+return obj
diff --git a/pkg/resource_customizations/argoproj.io/Rollout/actions/testdata/aborted_bg_rollout.yaml b/pkg/resource_customizations/argoproj.io/Rollout/actions/testdata/aborted_bg_rollout.yaml
new file mode 100644
index 0000000..747fe86
--- /dev/null
+++ b/pkg/resource_customizations/argoproj.io/Rollout/actions/testdata/aborted_bg_rollout.yaml
@@ -0,0 +1,63 @@
+apiVersion: argoproj.io/v1alpha1
+kind: Rollout
+metadata:
+  annotations:
+    rollout.argoproj.io/revision: "2"
+  creationTimestamp: "2020-11-13T08:37:51Z"
+  generation: 3
+  name: bluegreen
+  namespace: argocd-e2e
+  resourceVersion: "202207"
+  selfLink: /apis/argoproj.io/v1alpha1/namespaces/argocd-e2e/rollouts/bluegreen
+  uid: 39d30e1e-5e0e-460a-a217-fa21215f1d1f
+spec:
+  replicas: 3
+  selector:
+    matchLabels:
+      app: bluegreen
+  strategy:
+    blueGreen:
+      activeService: bluegreen
+      autoPromotionEnabled: false
+      scaleDownDelaySeconds: 10
+  template:
+    metadata:
+      creationTimestamp: null
+      labels:
+        app: bluegreen
+    spec:
+      containers:
+      - image: nginx:1.18-alpine
+        name: bluegreen
+        resources:
+          requests:
+            cpu: 1m
+            memory: 16Mi
+status:
+  HPAReplicas: 3
+  abort: true
+  abortedAt: "2020-11-13T08:38:19Z"
+  availableReplicas: 3
+  blueGreen:
+    activeSelector: 54bd6f9c67
+  canary: {}
+  conditions:
+  - lastTransitionTime: "2020-11-13T08:37:53Z"
+    lastUpdateTime: "2020-11-13T08:37:53Z"
+    message: Rollout has minimum availability
+    reason: AvailableReason
+    status: "True"
+    type: Available
+  -
lastTransitionTime: "2020-11-13T08:38:19Z" + lastUpdateTime: "2020-11-13T08:38:19Z" + message: Rollout is aborted + reason: RolloutAborted + status: "False" + type: Progressing + currentPodHash: 5b6f6b55c4 + observedGeneration: "3" + readyReplicas: 3 + replicas: 6 + selector: app=bluegreen,rollouts-pod-template-hash=54bd6f9c67 + stableRS: 54bd6f9c67 + updatedReplicas: 3 diff --git a/pkg/resource_customizations/argoproj.io/Rollout/actions/testdata/aborted_rollout.yaml b/pkg/resource_customizations/argoproj.io/Rollout/actions/testdata/aborted_rollout.yaml new file mode 100644 index 0000000..03e76d4 --- /dev/null +++ b/pkg/resource_customizations/argoproj.io/Rollout/actions/testdata/aborted_rollout.yaml @@ -0,0 +1,63 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Rollout +metadata: + annotations: + rollout.argoproj.io/revision: "2" + creationTimestamp: "2020-11-13T08:25:35Z" + generation: 3 + name: basic + namespace: argocd-e2e + resourceVersion: "201579" + selfLink: /apis/argoproj.io/v1alpha1/namespaces/argocd-e2e/rollouts/basic + uid: 201161e2-c761-4e52-91a1-d4872be9ead4 +spec: + replicas: 1 + selector: + matchLabels: + app: basic + strategy: + canary: + steps: + - setWeight: 50 + - pause: {} + template: + metadata: + creationTimestamp: null + labels: + app: basic + spec: + containers: + - image: nginx:1.18-alpine + name: basic + resources: + requests: + cpu: 1m + memory: 16Mi +status: + HPAReplicas: 1 + abort: true + abortedAt: "2020-11-13T08:25:53Z" + availableReplicas: 1 + blueGreen: {} + canary: {} + conditions: + - lastTransitionTime: "2020-11-13T08:25:36Z" + lastUpdateTime: "2020-11-13T08:25:36Z" + message: Rollout has minimum availability + reason: AvailableReason + status: "True" + type: Available + - lastTransitionTime: "2020-11-13T08:25:53Z" + lastUpdateTime: "2020-11-13T08:25:53Z" + message: Rollout is aborted + reason: RolloutAborted + status: "False" + type: Progressing + currentPodHash: 75fdb4ccf6 + currentStepHash: 757f5f97b + currentStepIndex: 0 + observedGeneration: "3" + readyReplicas: 1 + replicas: 1 + selector: app=basic + stableRS: 754cb84d5 diff --git a/pkg/resource_customizations/argoproj.io/Rollout/actions/testdata/has_pause_condition_rollout.yaml b/pkg/resource_customizations/argoproj.io/Rollout/actions/testdata/has_pause_condition_rollout.yaml new file mode 100644 index 0000000..6d64aeb --- /dev/null +++ b/pkg/resource_customizations/argoproj.io/Rollout/actions/testdata/has_pause_condition_rollout.yaml @@ -0,0 +1,61 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Rollout +metadata: + name: canary-demo + namespace: default +spec: + replicas: 5 + revisionHistoryLimit: 3 + selector: + matchLabels: + app: canary-demo + strategy: + canary: + analysis: + name: analysis + templateName: analysis-template + canaryService: canary-demo-preview + steps: + - setWeight: 40 + - pause: {} + - setWeight: 60 + - pause: {} + - setWeight: 80 + - pause: + duration: 10 + template: + metadata: + labels: + app: canary-demo + spec: + containers: + - image: argoproj/rollouts-demo:yellow + imagePullPolicy: Always + name: canary-demo + ports: + - containerPort: 8080 + name: http + protocol: TCP + resources: + requests: + cpu: 5m + memory: 32Mi +status: + HPAReplicas: 5 + availableReplicas: 5 + blueGreen: {} + canary: + currentBackgroundAnalysisRun: canary-demo-6758949f55-6-analysis + stableRS: 645d5dbc4c + controllerPause: true + currentPodHash: 6758949f55 + currentStepHash: 59f8666948 + currentStepIndex: 1 + observedGeneration: 58b949649c + pauseConditions: + - reason: CanaryPauseStep + startTime: 
"2019-11-05T18:10:29Z" + readyReplicas: 5 + replicas: 5 + selector: app=canary-demo + updatedReplicas: 2 \ No newline at end of file diff --git a/pkg/resource_customizations/argoproj.io/Rollout/actions/testdata/has_pause_condition_rollout_aborted.yaml b/pkg/resource_customizations/argoproj.io/Rollout/actions/testdata/has_pause_condition_rollout_aborted.yaml new file mode 100644 index 0000000..5c118c2 --- /dev/null +++ b/pkg/resource_customizations/argoproj.io/Rollout/actions/testdata/has_pause_condition_rollout_aborted.yaml @@ -0,0 +1,62 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Rollout +metadata: + name: canary-demo + namespace: default +spec: + replicas: 5 + revisionHistoryLimit: 3 + selector: + matchLabels: + app: canary-demo + strategy: + canary: + analysis: + name: analysis + templateName: analysis-template + canaryService: canary-demo-preview + steps: + - setWeight: 40 + - pause: {} + - setWeight: 60 + - pause: {} + - setWeight: 80 + - pause: + duration: 10 + template: + metadata: + labels: + app: canary-demo + spec: + containers: + - image: argoproj/rollouts-demo:yellow + imagePullPolicy: Always + name: canary-demo + ports: + - containerPort: 8080 + name: http + protocol: TCP + resources: + requests: + cpu: 5m + memory: 32Mi +status: + abort: true + HPAReplicas: 5 + availableReplicas: 5 + blueGreen: {} + canary: + currentBackgroundAnalysisRun: canary-demo-6758949f55-6-analysis + stableRS: 645d5dbc4c + controllerPause: true + currentPodHash: 6758949f55 + currentStepHash: 59f8666948 + currentStepIndex: 1 + observedGeneration: 58b949649c + pauseConditions: + - reason: CanaryPauseStep + startTime: "2019-11-05T18:10:29Z" + readyReplicas: 5 + replicas: 5 + selector: app=canary-demo + updatedReplicas: 2 \ No newline at end of file diff --git a/pkg/resource_customizations/argoproj.io/Rollout/actions/testdata/healthy_rollout.yaml b/pkg/resource_customizations/argoproj.io/Rollout/actions/testdata/healthy_rollout.yaml new file mode 100644 index 0000000..8a20b4b --- /dev/null +++ b/pkg/resource_customizations/argoproj.io/Rollout/actions/testdata/healthy_rollout.yaml @@ -0,0 +1,64 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Rollout +metadata: + annotations: + rollout.argoproj.io/revision: '2' + creationTimestamp: '2020-11-06T09:09:54Z' + generation: 12 + labels: + app.kubernetes.io/instance: rollouts-demo + name: rollout-bluegreen + namespace: default + resourceVersion: '2232' + selfLink: /apis/argoproj.io/v1alpha1/namespaces/default/rollouts/rollout-bluegreen + uid: a5047899-8288-43c2-95d7-a8e0a8b45ed6 +spec: + replicas: 2 + revisionHistoryLimit: 2 + selector: + matchLabels: + app: rollout-bluegreen + strategy: + blueGreen: + activeService: rollout-bluegreen-active + previewService: rollout-bluegreen-preview + template: + metadata: + creationTimestamp: null + labels: + app: rollout-bluegreen + spec: + containers: + - image: 'nginx:1.19-alpine' + imagePullPolicy: Always + name: rollouts-demo + ports: + - containerPort: 8080 + resources: {} +status: + HPAReplicas: 2 + availableReplicas: 2 + blueGreen: + activeSelector: '8576595585' + previewSelector: '8576595585' + canary: {} + conditions: + - lastTransitionTime: '2020-11-06T09:10:35Z' + lastUpdateTime: '2020-11-06T09:10:35Z' + message: Rollout has minimum availability + reason: AvailableReason + status: 'True' + type: Available + - lastTransitionTime: '2020-11-06T09:09:54Z' + lastUpdateTime: '2020-11-06T09:13:09Z' + message: ReplicaSet "rollout-bluegreen-8576595585" has successfully progressed. 
+ reason: NewReplicaSetAvailable + status: 'True' + type: Progressing + currentPodHash: '8576595585' + observedGeneration: 7b965d5d74 + readyReplicas: 2 + replicas: 2 + selector: 'app=rollout-bluegreen,rollouts-pod-template-hash=8576595585' + stableRS: '8576595585' + updatedReplicas: 2 diff --git a/pkg/resource_customizations/argoproj.io/Rollout/actions/testdata/no_pause_condition_rollout.yaml b/pkg/resource_customizations/argoproj.io/Rollout/actions/testdata/no_pause_condition_rollout.yaml new file mode 100644 index 0000000..fb1ba19 --- /dev/null +++ b/pkg/resource_customizations/argoproj.io/Rollout/actions/testdata/no_pause_condition_rollout.yaml @@ -0,0 +1,58 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Rollout +metadata: + name: canary-demo + namespace: default +spec: + replicas: 5 + revisionHistoryLimit: 3 + selector: + matchLabels: + app: canary-demo + strategy: + canary: + analysis: + name: analysis + templateName: analysis-template + canaryService: canary-demo-preview + steps: + - setWeight: 40 + - pause: {} + - setWeight: 60 + - pause: {} + - setWeight: 80 + - pause: + duration: 10 + template: + metadata: + labels: + app: canary-demo + spec: + containers: + - image: argoproj/rollouts-demo:yellow + imagePullPolicy: Always + name: canary-demo + ports: + - containerPort: 8080 + name: http + protocol: TCP + resources: + requests: + cpu: 5m + memory: 32Mi +status: + HPAReplicas: 5 + availableReplicas: 5 + blueGreen: {} + canary: + currentBackgroundAnalysisRun: canary-demo-6758949f55-6-analysis + stableRS: 645d5dbc4c + controllerPause: true + currentPodHash: 6758949f55 + currentStepHash: 59f8666948 + currentStepIndex: 1 + observedGeneration: 58b949649c + readyReplicas: 5 + replicas: 5 + selector: app=canary-demo + updatedReplicas: 2 \ No newline at end of file diff --git a/pkg/resource_customizations/argoproj.io/Rollout/actions/testdata/one_replica_rollout.yaml b/pkg/resource_customizations/argoproj.io/Rollout/actions/testdata/one_replica_rollout.yaml new file mode 100644 index 0000000..949e4ea --- /dev/null +++ b/pkg/resource_customizations/argoproj.io/Rollout/actions/testdata/one_replica_rollout.yaml @@ -0,0 +1,51 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Rollout +metadata: + clusterName: "" + creationTimestamp: 2019-03-22T21:04:31Z + generation: 1 + labels: + app.kubernetes.io/instance: guestbook-bluegreen + name: guestbook-bluegreen + namespace: default + resourceVersion: "888906" + selfLink: /apis/argoproj.io/v1alpha1/namespaces/default/rollouts/guestbook-bluegreen + uid: 16a1edf0-4ce6-11e9-994f-025000000001 +spec: + minReadySeconds: 30 + replicas: 1 + revisionHistoryLimit: 2 + selector: + matchLabels: + app: guestbook-bluegreen + strategy: + blueGreen: + activeService: guestbook-bluegreen-active + previewService: guestbook-bluegreen-preview + template: + metadata: + labels: + app: guestbook-bluegreen + spec: + containers: + - image: gcr.io/heptio-images/ks-guestbook-demo:0.2 + name: guestbook-bluegreen + ports: + - containerPort: 80 +status: + availableReplicas: 1 + blueGreen: + activeSelector: 6c767bd46c + conditions: + - lastTransitionTime: 2019-04-01T22:31:44Z + lastUpdateTime: 2019-04-01T22:31:44Z + message: Rollout is serving traffic from the active service. 
+ reason: Available + status: "True" + type: Available + currentPodHash: 6c767bd46c + observedGeneration: 869957df4b + pauseStartTime: 2019-03-26T05:47:32Z + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 \ No newline at end of file diff --git a/pkg/resource_customizations/argoproj.io/Rollout/actions/testdata/pre_v0.6_nil_paused_rollout.yaml b/pkg/resource_customizations/argoproj.io/Rollout/actions/testdata/pre_v0.6_nil_paused_rollout.yaml new file mode 100644 index 0000000..949e4ea --- /dev/null +++ b/pkg/resource_customizations/argoproj.io/Rollout/actions/testdata/pre_v0.6_nil_paused_rollout.yaml @@ -0,0 +1,51 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Rollout +metadata: + clusterName: "" + creationTimestamp: 2019-03-22T21:04:31Z + generation: 1 + labels: + app.kubernetes.io/instance: guestbook-bluegreen + name: guestbook-bluegreen + namespace: default + resourceVersion: "888906" + selfLink: /apis/argoproj.io/v1alpha1/namespaces/default/rollouts/guestbook-bluegreen + uid: 16a1edf0-4ce6-11e9-994f-025000000001 +spec: + minReadySeconds: 30 + replicas: 1 + revisionHistoryLimit: 2 + selector: + matchLabels: + app: guestbook-bluegreen + strategy: + blueGreen: + activeService: guestbook-bluegreen-active + previewService: guestbook-bluegreen-preview + template: + metadata: + labels: + app: guestbook-bluegreen + spec: + containers: + - image: gcr.io/heptio-images/ks-guestbook-demo:0.2 + name: guestbook-bluegreen + ports: + - containerPort: 80 +status: + availableReplicas: 1 + blueGreen: + activeSelector: 6c767bd46c + conditions: + - lastTransitionTime: 2019-04-01T22:31:44Z + lastUpdateTime: 2019-04-01T22:31:44Z + message: Rollout is serving traffic from the active service. + reason: Available + status: "True" + type: Available + currentPodHash: 6c767bd46c + observedGeneration: 869957df4b + pauseStartTime: 2019-03-26T05:47:32Z + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 \ No newline at end of file diff --git a/pkg/resource_customizations/argoproj.io/Rollout/actions/testdata/pre_v0.6_not_paused_rollout.yaml b/pkg/resource_customizations/argoproj.io/Rollout/actions/testdata/pre_v0.6_not_paused_rollout.yaml new file mode 100644 index 0000000..8e81cff --- /dev/null +++ b/pkg/resource_customizations/argoproj.io/Rollout/actions/testdata/pre_v0.6_not_paused_rollout.yaml @@ -0,0 +1,52 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Rollout +metadata: + clusterName: "" + creationTimestamp: 2019-03-22T21:04:31Z + generation: 1 + labels: + app.kubernetes.io/instance: guestbook-bluegreen + name: guestbook-bluegreen + namespace: default + resourceVersion: "888906" + selfLink: /apis/argoproj.io/v1alpha1/namespaces/default/rollouts/guestbook-bluegreen + uid: 16a1edf0-4ce6-11e9-994f-025000000001 +spec: + minReadySeconds: 30 + replicas: 1 + revisionHistoryLimit: 2 + selector: + matchLabels: + app: guestbook-bluegreen + strategy: + blueGreen: + activeService: guestbook-bluegreen-active + previewService: guestbook-bluegreen-preview + template: + metadata: + labels: + app: guestbook-bluegreen + spec: + containers: + - image: gcr.io/heptio-images/ks-guestbook-demo:0.2 + name: guestbook-bluegreen + ports: + - containerPort: 80 + paused: false +status: + availableReplicas: 1 + blueGreen: + activeSelector: 6c767bd46c + conditions: + - lastTransitionTime: 2019-04-01T22:31:44Z + lastUpdateTime: 2019-04-01T22:31:44Z + message: Rollout is serving traffic from the active service. 
+ reason: Available + status: "True" + type: Available + currentPodHash: 6c767bd46c + observedGeneration: 869957df4b + pauseStartTime: 2019-03-26T05:47:32Z + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 \ No newline at end of file diff --git a/pkg/resource_customizations/argoproj.io/Rollout/actions/testdata/pre_v0.6_paused_rollout.yaml b/pkg/resource_customizations/argoproj.io/Rollout/actions/testdata/pre_v0.6_paused_rollout.yaml new file mode 100644 index 0000000..0c7062d --- /dev/null +++ b/pkg/resource_customizations/argoproj.io/Rollout/actions/testdata/pre_v0.6_paused_rollout.yaml @@ -0,0 +1,52 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Rollout +metadata: + clusterName: "" + creationTimestamp: 2019-03-22T21:04:31Z + generation: 1 + labels: + app.kubernetes.io/instance: guestbook-bluegreen + name: guestbook-bluegreen + namespace: default + resourceVersion: "888906" + selfLink: /apis/argoproj.io/v1alpha1/namespaces/default/rollouts/guestbook-bluegreen + uid: 16a1edf0-4ce6-11e9-994f-025000000001 +spec: + minReadySeconds: 30 + paused: true + replicas: 1 + revisionHistoryLimit: 2 + selector: + matchLabels: + app: guestbook-bluegreen + strategy: + blueGreen: + activeService: guestbook-bluegreen-active + previewService: guestbook-bluegreen-preview + template: + metadata: + labels: + app: guestbook-bluegreen + spec: + containers: + - image: gcr.io/heptio-images/ks-guestbook-demo:0.2 + name: guestbook-bluegreen + ports: + - containerPort: 80 +status: + availableReplicas: 1 + blueGreen: + activeSelector: 6c767bd46c + conditions: + - lastTransitionTime: 2019-04-01T22:31:44Z + lastUpdateTime: 2019-04-01T22:31:44Z + message: Rollout is serving traffic from the active service. + reason: Available + status: "True" + type: Available + currentPodHash: 6c767bd46c + observedGeneration: 869957df4b + pauseStartTime: 2019-03-26T05:47:32Z + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 \ No newline at end of file diff --git a/pkg/resource_customizations/argoproj.io/Rollout/actions/testdata/promote-full_rollout.yaml b/pkg/resource_customizations/argoproj.io/Rollout/actions/testdata/promote-full_rollout.yaml new file mode 100644 index 0000000..b16dbc3 --- /dev/null +++ b/pkg/resource_customizations/argoproj.io/Rollout/actions/testdata/promote-full_rollout.yaml @@ -0,0 +1,64 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Rollout +metadata: + annotations: + rollout.argoproj.io/revision: "2" + creationTimestamp: "2020-11-13T08:25:35Z" + generation: 3 + name: basic + namespace: argocd-e2e + resourceVersion: "201579" + selfLink: /apis/argoproj.io/v1alpha1/namespaces/argocd-e2e/rollouts/basic + uid: 201161e2-c761-4e52-91a1-d4872be9ead4 +spec: + replicas: 1 + selector: + matchLabels: + app: basic + strategy: + canary: + steps: + - setWeight: 50 + - pause: {} + template: + metadata: + creationTimestamp: null + labels: + app: basic + spec: + containers: + - image: nginx:1.18-alpine + name: basic + resources: + requests: + cpu: 1m + memory: 16Mi +status: + promoteFull: true + HPAReplicas: 1 + abort: true + abortedAt: "2020-11-13T08:25:53Z" + availableReplicas: 1 + blueGreen: {} + canary: {} + conditions: + - lastTransitionTime: "2020-11-13T08:25:36Z" + lastUpdateTime: "2020-11-13T08:25:36Z" + message: Rollout has minimum availability + reason: AvailableReason + status: "True" + type: Available + - lastTransitionTime: "2020-11-13T08:25:53Z" + lastUpdateTime: "2020-11-13T08:25:53Z" + message: Rollout is aborted + reason: RolloutAborted + status: "False" + type: Progressing + currentPodHash: 75fdb4ccf6 + 
currentStepHash: 757f5f97b + currentStepIndex: 0 + observedGeneration: "3" + readyReplicas: 1 + replicas: 1 + selector: app=basic + stableRS: 754cb84d5 diff --git a/pkg/resource_customizations/argoproj.io/Rollout/actions/testdata/retried_rollout.yaml b/pkg/resource_customizations/argoproj.io/Rollout/actions/testdata/retried_rollout.yaml new file mode 100644 index 0000000..97a0013 --- /dev/null +++ b/pkg/resource_customizations/argoproj.io/Rollout/actions/testdata/retried_rollout.yaml @@ -0,0 +1,81 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Rollout +metadata: + annotations: + rollout.argoproj.io/revision: '4' + creationTimestamp: '2020-11-06T09:09:54Z' + generation: 76 + labels: + app.kubernetes.io/instance: rollouts-demo + name: rollout-canary + namespace: default + resourceVersion: '4977' + selfLink: /apis/argoproj.io/v1alpha1/namespaces/default/rollouts/rollout-canary + uid: a5047899-8288-43c2-95d7-a8e0a8b45ed6 +spec: + replicas: 2 + restartAt: '2020-11-06T10:03:31Z' + revisionHistoryLimit: 2 + selector: + matchLabels: + app: rollout-canary + strategy: + canary: + steps: + - setWeight: 1 + - pause: {} + template: + metadata: + annotations: + restart: asdfaaa + labels: + app: rollout-canary + spec: + containers: + - image: 'nginx:1.19-alpine' + imagePullPolicy: Always + lifecycle: + postStart: + exec: + command: + - sleep + - '30' + preStop: + exec: + command: + - sleep + - '30' + name: rollouts-demo + ports: + - containerPort: 8080 + resources: {} +status: + HPAReplicas: 2 + abort: null + abortedAt: '2020-11-06T10:08:32Z' + availableReplicas: 2 + blueGreen: {} + canary: + stableRS: 69d59f5445 + conditions: + - lastTransitionTime: '2020-11-06T10:06:38Z' + lastUpdateTime: '2020-11-06T10:06:38Z' + message: Rollout has minimum availability + reason: AvailableReason + status: 'True' + type: Available + - lastTransitionTime: '2020-11-06T10:08:32Z' + lastUpdateTime: '2020-11-06T10:08:32Z' + message: Rollout is aborted + reason: RolloutAborted + status: 'False' + type: Progressing + currentPodHash: 7797495b94 + currentStepHash: 566d47875b + currentStepIndex: 0 + observedGeneration: 74dbb4676d + readyReplicas: 2 + replicas: 2 + restartedAt: '2020-11-06T10:03:31Z' + selector: app=rollout-canary + stableRS: 69d59f5445 diff --git a/pkg/resource_customizations/argoproj.io/Rollout/actions/testdata/rollout_not_restarted.yaml b/pkg/resource_customizations/argoproj.io/Rollout/actions/testdata/rollout_not_restarted.yaml new file mode 100644 index 0000000..72376d0 --- /dev/null +++ b/pkg/resource_customizations/argoproj.io/Rollout/actions/testdata/rollout_not_restarted.yaml @@ -0,0 +1,47 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Rollout +metadata: + name: canary-demo + namespace: default +spec: + replicas: 5 + revisionHistoryLimit: 1 + selector: + matchLabels: + app: canary-demo + strategy: + canary: + analysis: + args: + - name: ingress + value: canary-demo + templateName: success-rate + canaryService: canary-demo-preview + maxSurge: 1 + maxUnavailable: 1 + steps: + - setWeight: 40 + - pause: {} + - setWeight: 60 + - pause: + duration: 10 + - setWeight: 80 + - pause: + duration: 10 + template: + metadata: + labels: + app: canary-demo + spec: + containers: + - image: argoproj/rollouts-demo:red + imagePullPolicy: Always + name: canary-demo + ports: + - containerPort: 8080 + name: http + protocol: TCP + resources: + requests: + cpu: 5m + memory: 32Mi \ No newline at end of file diff --git a/pkg/resource_customizations/argoproj.io/Rollout/actions/testdata/rollout_restarted.yaml 
b/pkg/resource_customizations/argoproj.io/Rollout/actions/testdata/rollout_restarted.yaml new file mode 100644 index 0000000..fb6005a --- /dev/null +++ b/pkg/resource_customizations/argoproj.io/Rollout/actions/testdata/rollout_restarted.yaml @@ -0,0 +1,48 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Rollout +metadata: + name: canary-demo + namespace: default +spec: + replicas: 5 + restartAt: "0001-01-01T00:00:00Z" + revisionHistoryLimit: 1 + selector: + matchLabels: + app: canary-demo + strategy: + canary: + analysis: + args: + - name: ingress + value: canary-demo + templateName: success-rate + canaryService: canary-demo-preview + maxSurge: 1 + maxUnavailable: 1 + steps: + - setWeight: 40 + - pause: {} + - setWeight: 60 + - pause: + duration: 10 + - setWeight: 80 + - pause: + duration: 10 + template: + metadata: + labels: + app: canary-demo + spec: + containers: + - image: argoproj/rollouts-demo:red + imagePullPolicy: Always + name: canary-demo + ports: + - containerPort: 8080 + name: http + protocol: TCP + resources: + requests: + cpu: 5m + memory: 32Mi \ No newline at end of file diff --git a/pkg/resource_customizations/argoproj.io/Rollout/actions/testdata/three_replica_rollout.yaml b/pkg/resource_customizations/argoproj.io/Rollout/actions/testdata/three_replica_rollout.yaml new file mode 100644 index 0000000..2a531f6 --- /dev/null +++ b/pkg/resource_customizations/argoproj.io/Rollout/actions/testdata/three_replica_rollout.yaml @@ -0,0 +1,51 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Rollout +metadata: + clusterName: "" + creationTimestamp: 2019-03-22T21:04:31Z + generation: 1 + labels: + app.kubernetes.io/instance: guestbook-bluegreen + name: guestbook-bluegreen + namespace: default + resourceVersion: "888906" + selfLink: /apis/argoproj.io/v1alpha1/namespaces/default/rollouts/guestbook-bluegreen + uid: 16a1edf0-4ce6-11e9-994f-025000000001 +spec: + minReadySeconds: 30 + replicas: 3 + revisionHistoryLimit: 2 + selector: + matchLabels: + app: guestbook-bluegreen + strategy: + blueGreen: + activeService: guestbook-bluegreen-active + previewService: guestbook-bluegreen-preview + template: + metadata: + labels: + app: guestbook-bluegreen + spec: + containers: + - image: gcr.io/heptio-images/ks-guestbook-demo:0.2 + name: guestbook-bluegreen + ports: + - containerPort: 80 +status: + availableReplicas: 1 + blueGreen: + activeSelector: 6c767bd46c + conditions: + - lastTransitionTime: 2019-04-01T22:31:44Z + lastUpdateTime: 2019-04-01T22:31:44Z + message: Rollout is serving traffic from the active service. 
+ reason: Available + status: "True" + type: Available + currentPodHash: 6c767bd46c + observedGeneration: 869957df4b + pauseStartTime: 2019-03-26T05:47:32Z + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 \ No newline at end of file diff --git a/pkg/resource_customizations/argoproj.io/Rollout/actions/testdata/v0.9_aborted_bg_rollout.yaml b/pkg/resource_customizations/argoproj.io/Rollout/actions/testdata/v0.9_aborted_bg_rollout.yaml new file mode 100644 index 0000000..4a2c883 --- /dev/null +++ b/pkg/resource_customizations/argoproj.io/Rollout/actions/testdata/v0.9_aborted_bg_rollout.yaml @@ -0,0 +1,63 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Rollout +metadata: + annotations: + rollout.argoproj.io/revision: "2" + creationTimestamp: "2020-11-13T08:37:51Z" + generation: 3 + name: bluegreen + namespace: argocd-e2e + resourceVersion: "202207" + selfLink: /apis/argoproj.io/v1alpha1/namespaces/argocd-e2e/rollouts/bluegreen + uid: 39d30e1e-5e0e-460a-a217-fa21215f1d1f +spec: + replicas: 3 + selector: + matchLabels: + app: bluegreen + strategy: + blueGreen: + activeService: bluegreen + autoPromotionEnabled: false + scaleDownDelaySeconds: 10 + template: + metadata: + creationTimestamp: null + labels: + app: bluegreen + spec: + containers: + - image: nginx:1.18-alpine + name: bluegreen + resources: + requests: + cpu: 1m + memory: 16Mi +status: + HPAReplicas: 3 + abort: true + abortedAt: "2020-11-13T08:38:19Z" + availableReplicas: 3 + blueGreen: + activeSelector: 54bd6f9c67 + canary: {} + conditions: + - lastTransitionTime: "2020-11-13T08:37:53Z" + lastUpdateTime: "2020-11-13T08:37:53Z" + message: Rollout has minimum availability + reason: AvailableReason + status: "True" + type: Available + - lastTransitionTime: "2020-11-13T08:38:19Z" + lastUpdateTime: "2020-11-13T08:38:19Z" + message: Rollout is aborted + reason: RolloutAborted + status: "False" + type: Progressing + currentPodHash: 5b6f6b55c4 + observedGeneration: "abc123" + readyReplicas: 3 + replicas: 6 + selector: app=bluegreen,rollouts-pod-template-hash=54bd6f9c67 + stableRS: 54bd6f9c67 + updatedReplicas: 3 diff --git a/pkg/resource_customizations/argoproj.io/Rollout/actions/testdata/v0.9_aborted_rollout.yaml b/pkg/resource_customizations/argoproj.io/Rollout/actions/testdata/v0.9_aborted_rollout.yaml new file mode 100644 index 0000000..9900b51 --- /dev/null +++ b/pkg/resource_customizations/argoproj.io/Rollout/actions/testdata/v0.9_aborted_rollout.yaml @@ -0,0 +1,81 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Rollout +metadata: + annotations: + rollout.argoproj.io/revision: '4' + creationTimestamp: '2020-11-06T09:09:54Z' + generation: 76 + labels: + app.kubernetes.io/instance: rollouts-demo + name: rollout-canary + namespace: default + resourceVersion: '4977' + selfLink: /apis/argoproj.io/v1alpha1/namespaces/default/rollouts/rollout-canary + uid: a5047899-8288-43c2-95d7-a8e0a8b45ed6 +spec: + replicas: 2 + restartAt: '2020-11-06T10:03:31Z' + revisionHistoryLimit: 2 + selector: + matchLabels: + app: rollout-canary + strategy: + canary: + steps: + - setWeight: 1 + - pause: {} + template: + metadata: + annotations: + restart: asdfaaa + labels: + app: rollout-canary + spec: + containers: + - image: 'nginx:1.19-alpine' + imagePullPolicy: Always + lifecycle: + postStart: + exec: + command: + - sleep + - '30' + preStop: + exec: + command: + - sleep + - '30' + name: rollouts-demo + ports: + - containerPort: 8080 + resources: {} +status: + HPAReplicas: 2 + abort: true + abortedAt: '2020-11-06T10:08:32Z' + availableReplicas: 2 + blueGreen: {} + 
canary: + stableRS: 69d59f5445 + conditions: + - lastTransitionTime: '2020-11-06T10:06:38Z' + lastUpdateTime: '2020-11-06T10:06:38Z' + message: Rollout has minimum availability + reason: AvailableReason + status: 'True' + type: Available + - lastTransitionTime: '2020-11-06T10:08:32Z' + lastUpdateTime: '2020-11-06T10:08:32Z' + message: Rollout is aborted + reason: RolloutAborted + status: 'False' + type: Progressing + currentPodHash: 7797495b94 + currentStepHash: 566d47875b + currentStepIndex: 0 + observedGeneration: 74dbb4676d + readyReplicas: 2 + replicas: 2 + restartedAt: '2020-11-06T10:03:31Z' + selector: app=rollout-canary + stableRS: 69d59f5445 diff --git a/pkg/resource_customizations/argoproj.io/Rollout/actions/testdata/v0.9_promote-full_rollout.yaml b/pkg/resource_customizations/argoproj.io/Rollout/actions/testdata/v0.9_promote-full_rollout.yaml new file mode 100644 index 0000000..d917ffa --- /dev/null +++ b/pkg/resource_customizations/argoproj.io/Rollout/actions/testdata/v0.9_promote-full_rollout.yaml @@ -0,0 +1,81 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Rollout +metadata: + annotations: + rollout.argoproj.io/revision: '4' + creationTimestamp: '2020-11-06T09:09:54Z' + generation: 76 + labels: + app.kubernetes.io/instance: rollouts-demo + name: rollout-canary + namespace: default + resourceVersion: '4977' + selfLink: /apis/argoproj.io/v1alpha1/namespaces/default/rollouts/rollout-canary + uid: a5047899-8288-43c2-95d7-a8e0a8b45ed6 +spec: + replicas: 2 + restartAt: '2020-11-06T10:03:31Z' + revisionHistoryLimit: 2 + selector: + matchLabels: + app: rollout-canary + strategy: + canary: + steps: + - setWeight: 1 + - pause: {} + template: + metadata: + annotations: + restart: asdfaaa + labels: + app: rollout-canary + spec: + containers: + - image: 'nginx:1.19-alpine' + imagePullPolicy: Always + lifecycle: + postStart: + exec: + command: + - sleep + - '30' + preStop: + exec: + command: + - sleep + - '30' + name: rollouts-demo + ports: + - containerPort: 8080 + resources: {} +status: + HPAReplicas: 2 + abort: null + abortedAt: '2020-11-06T10:08:32Z' + availableReplicas: 2 + blueGreen: {} + canary: + stableRS: 69d59f5445 + conditions: + - lastTransitionTime: '2020-11-06T10:06:38Z' + lastUpdateTime: '2020-11-06T10:06:38Z' + message: Rollout has minimum availability + reason: AvailableReason + status: 'True' + type: Available + - lastTransitionTime: '2020-11-06T10:08:32Z' + lastUpdateTime: '2020-11-06T10:08:32Z' + message: Rollout is aborted + reason: RolloutAborted + status: 'False' + type: Progressing + currentPodHash: 7797495b94 + currentStepHash: 566d47875b + currentStepIndex: 2 + observedGeneration: 74dbb4676d + readyReplicas: 2 + replicas: 2 + restartedAt: '2020-11-06T10:03:31Z' + selector: app=rollout-canary + stableRS: 69d59f5445 diff --git a/pkg/resource_customizations/argoproj.io/Rollout/health.lua b/pkg/resource_customizations/argoproj.io/Rollout/health.lua new file mode 100644 index 0000000..ec733a8 --- /dev/null +++ b/pkg/resource_customizations/argoproj.io/Rollout/health.lua @@ -0,0 +1,165 @@ +function checkReplicasStatus(obj) + local hs = {} + local desiredReplicas = getNumberValueOrDefault(obj.spec.replicas, 1) + statusReplicas = getNumberValueOrDefault(obj.status.replicas, 0) + updatedReplicas = getNumberValueOrDefault(obj.status.updatedReplicas, 0) + local availableReplicas = getNumberValueOrDefault(obj.status.availableReplicas, 0) + + if updatedReplicas < desiredReplicas then + hs.status = "Progressing" + hs.message = "Waiting for roll out to finish: More replicas need to 
be updated"
+    return hs
+  end
+  if availableReplicas < updatedReplicas then
+    hs.status = "Progressing"
+    hs.message = "Waiting for roll out to finish: updated replicas are still becoming available"
+    return hs
+  end
+  return nil
+end
+
+-- In Argo Rollouts v0.8, .status.canary.stableRS was deprecated in favor of .status.stableRS.
+-- This function returns whichever of the two is set.
+function getStableRS(obj)
+  if obj.status.stableRS ~= nil then
+    return obj.status.stableRS
+  end
+  if obj.status.canary ~= nil then
+    return obj.status.canary.stableRS
+  end
+  return ""
+end
+
+function getNumberValueOrDefault(field, default)
+  if field ~= nil then
+    return field
+  end
+  return default
+end
+
+function checkPaused(obj)
+  local hs = {}
+  hs.status = "Suspended"
+  hs.message = "Rollout is paused"
+  if obj.status.pauseConditions ~= nil and table.getn(obj.status.pauseConditions) > 0 then
+    return hs
+  end
+
+  if obj.spec.paused ~= nil and obj.spec.paused then
+    return hs
+  end
+  return nil
+end
+
+-- isGenerationObserved determines if the rollout spec has been observed by the controller. This
+-- only applies to v0.10 rollouts, which use a numeric status.observedGeneration. For v0.9 rollouts
+-- and below this function always returns true.
+function isGenerationObserved(obj)
+  if obj.status == nil then
+    return false
+  end
+  observedGeneration = tonumber(obj.status.observedGeneration)
+  if observedGeneration == nil or observedGeneration > obj.metadata.generation then
+    -- if we get here, the rollout is a v0.9 rollout
+    return true
+  end
+  return observedGeneration == obj.metadata.generation
+end
+
+-- isWorkloadGenerationObserved determines if the referenced workload's generation has been
+-- observed by the controller. This only applies to v1.1+ rollouts.
+function isWorkloadGenerationObserved(obj)
+  if obj.spec.workloadRef == nil or obj.metadata.annotations == nil then
+    -- rollout is v1.0 or earlier
+    return true
+  end
+  local workloadGen = tonumber(obj.metadata.annotations["rollout.argoproj.io/workload-generation"])
+  local observedWorkloadGen = tonumber(obj.status.workloadObservedGeneration)
+  return workloadGen == observedWorkloadGen
+end
+
+local hs = {}
+if not isGenerationObserved(obj) or not isWorkloadGenerationObserved(obj) then
+  hs.status = "Progressing"
+  hs.message = "Waiting for rollout spec update to be observed"
+  return hs
+end
+
+-- Argo Rollouts v1.0 has been improved to record a phase/message in status, which Argo CD can surface directly
+if obj.status.phase ~= nil then
+  if obj.status.phase == "Paused" then
+    -- Map Rollout's "Paused" status to Argo CD's "Suspended"
+    hs.status = "Suspended"
+  else
+    hs.status = obj.status.phase
+  end
+  hs.message = obj.status.message
+  return hs
+end
+
+for _, condition in ipairs(obj.status.conditions) do
+  if condition.type == "InvalidSpec" then
+    hs.status = "Degraded"
+    hs.message = condition.message
+    return hs
+  end
+  if condition.type == "Progressing" and condition.reason == "RolloutAborted" then
+    hs.status = "Degraded"
+    hs.message = condition.message
+    return hs
+  end
+  if condition.type == "Progressing" and condition.reason == "ProgressDeadlineExceeded" then
+    hs.status = "Degraded"
+    hs.message = condition.message
+    return hs
+  end
+end
+
+local isPaused = checkPaused(obj)
+if isPaused ~= nil then
+  return isPaused
+end
+
+if obj.status.currentPodHash == nil then
+  hs.status = "Progressing"
+  hs.message = "Waiting for rollout to finish: status has not been reconciled."
+  return hs
+end
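+
+-- Past this point the spec has been observed, no Degraded condition matched,
+-- the rollout is not paused, and status.currentPodHash is set. The remaining
+-- checks are generic replica-count progress, then strategy-specific
+-- (blueGreen/canary) promotion. Note: statusReplicas and updatedReplicas are
+-- assigned globally inside checkReplicasStatus and reused by the canary branch below.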
+
+replicasHS = checkReplicasStatus(obj)
+if replicasHS ~= nil then
+  return replicasHS
+end
+
+
+local stableRS = getStableRS(obj)
+
+if obj.spec.strategy.blueGreen ~= nil then
+  if obj.status.blueGreen == nil or obj.status.blueGreen.activeSelector ~= obj.status.currentPodHash then
+    hs.status = "Progressing"
+    hs.message = "active service cutover pending"
+    return hs
+  end
+  -- Starting in v0.8 blue-green uses status.stableRS. To drop support for v0.7, uncomment the following check:
+  -- if stableRS == "" or stableRS ~= obj.status.currentPodHash then
+  if stableRS ~= "" and stableRS ~= obj.status.currentPodHash then
+    hs.status = "Progressing"
+    hs.message = "waiting for analysis to complete"
+    return hs
+  end
+elseif obj.spec.strategy.canary ~= nil then
+  if statusReplicas > updatedReplicas then
+    hs.status = "Progressing"
+    hs.message = "Waiting for roll out to finish: old replicas are pending termination"
+    return hs
+  end
+  if stableRS == "" or stableRS ~= obj.status.currentPodHash then
+    hs.status = "Progressing"
+    hs.message = "Waiting for rollout to finish steps"
+    return hs
+  end
+end
+
+hs.status = "Healthy"
+hs.message = ""
+return hs
\ No newline at end of file
diff --git a/pkg/resource_customizations/argoproj.io/Rollout/health_test.yaml b/pkg/resource_customizations/argoproj.io/Rollout/health_test.yaml
new file mode 100644
index 0000000..a9fa6c2
--- /dev/null
+++ b/pkg/resource_customizations/argoproj.io/Rollout/health_test.yaml
@@ -0,0 +1,94 @@
+tests:
+- healthStatus:
+    status: Progressing
+    message: "Waiting for rollout spec update to be observed"
+  inputPath: testdata/newRolloutWithoutStatus.yaml
+- healthStatus:
+    status: Progressing
+    message: "Waiting for rollout spec update to be observed"
+  inputPath: testdata/progressing_newGeneration.yaml
+- healthStatus:
+    status: Progressing
+    message: "Waiting for rollout spec update to be observed"
+  inputPath: testdata/progressing_newWorkloadGeneration.yaml
+- healthStatus:
+    status: Degraded
+    message: "InvalidSpec"
+  inputPath: testdata/degraded_statusPhaseMessage.yaml
+- healthStatus:
+    status: Healthy
+    message: ""
+  inputPath: testdata/healthy_legacy_v0.9_observedGeneration.yaml
+- healthStatus:
+    status: Healthy
+    message: ""
+  inputPath: testdata/healthy_legacy_v0.9_observedGeneration_numeric.yaml
+- healthStatus:
+    status: Healthy
+    message: ""
+  inputPath: testdata/healthy_legacy_v1.0_newWorkloadGeneration.yaml
+- healthStatus:
+    status: Healthy
+    message: ""
+  inputPath: testdata/healthy_newWorkloadGeneration.yaml
+- healthStatus:
+    status: Degraded
+    message: "The Rollout \"basic\" is invalid: spec.strategy.strategy: Required value: Rollout has missing field '.spec.strategy.canary or .spec.strategy.blueGreen'"
+  inputPath: testdata/degraded_invalidSpec.yaml
+- healthStatus:
+    status: Degraded
+    message: ReplicaSet "guestbook-bluegreen-helm-guestbook-6b8cf6f7db" has timed out progressing.
+ inputPath: testdata/degraded_rolloutTimeout.yaml +- healthStatus: + status: Degraded + message: Rollout is aborted + inputPath: testdata/degraded_abortedRollout.yaml +#BlueGreen +- healthStatus: + status: Healthy + inputPath: testdata/bluegreen/healthy_servingActiveService.yaml +- healthStatus: + status: Progressing + message: "Waiting for roll out to finish: More replicas need to be updated" + inputPath: testdata/bluegreen/progressing_addingMoreReplicas.yaml +- healthStatus: + status: Progressing + message: "Waiting for roll out to finish: updated replicas are still becoming available" + inputPath: testdata/bluegreen/progressing_waitingUntilAvailable.yaml +#Canary +- healthStatus: + status: Progressing + message: "Waiting for roll out to finish: More replicas need to be updated" + inputPath: testdata/canary/progressing_setWeightStep.yaml +- healthStatus: + status: Progressing + message: "Waiting for roll out to finish: old replicas are pending termination" + inputPath: testdata/canary/progressing_killingOldReplicas.yaml +- healthStatus: + status: Suspended + message: Rollout is paused + inputPath: testdata/suspended_controllerPause.yaml +- healthStatus: + status: Suspended + message: Rollout is paused + inputPath: testdata/suspended_userPause.yaml +- healthStatus: + status: Suspended + message: CanaryPauseStep + inputPath: testdata/suspended_v1.0_pausedRollout.yaml +- healthStatus: + status: Healthy + inputPath: testdata/canary/healthy_executedAllStepsPreV0.8.yaml +- healthStatus: + status: Healthy + inputPath: testdata/canary/healthy_executedAllSteps.yaml +- healthStatus: + status: Progressing + message: 'Waiting for roll out to finish: updated replicas are still becoming available' + inputPath: testdata/canary/progressing_noSteps.yaml +- healthStatus: + status: Healthy + inputPath: testdata/canary/healthy_noSteps.yaml +- healthStatus: + status: Healthy + inputPath: testdata/canary/healthy_emptyStepsList.yaml \ No newline at end of file diff --git a/pkg/resource_customizations/argoproj.io/Rollout/testdata/bluegreen/healthy_servingActiveService.yaml b/pkg/resource_customizations/argoproj.io/Rollout/testdata/bluegreen/healthy_servingActiveService.yaml new file mode 100644 index 0000000..45000bc --- /dev/null +++ b/pkg/resource_customizations/argoproj.io/Rollout/testdata/bluegreen/healthy_servingActiveService.yaml @@ -0,0 +1,56 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Rollout +metadata: + annotations: + rollout.argoproj.io/revision: "1" + clusterName: "" + creationTimestamp: 2019-01-22T16:52:54Z + generation: 1 + labels: + app.kubernetes.io/instance: guestbook-default + ksonnet.io/component: guestbook-ui + name: ks-guestbook-ui + namespace: default + resourceVersion: "153353" + selfLink: /apis/argoproj.io/v1alpha1/namespaces/default/rollouts/ks-guestbook-ui + uid: 29802403-1e66-11e9-a6a4-025000000001 +spec: + minReadySeconds: 30 + replicas: 1 + selector: + matchLabels: + app: ks-guestbook-ui + strategy: + blueGreen: + activeService: ks-guestbook-ui-active + previewService: ks-guestbook-ui-preview + type: BlueGreenUpdate + template: + metadata: + creationTimestamp: null + labels: + app: ks-guestbook-ui + spec: + containers: + - image: gcr.io/heptio-images/ks-guestbook-demo:0.2 + name: ks-guestbook-ui + ports: + - containerPort: 80 + resources: {} +status: + availableReplicas: 1 + blueGreen: + activeSelector: dc689d967 + previewSelector: "" + conditions: + - lastTransitionTime: 2019-01-24T09:51:02Z + lastUpdateTime: 2019-01-24T09:51:02Z + message: Rollout is serving traffic from the 
active service. + reason: Available + status: "True" + type: Available + currentPodHash: dc689d967 + observedGeneration: 77646c9d4c + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 diff --git a/pkg/resource_customizations/argoproj.io/Rollout/testdata/bluegreen/progressing_addingMoreReplicas.yaml b/pkg/resource_customizations/argoproj.io/Rollout/testdata/bluegreen/progressing_addingMoreReplicas.yaml new file mode 100644 index 0000000..ae3a8e1 --- /dev/null +++ b/pkg/resource_customizations/argoproj.io/Rollout/testdata/bluegreen/progressing_addingMoreReplicas.yaml @@ -0,0 +1,54 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Rollout +metadata: + annotations: + rollout.argoproj.io/revision: "7" + clusterName: "" + creationTimestamp: 2019-01-22T16:52:54Z + generation: 1 + labels: + app.kubernetes.io/instance: guestbook-default + name: ks-guestbook-ui + namespace: default + resourceVersion: "164023" + selfLink: /apis/argoproj.io/v1alpha1/namespaces/default/rollouts/ks-guestbook-ui + uid: 29802403-1e66-11e9-a6a4-025000000001 +spec: + minReadySeconds: 30 + replicas: 3 + selector: + matchLabels: + app: ks-guestbook-ui + strategy: + blueGreen: + activeService: ks-guestbook-ui-active + previewService: ks-guestbook-ui-preview + type: BlueGreenUpdate + template: + metadata: + creationTimestamp: null + labels: + app: ks-guestbook-ui + spec: + containers: + - image: gcr.io/heptio-images/ks-guestbook-demo:0.1 + name: ks-guestbook-ui + ports: + - containerPort: 83 + resources: {} +status: + activeSelector: 85f9884f5d + availableReplicas: 3 + conditions: + - lastTransitionTime: 2019-01-25T07:44:26Z + lastUpdateTime: 2019-01-25T07:44:26Z + message: Rollout is serving traffic from the active service. + reason: Available + status: "True" + type: Available + currentPodHash: 697fb9575c + observedGeneration: 767f98959f + previewSelector: "" + readyReplicas: 3 + replicas: 3 + updatedReplicas: 0 \ No newline at end of file diff --git a/pkg/resource_customizations/argoproj.io/Rollout/testdata/bluegreen/progressing_waitingUntilAvailable.yaml b/pkg/resource_customizations/argoproj.io/Rollout/testdata/bluegreen/progressing_waitingUntilAvailable.yaml new file mode 100644 index 0000000..901ddd5 --- /dev/null +++ b/pkg/resource_customizations/argoproj.io/Rollout/testdata/bluegreen/progressing_waitingUntilAvailable.yaml @@ -0,0 +1,55 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Rollout +metadata: + annotations: + rollout.argoproj.io/revision: "1" + clusterName: "" + creationTimestamp: 2019-01-25T16:19:09Z + generation: 1 + labels: + app.kubernetes.io/instance: guestbook-default + ksonnet.io/component: guestbook-ui + name: ks-guestbook-ui + namespace: default + resourceVersion: "164590" + selfLink: /apis/argoproj.io/v1alpha1/namespaces/default/rollouts/ks-guestbook-ui + uid: f1b99cb0-20bc-11e9-a811-025000000001 +spec: + minReadySeconds: 30 + replicas: 3 + selector: + matchLabels: + app: ks-guestbook-ui + strategy: + blueGreen: + activeService: ks-guestbook-ui-active + previewService: ks-guestbook-ui-preview + type: BlueGreenUpdate + template: + metadata: + creationTimestamp: null + labels: + app: ks-guestbook-ui + spec: + containers: + - image: gcr.io/heptio-images/ks-guestbook-demo:0.1 + name: ks-guestbook-ui + ports: + - containerPort: 83 + resources: {} +status: + activeSelector: 697fb9575c + availableReplicas: 0 + conditions: + - lastTransitionTime: 2019-01-25T16:19:09Z + lastUpdateTime: 2019-01-25T16:19:09Z + message: Rollout is not serving traffic from the active service. 
+ reason: Available + status: "False" + type: Available + currentPodHash: 697fb9575c + observedGeneration: 767f98959f + previewSelector: "" + readyReplicas: 3 + replicas: 3 + updatedReplicas: 3 \ No newline at end of file diff --git a/pkg/resource_customizations/argoproj.io/Rollout/testdata/canary/healthy_emptyStepsList.yaml b/pkg/resource_customizations/argoproj.io/Rollout/testdata/canary/healthy_emptyStepsList.yaml new file mode 100644 index 0000000..7b8efea --- /dev/null +++ b/pkg/resource_customizations/argoproj.io/Rollout/testdata/canary/healthy_emptyStepsList.yaml @@ -0,0 +1,65 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Rollout +metadata: + annotations: + rollout.argoproj.io/revision: '2' + clusterName: '' + creationTimestamp: '2019-05-01T21:55:30Z' + generation: 1 + labels: + app.kubernetes.io/instance: guestbook-canary + ksonnet.io/component: guestbook-ui + name: guestbook-canary + namespace: default + resourceVersion: '956205' + selfLink: /apis/argoproj.io/v1alpha1/namespaces/default/rollouts/guestbook-canary + uid: d6105ccd-6c5b-11e9-b8d7-025000000001 +spec: + minReadySeconds: 10 + replicas: 5 + selector: + matchLabels: + app: guestbook-canary + strategy: + canary: + maxSurge: 1 + maxUnavailable: 0 + steps: [] + template: + metadata: + creationTimestamp: null + labels: + app: guestbook-canary + spec: + containers: + - image: 'gcr.io/heptio-images/ks-guestbook-demo:0.2' + name: guestbook-canary + ports: + - containerPort: 80 + resources: {} +status: + HPAReplicas: 5 + availableReplicas: 5 + blueGreen: {} + canary: + stableRS: 567dd56d89 + conditions: + - lastTransitionTime: '2019-05-01T22:00:16Z' + lastUpdateTime: '2019-05-01T22:00:16Z' + message: Rollout has minimum availability + reason: AvailableReason + status: 'True' + type: Available + - lastTransitionTime: '2019-05-01T21:55:30Z' + lastUpdateTime: '2019-05-01T22:00:16Z' + message: ReplicaSet "guestbook-canary-567dd56d89" has successfully progressed. 
+ reason: NewReplicaSetAvailable + status: 'True' + type: Progressing + currentPodHash: 567dd56d89 + currentStepHash: 6c9545789c + observedGeneration: 6886f85bff + readyReplicas: 5 + replicas: 5 + selector: app=guestbook-canary + updatedReplicas: 5 diff --git a/pkg/resource_customizations/argoproj.io/Rollout/testdata/canary/healthy_executedAllSteps.yaml b/pkg/resource_customizations/argoproj.io/Rollout/testdata/canary/healthy_executedAllSteps.yaml new file mode 100644 index 0000000..a4d5485 --- /dev/null +++ b/pkg/resource_customizations/argoproj.io/Rollout/testdata/canary/healthy_executedAllSteps.yaml @@ -0,0 +1,73 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Rollout +metadata: + annotations: + kubectl.kubernetes.io/last-applied-configuration: > + {"apiVersion":"argoproj.io/v1alpha1","kind":"Rollout","metadata":{"annotations":{},"labels":{"app.kubernetes.io/instance":"guestbook-canary","ksonnet.io/component":"guestbook-ui"},"name":"guestbook-canary","namespace":"default"},"spec":{"minReadySeconds":10,"replicas":5,"selector":{"matchLabels":{"app":"guestbook-canary"}},"strategy":{"canary":{"maxSurge":1,"maxUnavailable":0,"steps":[{"setWeight":20},{"pause":{"duration":30}},{"setWeight":40},{"pause":{}}]}},"template":{"metadata":{"labels":{"app":"guestbook-canary"}},"spec":{"containers":[{"image":"gcr.io/heptio-images/ks-guestbook-demo:0.1","name":"guestbook-canary","ports":[{"containerPort":80}]}]}}}} + rollout.argoproj.io/revision: '1' + clusterName: '' + creationTimestamp: '2019-05-01T21:55:30Z' + generation: 1 + labels: + app.kubernetes.io/instance: guestbook-canary + ksonnet.io/component: guestbook-ui + name: guestbook-canary + namespace: default + resourceVersion: '955764' + selfLink: /apis/argoproj.io/v1alpha1/namespaces/default/rollouts/guestbook-canary + uid: d6105ccd-6c5b-11e9-b8d7-025000000001 +spec: + minReadySeconds: 10 + replicas: 5 + selector: + matchLabels: + app: guestbook-canary + strategy: + canary: + maxSurge: 1 + maxUnavailable: 0 + steps: + - setWeight: 20 + - pause: + duration: 30 + - setWeight: 40 + - pause: {} + template: + metadata: + creationTimestamp: null + labels: + app: guestbook-canary + spec: + containers: + - image: 'gcr.io/heptio-images/ks-guestbook-demo:0.1' + name: guestbook-canary + ports: + - containerPort: 80 + resources: {} +status: + HPAReplicas: 5 + availableReplicas: 5 + blueGreen: {} + canary: {} + conditions: + - lastTransitionTime: '2019-05-01T21:55:30Z' + lastUpdateTime: '2019-05-01T21:55:58Z' + message: ReplicaSet "guestbook-canary-84ccfddd66" has successfully progressed. 
+ reason: NewReplicaSetAvailable + status: 'True' + type: Progressing + - lastTransitionTime: '2019-05-01T21:55:58Z' + lastUpdateTime: '2019-05-01T21:55:58Z' + message: Rollout has minimum availability + reason: AvailableReason + status: 'True' + type: Available + currentPodHash: 84ccfddd66 + currentStepHash: 5f8fbdf7bb + currentStepIndex: 4 + observedGeneration: c45557fd9 + readyReplicas: 5 + replicas: 5 + selector: app=guestbook-canary + stableRS: 84ccfddd66 + updatedReplicas: 5 \ No newline at end of file diff --git a/pkg/resource_customizations/argoproj.io/Rollout/testdata/canary/healthy_executedAllStepsPreV0.8.yaml b/pkg/resource_customizations/argoproj.io/Rollout/testdata/canary/healthy_executedAllStepsPreV0.8.yaml new file mode 100644 index 0000000..52073ff --- /dev/null +++ b/pkg/resource_customizations/argoproj.io/Rollout/testdata/canary/healthy_executedAllStepsPreV0.8.yaml @@ -0,0 +1,73 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Rollout +metadata: + annotations: + kubectl.kubernetes.io/last-applied-configuration: > + {"apiVersion":"argoproj.io/v1alpha1","kind":"Rollout","metadata":{"annotations":{},"labels":{"app.kubernetes.io/instance":"guestbook-canary","ksonnet.io/component":"guestbook-ui"},"name":"guestbook-canary","namespace":"default"},"spec":{"minReadySeconds":10,"replicas":5,"selector":{"matchLabels":{"app":"guestbook-canary"}},"strategy":{"canary":{"maxSurge":1,"maxUnavailable":0,"steps":[{"setWeight":20},{"pause":{"duration":30}},{"setWeight":40},{"pause":{}}]}},"template":{"metadata":{"labels":{"app":"guestbook-canary"}},"spec":{"containers":[{"image":"gcr.io/heptio-images/ks-guestbook-demo:0.1","name":"guestbook-canary","ports":[{"containerPort":80}]}]}}}} + rollout.argoproj.io/revision: '1' + clusterName: '' + creationTimestamp: '2019-05-01T21:55:30Z' + generation: 1 + labels: + app.kubernetes.io/instance: guestbook-canary + ksonnet.io/component: guestbook-ui + name: guestbook-canary + namespace: default + resourceVersion: '955764' + selfLink: /apis/argoproj.io/v1alpha1/namespaces/default/rollouts/guestbook-canary + uid: d6105ccd-6c5b-11e9-b8d7-025000000001 +spec: + minReadySeconds: 10 + replicas: 5 + selector: + matchLabels: + app: guestbook-canary + strategy: + canary: + maxSurge: 1 + maxUnavailable: 0 + steps: + - setWeight: 20 + - pause: + duration: 30 + - setWeight: 40 + - pause: {} + template: + metadata: + creationTimestamp: null + labels: + app: guestbook-canary + spec: + containers: + - image: 'gcr.io/heptio-images/ks-guestbook-demo:0.1' + name: guestbook-canary + ports: + - containerPort: 80 + resources: {} +status: + HPAReplicas: 5 + availableReplicas: 5 + blueGreen: {} + canary: + stableRS: 84ccfddd66 + conditions: + - lastTransitionTime: '2019-05-01T21:55:30Z' + lastUpdateTime: '2019-05-01T21:55:58Z' + message: ReplicaSet "guestbook-canary-84ccfddd66" has successfully progressed. 
+ reason: NewReplicaSetAvailable + status: 'True' + type: Progressing + - lastTransitionTime: '2019-05-01T21:55:58Z' + lastUpdateTime: '2019-05-01T21:55:58Z' + message: Rollout has minimum availability + reason: AvailableReason + status: 'True' + type: Available + currentPodHash: 84ccfddd66 + currentStepHash: 5f8fbdf7bb + currentStepIndex: 4 + observedGeneration: c45557fd9 + readyReplicas: 5 + replicas: 5 + selector: app=guestbook-canary + updatedReplicas: 5 \ No newline at end of file diff --git a/pkg/resource_customizations/argoproj.io/Rollout/testdata/canary/healthy_noSteps.yaml b/pkg/resource_customizations/argoproj.io/Rollout/testdata/canary/healthy_noSteps.yaml new file mode 100644 index 0000000..092d45c --- /dev/null +++ b/pkg/resource_customizations/argoproj.io/Rollout/testdata/canary/healthy_noSteps.yaml @@ -0,0 +1,66 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Rollout +metadata: + annotations: + kubectl.kubernetes.io/last-applied-configuration: > + {"apiVersion":"argoproj.io/v1alpha1","kind":"Rollout","metadata":{"annotations":{},"labels":{"app.kubernetes.io/instance":"guestbook-canary","ksonnet.io/component":"guestbook-ui"},"name":"guestbook-canary","namespace":"default"},"spec":{"minReadySeconds":10,"replicas":5,"selector":{"matchLabels":{"app":"guestbook-canary"}},"strategy":{"canary":{"maxSurge":1,"maxUnavailable":0,"steps":[{"setWeight":20},{"pause":{"duration":30}},{"setWeight":40},{"pause":{}}]}},"template":{"metadata":{"labels":{"app":"guestbook-canary"}},"spec":{"containers":[{"image":"gcr.io/heptio-images/ks-guestbook-demo:0.1","name":"guestbook-canary","ports":[{"containerPort":80}]}]}}}} + rollout.argoproj.io/revision: '2' + clusterName: '' + creationTimestamp: '2019-05-01T21:55:30Z' + generation: 1 + labels: + app.kubernetes.io/instance: guestbook-canary + ksonnet.io/component: guestbook-ui + name: guestbook-canary + namespace: default + resourceVersion: '956205' + selfLink: /apis/argoproj.io/v1alpha1/namespaces/default/rollouts/guestbook-canary + uid: d6105ccd-6c5b-11e9-b8d7-025000000001 +spec: + minReadySeconds: 10 + replicas: 5 + selector: + matchLabels: + app: guestbook-canary + strategy: + canary: + maxSurge: 1 + maxUnavailable: 0 + template: + metadata: + creationTimestamp: null + labels: + app: guestbook-canary + spec: + containers: + - image: 'gcr.io/heptio-images/ks-guestbook-demo:0.2' + name: guestbook-canary + ports: + - containerPort: 80 + resources: {} +status: + HPAReplicas: 5 + availableReplicas: 5 + blueGreen: {} + canary: + stableRS: 567dd56d89 + conditions: + - lastTransitionTime: '2019-05-01T22:00:16Z' + lastUpdateTime: '2019-05-01T22:00:16Z' + message: Rollout has minimum availability + reason: AvailableReason + status: 'True' + type: Available + - lastTransitionTime: '2019-05-01T21:55:30Z' + lastUpdateTime: '2019-05-01T22:00:16Z' + message: ReplicaSet "guestbook-canary-567dd56d89" has successfully progressed. 
+ reason: NewReplicaSetAvailable + status: 'True' + type: Progressing + currentPodHash: 567dd56d89 + currentStepHash: 6c9545789c + observedGeneration: 6886f85bff + readyReplicas: 5 + replicas: 5 + selector: app=guestbook-canary + updatedReplicas: 5 diff --git a/pkg/resource_customizations/argoproj.io/Rollout/testdata/canary/progressing_killingOldReplicas.yaml b/pkg/resource_customizations/argoproj.io/Rollout/testdata/canary/progressing_killingOldReplicas.yaml new file mode 100644 index 0000000..aab3d8f --- /dev/null +++ b/pkg/resource_customizations/argoproj.io/Rollout/testdata/canary/progressing_killingOldReplicas.yaml @@ -0,0 +1,61 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Rollout +metadata: + annotations: + kubectl.kubernetes.io/last-applied-configuration: | + {"apiVersion":"argoproj.io/v1alpha1","kind":"Rollout","metadata":{"annotations":{},"name":"example-rollout-canary","namespace":"default"},"spec":{"minReadySeconds":30,"replicas":5,"revisionHistoryLimit":3,"selector":{"matchLabels":{"app":"guestbook"}},"strategy":{"canary":{"steps":[{"setWeight":20},{"pause":{"duration":20}},{"setWeight":40},{"pause":{}}]}},"template":{"metadata":{"labels":{"app":"guestbook"}},"spec":{"containers":[{"image":"gcr.io/heptio-images/ks-guestbook-demo:0.1","name":"guestbook","ports":[{"containerPort":80}]}]}}}} + rollout.argoproj.io/revision: "3" + creationTimestamp: "2019-10-20T15:42:26Z" + generation: 101 + name: example-rollout-canary + namespace: default + resourceVersion: "1779901" + selfLink: /apis/argoproj.io/v1alpha1/namespaces/default/rollouts/example-rollout-canary + uid: f8ebf794-8b4e-4a1f-a2d1-a85bc1d206ef +spec: + minReadySeconds: 30 + replicas: 5 + revisionHistoryLimit: 3 + selector: + matchLabels: + app: guestbook + strategy: + canary: {} + template: + metadata: + creationTimestamp: null + labels: + app: guestbook + spec: + containers: + - image: gcr.io/heptio-images/ks-guestbook-demo:0.1 + name: guestbook + ports: + - containerPort: 80 + resources: {} +status: + HPAReplicas: 6 + availableReplicas: 5 + blueGreen: {} + canary: + stableRS: 74d6dc8544 + conditions: + - lastTransitionTime: "2019-10-25T16:08:02Z" + lastUpdateTime: "2019-10-25T16:08:02Z" + message: Rollout has minimum availability + reason: AvailableReason + status: "True" + type: Available + - lastTransitionTime: "2019-10-25T16:50:19Z" + lastUpdateTime: "2019-11-07T18:19:25Z" + message: ReplicaSet "example-rollout-canary-694fb7759c" is progressing. 
+ reason: ReplicaSetUpdated + status: "True" + type: Progressing + currentPodHash: 694fb7759c + currentStepHash: 5ffbfbbd64 + observedGeneration: 7fcc96c7b7 + readyReplicas: 6 + replicas: 6 + selector: app=guestbook + updatedReplicas: 5 \ No newline at end of file diff --git a/pkg/resource_customizations/argoproj.io/Rollout/testdata/canary/progressing_noSteps.yaml b/pkg/resource_customizations/argoproj.io/Rollout/testdata/canary/progressing_noSteps.yaml new file mode 100644 index 0000000..41e7a07 --- /dev/null +++ b/pkg/resource_customizations/argoproj.io/Rollout/testdata/canary/progressing_noSteps.yaml @@ -0,0 +1,64 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Rollout +metadata: + annotations: + rollout.argoproj.io/revision: '2' + clusterName: '' + creationTimestamp: '2019-05-01T21:55:30Z' + generation: 1 + labels: + app.kubernetes.io/instance: guestbook-canary + ksonnet.io/component: guestbook-ui + name: guestbook-canary + namespace: default + resourceVersion: '956159' + selfLink: /apis/argoproj.io/v1alpha1/namespaces/default/rollouts/guestbook-canary + uid: d6105ccd-6c5b-11e9-b8d7-025000000001 +spec: + minReadySeconds: 10 + replicas: 5 + selector: + matchLabels: + app: guestbook-canary + strategy: + canary: + maxSurge: 1 + maxUnavailable: 0 + template: + metadata: + creationTimestamp: null + labels: + app: guestbook-canary + spec: + containers: + - image: 'gcr.io/heptio-images/ks-guestbook-demo:0.2' + name: guestbook-canary + ports: + - containerPort: 80 + resources: {} +status: + HPAReplicas: 6 + availableReplicas: 2 + blueGreen: {} + canary: + stableRS: 567dd56d89 + conditions: + - lastTransitionTime: '2019-05-01T21:59:58Z' + lastUpdateTime: '2019-05-01T21:59:58Z' + message: Rollout does not have minimum availability + reason: AvailableReason + status: 'False' + type: Available + - lastTransitionTime: '2019-05-01T21:55:30Z' + lastUpdateTime: '2019-05-01T22:00:05Z' + message: ReplicaSet "guestbook-canary-567dd56d89" is progressing. 
+ reason: ReplicaSetUpdated + status: 'True' + type: Progressing + currentPodHash: 567dd56d89 + currentStepHash: 6c9545789c + observedGeneration: 6886f85bff + readyReplicas: 4 + replicas: 6 + selector: app=guestbook-canary + updatedReplicas: 5 diff --git a/pkg/resource_customizations/argoproj.io/Rollout/testdata/canary/progressing_setWeightStep.yaml b/pkg/resource_customizations/argoproj.io/Rollout/testdata/canary/progressing_setWeightStep.yaml new file mode 100644 index 0000000..a521be9 --- /dev/null +++ b/pkg/resource_customizations/argoproj.io/Rollout/testdata/canary/progressing_setWeightStep.yaml @@ -0,0 +1,69 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Rollout +metadata: + annotations: + kubectl.kubernetes.io/last-applied-configuration: | + {"apiVersion":"argoproj.io/v1alpha1","kind":"Rollout","metadata":{"annotations":{},"name":"example-rollout-canary","namespace":"default"},"spec":{"minReadySeconds":30,"replicas":5,"revisionHistoryLimit":3,"selector":{"matchLabels":{"app":"guestbook"}},"strategy":{"canary":{"steps":[{"setWeight":20},{"pause":{"duration":20}},{"setWeight":40},{"pause":{}}]}},"template":{"metadata":{"labels":{"app":"guestbook"}},"spec":{"containers":[{"image":"gcr.io/heptio-images/ks-guestbook-demo:0.1","name":"guestbook","ports":[{"containerPort":80}]}]}}}} + rollout.argoproj.io/revision: "2" + clusterName: "" + creationTimestamp: 2019-04-26T20:17:43Z + generation: 1 + name: example-rollout-canary + namespace: default + resourceVersion: "696688" + selfLink: /apis/argoproj.io/v1alpha1/namespaces/default/rollouts/example-rollout-canary + uid: 58f6f1bb-6860-11e9-b8d7-025000000001 +spec: + minReadySeconds: 30 + replicas: 5 + revisionHistoryLimit: 3 + selector: + matchLabels: + app: guestbook + strategy: + canary: + steps: + - setWeight: 20 + - pause: + duration: 20 + - setWeight: 40 + - pause: {} + template: + metadata: + creationTimestamp: null + labels: + app: guestbook + spec: + containers: + - image: gcr.io/heptio-images/ks-guestbook-demo:0.2 + name: guestbook + ports: + - containerPort: 80 + resources: {} +status: + HPAReplicas: 5 + availableReplicas: 5 + blueGreen: {} + canary: + stableRS: df986d68 + conditions: + - lastTransitionTime: 2019-04-26T20:18:05Z + lastUpdateTime: 2019-04-26T20:18:05Z + message: Rollout is not serving traffic from the active service. + reason: Available + status: "False" + type: Available + - lastTransitionTime: 2019-04-26T20:18:58Z + lastUpdateTime: 2019-04-26T20:19:29Z + message: ReplicaSet "example-rollout-canary-6b566f47b7" is progressing. 
+ reason: ReplicaSetUpdated + status: "True" + type: Progressing + currentPodHash: 6b566f47b7 + currentStepHash: 6567fc959c + currentStepIndex: 3 + observedGeneration: 6df79499bc + readyReplicas: 5 + replicas: 5 + selector: app=guestbook + updatedReplicas: 2 \ No newline at end of file diff --git a/pkg/resource_customizations/argoproj.io/Rollout/testdata/degraded_abortedRollout.yaml b/pkg/resource_customizations/argoproj.io/Rollout/testdata/degraded_abortedRollout.yaml new file mode 100644 index 0000000..d821d6a --- /dev/null +++ b/pkg/resource_customizations/argoproj.io/Rollout/testdata/degraded_abortedRollout.yaml @@ -0,0 +1,70 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Rollout +metadata: + name: canary-demo + namespace: default +spec: + replicas: 5 + revisionHistoryLimit: 3 + selector: + matchLabels: + app: canary-demo + strategy: + canary: + analysis: + name: analysis + templateName: analysis-template + canaryService: canary-demo-preview + steps: + - setWeight: 40 + - pause: {} + - setWeight: 60 + - pause: {} + - setWeight: 80 + - pause: + duration: 10 + template: + metadata: + creationTimestamp: null + labels: + app: canary-demo + spec: + containers: + - image: argoproj/rollouts-demo:yellow + imagePullPolicy: Always + name: canary-demo + ports: + - containerPort: 8080 + name: http + protocol: TCP + resources: + requests: + cpu: 5m + memory: 32Mi +status: + HPAReplicas: 5 + abort: true + availableReplicas: 5 + blueGreen: {} + canary: + stableRS: 645d5dbc4c + conditions: + - lastTransitionTime: "2019-11-03T01:32:46Z" + lastUpdateTime: "2019-11-03T01:32:46Z" + message: Rollout has minimum availability + reason: AvailableReason + status: "True" + type: Available + - lastTransitionTime: "2019-11-05T18:20:12Z" + lastUpdateTime: "2019-11-05T18:20:12Z" + message: Rollout is aborted + reason: RolloutAborted + status: "False" + type: Progressing + currentPodHash: 6758949f55 + currentStepHash: 59f8666948 + currentStepIndex: 0 + observedGeneration: 58b949649c + readyReplicas: 5 + replicas: 5 + selector: app=canary-demo \ No newline at end of file diff --git a/pkg/resource_customizations/argoproj.io/Rollout/testdata/degraded_invalidSpec.yaml b/pkg/resource_customizations/argoproj.io/Rollout/testdata/degraded_invalidSpec.yaml new file mode 100644 index 0000000..ff48a5d --- /dev/null +++ b/pkg/resource_customizations/argoproj.io/Rollout/testdata/degraded_invalidSpec.yaml @@ -0,0 +1,65 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Rollout +metadata: + annotations: + rollout.argoproj.io/revision: "1" + creationTimestamp: "2020-11-13T00:22:49Z" + generation: 3 + name: basic + namespace: argocd-e2e + resourceVersion: "181746" + selfLink: /apis/argoproj.io/v1alpha1/namespaces/argocd-e2e/rollouts/basic + uid: 5b0926f3-30b7-4727-a76e-46c0d2617906 +spec: + replicas: 1 + selector: + matchLabels: + app: basic + strategy: {} + template: + metadata: + creationTimestamp: null + labels: + app: basic + spec: + containers: + - image: nginx:1.19-alpine + name: basic + resources: + requests: + cpu: 1m + memory: 16Mi +status: + HPAReplicas: 1 + availableReplicas: 1 + blueGreen: {} + canary: {} + conditions: + - lastTransitionTime: "2020-11-13T00:22:48Z" + lastUpdateTime: "2020-11-13T00:22:50Z" + message: ReplicaSet "basic-754cb84d5" has successfully progressed. 
+ reason: NewReplicaSetAvailable + status: "True" + type: Progressing + - lastTransitionTime: "2020-11-13T00:22:50Z" + lastUpdateTime: "2020-11-13T00:22:50Z" + message: Rollout has minimum availability + reason: AvailableReason + status: "True" + type: Available + - lastTransitionTime: "2020-11-13T00:40:30Z" + lastUpdateTime: "2020-11-13T00:40:30Z" + message: 'The Rollout "basic" is invalid: spec.strategy.strategy: Required value: + Rollout has missing field ''.spec.strategy.canary or .spec.strategy.blueGreen''' + reason: InvalidSpec + status: "True" + type: InvalidSpec + currentPodHash: 754cb84d5 + currentStepHash: 757f5f97b + currentStepIndex: 2 + observedGeneration: "3" + readyReplicas: 1 + replicas: 1 + selector: app=basic + stableRS: 754cb84d5 + updatedReplicas: 1 diff --git a/pkg/resource_customizations/argoproj.io/Rollout/testdata/degraded_rolloutTimeout.yaml b/pkg/resource_customizations/argoproj.io/Rollout/testdata/degraded_rolloutTimeout.yaml new file mode 100644 index 0000000..bfded3c --- /dev/null +++ b/pkg/resource_customizations/argoproj.io/Rollout/testdata/degraded_rolloutTimeout.yaml @@ -0,0 +1,84 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Rollout +metadata: + annotations: + rollout.argoproj.io/revision: '4' + clusterName: '' + creationTimestamp: '2019-04-29T21:37:38Z' + generation: 1 + labels: + app: helm-guestbook + app.kubernetes.io/instance: guestbook-bluegreen + chart: helm-guestbook-0.1.0 + heritage: Tiller + release: guestbook-bluegreen + name: guestbook-bluegreen-helm-guestbook + namespace: default + selfLink: >- + /apis/argoproj.io/v1alpha1/namespaces/default/rollouts/guestbook-bluegreen-helm-guestbook +spec: + minReadySeconds: 0 + progressDeadlineSeconds: 32 + replicas: 3 + revisionHistoryLimit: 3 + selector: + matchLabels: + app: helm-guestbook + release: guestbook-bluegreen + strategy: + blueGreen: + activeService: guestbook-bluegreen-helm-guestbook + previewReplicaCount: 1 + previewService: guestbook-bluegreen-helm-guestbook-preview + template: + metadata: + creationTimestamp: null + labels: + app: helm-guestbook + release: guestbook-bluegreen + spec: + containers: + - image: 'gcr.io/heptio-images/ks-guestbook-demo:0.3' + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: / + port: http + name: helm-guestbook + ports: + - containerPort: 80 + name: http + protocol: TCP + readinessProbe: + httpGet: + path: / + port: http + resources: {} +status: + HPAReplicas: 3 + availableReplicas: 3 + blueGreen: + activeSelector: 8464d8564d + canary: {} + conditions: + - lastTransitionTime: '2019-05-01T17:52:59Z' + lastUpdateTime: '2019-05-01T17:52:59Z' + message: Rollout has minimum availability + reason: AvailableReason + status: 'True' + type: Available + - lastTransitionTime: '2019-05-01T21:36:03Z' + lastUpdateTime: '2019-05-01T21:36:03Z' + message: >- + ReplicaSet "guestbook-bluegreen-helm-guestbook-6b8cf6f7db" has timed out + progressing. 
+ reason: ProgressDeadlineExceeded + status: 'False' + type: Progressing + currentPodHash: 6b8cf6f7db + observedGeneration: 7bcdbf7bd9 + readyReplicas: 3 + replicas: 4 + selector: >- + app=helm-guestbook,release=guestbook-bluegreen,rollouts-pod-template-hash=8464d8564d + updatedReplicas: 1 \ No newline at end of file diff --git a/pkg/resource_customizations/argoproj.io/Rollout/testdata/degraded_statusPhaseMessage.yaml b/pkg/resource_customizations/argoproj.io/Rollout/testdata/degraded_statusPhaseMessage.yaml new file mode 100644 index 0000000..02c161d --- /dev/null +++ b/pkg/resource_customizations/argoproj.io/Rollout/testdata/degraded_statusPhaseMessage.yaml @@ -0,0 +1,50 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Rollout +metadata: + creationTimestamp: "2020-11-13T00:44:55Z" + generation: 1 + name: basic + namespace: argocd-e2e + resourceVersion: "182108" + selfLink: /apis/argoproj.io/v1alpha1/namespaces/argocd-e2e/rollouts/basic + uid: 34e4bbfc-222c-4968-bd60-2b30ae81110d +spec: + replicas: 1 + selector: + matchLabels: + app: basic + strategy: + canary: + steps: + - setWeight: 50 + - pause: {} + template: + metadata: + creationTimestamp: null + labels: + app: basic + spec: + containers: + - image: nginx:1.19-alpine + name: basic + resources: + requests: + cpu: 1m + memory: 16Mi +status: + HPAReplicas: 1 + availableReplicas: 1 + blueGreen: {} + canary: {} + conditions: {} + phase: "Degraded" + message: "InvalidSpec" + currentPodHash: 754cb84d5 + currentStepHash: 757f5f97b + currentStepIndex: 2 + observedGeneration: "8575574967" ## <---- uses legacy observedGeneration hash which are numbers + readyReplicas: 1 + replicas: 1 + selector: app=basic + stableRS: 754cb84d5 + updatedReplicas: 1 diff --git a/pkg/resource_customizations/argoproj.io/Rollout/testdata/healthy_legacy_v0.9_observedGeneration.yaml b/pkg/resource_customizations/argoproj.io/Rollout/testdata/healthy_legacy_v0.9_observedGeneration.yaml new file mode 100644 index 0000000..61d7c1e --- /dev/null +++ b/pkg/resource_customizations/argoproj.io/Rollout/testdata/healthy_legacy_v0.9_observedGeneration.yaml @@ -0,0 +1,60 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Rollout +metadata: + creationTimestamp: "2020-11-13T00:44:55Z" + generation: 1 + name: basic + namespace: argocd-e2e + resourceVersion: "182108" + selfLink: /apis/argoproj.io/v1alpha1/namespaces/argocd-e2e/rollouts/basic + uid: 34e4bbfc-222c-4968-bd60-2b30ae81110d +spec: + replicas: 1 + selector: + matchLabels: + app: basic + strategy: + canary: + steps: + - setWeight: 50 + - pause: {} + template: + metadata: + creationTimestamp: null + labels: + app: basic + spec: + containers: + - image: nginx:1.19-alpine + name: basic + resources: + requests: + cpu: 1m + memory: 16Mi +status: + HPAReplicas: 1 + availableReplicas: 1 + blueGreen: {} + canary: {} + conditions: + - lastTransitionTime: "2020-11-13T00:48:20Z" + lastUpdateTime: "2020-11-13T00:48:22Z" + message: ReplicaSet "basic-754cb84d5" has successfully progressed. 
+ reason: NewReplicaSetAvailable + status: "True" + type: Progressing + - lastTransitionTime: "2020-11-13T00:48:22Z" + lastUpdateTime: "2020-11-13T00:48:22Z" + message: Rollout has minimum availability + reason: AvailableReason + status: "True" + type: Available + currentPodHash: 754cb84d5 + currentStepHash: 757f5f97b + currentStepIndex: 2 + observedGeneration: "abc123" ## <---- uses legacy observedGeneration hash + readyReplicas: 1 + replicas: 1 + selector: app=basic + stableRS: 754cb84d5 + updatedReplicas: 1 diff --git a/pkg/resource_customizations/argoproj.io/Rollout/testdata/healthy_legacy_v0.9_observedGeneration_numeric.yaml b/pkg/resource_customizations/argoproj.io/Rollout/testdata/healthy_legacy_v0.9_observedGeneration_numeric.yaml new file mode 100644 index 0000000..1281116 --- /dev/null +++ b/pkg/resource_customizations/argoproj.io/Rollout/testdata/healthy_legacy_v0.9_observedGeneration_numeric.yaml @@ -0,0 +1,60 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Rollout +metadata: + creationTimestamp: "2020-11-13T00:44:55Z" + generation: 1 + name: basic + namespace: argocd-e2e + resourceVersion: "182108" + selfLink: /apis/argoproj.io/v1alpha1/namespaces/argocd-e2e/rollouts/basic + uid: 34e4bbfc-222c-4968-bd60-2b30ae81110d +spec: + replicas: 1 + selector: + matchLabels: + app: basic + strategy: + canary: + steps: + - setWeight: 50 + - pause: {} + template: + metadata: + creationTimestamp: null + labels: + app: basic + spec: + containers: + - image: nginx:1.19-alpine + name: basic + resources: + requests: + cpu: 1m + memory: 16Mi +status: + HPAReplicas: 1 + availableReplicas: 1 + blueGreen: {} + canary: {} + conditions: + - lastTransitionTime: "2020-11-13T00:48:20Z" + lastUpdateTime: "2020-11-13T00:48:22Z" + message: ReplicaSet "basic-754cb84d5" has successfully progressed. 
+ reason: NewReplicaSetAvailable + status: "True" + type: Progressing + - lastTransitionTime: "2020-11-13T00:48:22Z" + lastUpdateTime: "2020-11-13T00:48:22Z" + message: Rollout has minimum availability + reason: AvailableReason + status: "True" + type: Available + currentPodHash: 754cb84d5 + currentStepHash: 757f5f97b + currentStepIndex: 2 + observedGeneration: "8575574967" ## <---- uses legacy observedGeneration hash which are numbers + readyReplicas: 1 + replicas: 1 + selector: app=basic + stableRS: 754cb84d5 + updatedReplicas: 1 diff --git a/pkg/resource_customizations/argoproj.io/Rollout/testdata/healthy_legacy_v1.0_newWorkloadGeneration.yaml b/pkg/resource_customizations/argoproj.io/Rollout/testdata/healthy_legacy_v1.0_newWorkloadGeneration.yaml new file mode 100644 index 0000000..f231ff5 --- /dev/null +++ b/pkg/resource_customizations/argoproj.io/Rollout/testdata/healthy_legacy_v1.0_newWorkloadGeneration.yaml @@ -0,0 +1,58 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Rollout +metadata: + annotations: + rollout.argoproj.io/revision: "1" + creationTimestamp: "2021-07-27T12:14:11Z" + generation: 3 + name: rollout-ref-deployment + namespace: default + resourceVersion: "4220" + uid: a3d1d224-ac4f-4f84-9274-e01e1d43b036 +spec: + replicas: 5 + strategy: + canary: + steps: + - setWeight: 20 + - pause: + duration: 10s + workloadRef: + apiVersion: apps/v1 + kind: Deployment + name: rollout-ref-deployment +status: + HPAReplicas: 5 + availableReplicas: 5 + blueGreen: {} + canary: {} + collisionCount: 1 + conditions: + - lastTransitionTime: "2021-07-27T12:14:21Z" + lastUpdateTime: "2021-07-27T12:14:21Z" + message: RolloutCompleted + reason: RolloutCompleted + status: "True" + type: Completed + - lastTransitionTime: "2021-07-27T12:14:11Z" + lastUpdateTime: "2021-07-27T12:14:21Z" + message: ReplicaSet "rollout-ref-deployment-75bbd56864" has successfully progressed. 
+ reason: NewReplicaSetAvailable + status: "True" + type: Progressing + - lastTransitionTime: "2021-07-27T12:14:21Z" + lastUpdateTime: "2021-07-27T12:14:21Z" + message: Rollout has minimum availability + reason: AvailableReason + status: "True" + type: Available + currentPodHash: 75bbd56864 + currentStepHash: 55f5d87bd9 + currentStepIndex: 2 + observedGeneration: "3" + phase: Healthy + readyReplicas: 5 + replicas: 5 + selector: app=rollout-ref-deployment + stableRS: 75bbd56864 + updatedReplicas: 5 diff --git a/pkg/resource_customizations/argoproj.io/Rollout/testdata/healthy_newWorkloadGeneration.yaml b/pkg/resource_customizations/argoproj.io/Rollout/testdata/healthy_newWorkloadGeneration.yaml new file mode 100644 index 0000000..4418a81 --- /dev/null +++ b/pkg/resource_customizations/argoproj.io/Rollout/testdata/healthy_newWorkloadGeneration.yaml @@ -0,0 +1,60 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Rollout +metadata: + annotations: + rollout.argoproj.io/revision: "1" + rollout.argoproj.io/workload-generation: "1" + creationTimestamp: "2021-07-27T12:14:11Z" + generation: 3 + name: rollout-ref-deployment + namespace: default + resourceVersion: "4220" + uid: a3d1d224-ac4f-4f84-9274-e01e1d43b036 +spec: + replicas: 5 + strategy: + canary: + steps: + - setWeight: 20 + - pause: + duration: 10s + workloadRef: + apiVersion: apps/v1 + kind: Deployment + name: rollout-ref-deployment +status: + HPAReplicas: 5 + availableReplicas: 5 + blueGreen: {} + canary: {} + collisionCount: 1 + conditions: + - lastTransitionTime: "2021-07-27T12:14:21Z" + lastUpdateTime: "2021-07-27T12:14:21Z" + message: RolloutCompleted + reason: RolloutCompleted + status: "True" + type: Completed + - lastTransitionTime: "2021-07-27T12:14:11Z" + lastUpdateTime: "2021-07-27T12:14:21Z" + message: ReplicaSet "rollout-ref-deployment-75bbd56864" has successfully progressed. 
+ reason: NewReplicaSetAvailable + status: "True" + type: Progressing + - lastTransitionTime: "2021-07-27T12:14:21Z" + lastUpdateTime: "2021-07-27T12:14:21Z" + message: Rollout has minimum availability + reason: AvailableReason + status: "True" + type: Available + currentPodHash: 75bbd56864 + currentStepHash: 55f5d87bd9 + currentStepIndex: 2 + observedGeneration: "3" + phase: Healthy + readyReplicas: 5 + replicas: 5 + selector: app=rollout-ref-deployment + stableRS: 75bbd56864 + updatedReplicas: 5 + workloadObservedGeneration: "1" diff --git a/pkg/resource_customizations/argoproj.io/Rollout/testdata/newRolloutWithoutStatus.yaml b/pkg/resource_customizations/argoproj.io/Rollout/testdata/newRolloutWithoutStatus.yaml new file mode 100644 index 0000000..089529f --- /dev/null +++ b/pkg/resource_customizations/argoproj.io/Rollout/testdata/newRolloutWithoutStatus.yaml @@ -0,0 +1,31 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Rollout +metadata: + creationTimestamp: "2020-11-13T00:44:55Z" + generation: 1 + name: basic + namespace: argocd-e2e + resourceVersion: "181938" + selfLink: /apis/argoproj.io/v1alpha1/namespaces/argocd-e2e/rollouts/basic + uid: 34e4bbfc-222c-4968-bd60-2b30ae81110d +spec: + selector: + matchLabels: + app: basic + strategy: + canary: + steps: + - setWeight: 50 + - pause: {} + template: + metadata: + labels: + app: basic + spec: + containers: + - image: nginx:1.19-alpine + name: basic + resources: + requests: + cpu: 1m + memory: 16Mi diff --git a/pkg/resource_customizations/argoproj.io/Rollout/testdata/progressing_newGeneration.yaml b/pkg/resource_customizations/argoproj.io/Rollout/testdata/progressing_newGeneration.yaml new file mode 100644 index 0000000..1a45b71 --- /dev/null +++ b/pkg/resource_customizations/argoproj.io/Rollout/testdata/progressing_newGeneration.yaml @@ -0,0 +1,60 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Rollout +metadata: + creationTimestamp: "2020-11-13T00:44:55Z" + generation: 2 + name: basic + namespace: argocd-e2e + resourceVersion: "182108" + selfLink: /apis/argoproj.io/v1alpha1/namespaces/argocd-e2e/rollouts/basic + uid: 34e4bbfc-222c-4968-bd60-2b30ae81110d +spec: + replicas: 1 + selector: + matchLabels: + app: basic + strategy: + canary: + steps: + - setWeight: 50 + - pause: {} + template: + metadata: + creationTimestamp: null + labels: + app: basic + spec: + containers: + - image: nginx:1.19-alpine + name: basic + resources: + requests: + cpu: 1m + memory: 16Mi +status: + HPAReplicas: 1 + availableReplicas: 1 + blueGreen: {} + canary: {} + conditions: + - lastTransitionTime: "2020-11-13T00:48:20Z" + lastUpdateTime: "2020-11-13T00:48:22Z" + message: ReplicaSet "basic-754cb84d5" has successfully progressed. 
+ reason: NewReplicaSetAvailable + status: "True" + type: Progressing + - lastTransitionTime: "2020-11-13T00:48:22Z" + lastUpdateTime: "2020-11-13T00:48:22Z" + message: Rollout has minimum availability + reason: AvailableReason + status: "True" + type: Available + currentPodHash: 754cb84d5 + currentStepHash: 757f5f97b + currentStepIndex: 2 + observedGeneration: "1" # <-- uses new integer observedGeneration + readyReplicas: 1 + replicas: 1 + selector: app=basic + stableRS: 754cb84d5 + updatedReplicas: 1 diff --git a/pkg/resource_customizations/argoproj.io/Rollout/testdata/progressing_newWorkloadGeneration.yaml b/pkg/resource_customizations/argoproj.io/Rollout/testdata/progressing_newWorkloadGeneration.yaml new file mode 100644 index 0000000..bc5f0f4 --- /dev/null +++ b/pkg/resource_customizations/argoproj.io/Rollout/testdata/progressing_newWorkloadGeneration.yaml @@ -0,0 +1,60 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Rollout +metadata: + annotations: + rollout.argoproj.io/revision: "1" + rollout.argoproj.io/workload-generation: "2" + creationTimestamp: "2021-07-27T12:14:11Z" + generation: 3 + name: rollout-ref-deployment + namespace: default + resourceVersion: "4220" + uid: a3d1d224-ac4f-4f84-9274-e01e1d43b036 +spec: + replicas: 5 + strategy: + canary: + steps: + - setWeight: 20 + - pause: + duration: 10s + workloadRef: + apiVersion: apps/v1 + kind: Deployment + name: rollout-ref-deployment +status: + HPAReplicas: 5 + availableReplicas: 5 + blueGreen: {} + canary: {} + collisionCount: 1 + conditions: + - lastTransitionTime: "2021-07-27T12:14:21Z" + lastUpdateTime: "2021-07-27T12:14:21Z" + message: RolloutCompleted + reason: RolloutCompleted + status: "True" + type: Completed + - lastTransitionTime: "2021-07-27T12:14:11Z" + lastUpdateTime: "2021-07-27T12:14:21Z" + message: ReplicaSet "rollout-ref-deployment-75bbd56864" has successfully progressed. 
+ reason: NewReplicaSetAvailable + status: "True" + type: Progressing + - lastTransitionTime: "2021-07-27T12:14:21Z" + lastUpdateTime: "2021-07-27T12:14:21Z" + message: Rollout has minimum availability + reason: AvailableReason + status: "True" + type: Available + currentPodHash: 75bbd56864 + currentStepHash: 55f5d87bd9 + currentStepIndex: 2 + observedGeneration: "3" + phase: Healthy + readyReplicas: 5 + replicas: 5 + selector: app=rollout-ref-deployment + stableRS: 75bbd56864 + updatedReplicas: 5 + workloadObservedGeneration: "1" diff --git a/pkg/resource_customizations/argoproj.io/Rollout/testdata/suspended_controllerPause.yaml b/pkg/resource_customizations/argoproj.io/Rollout/testdata/suspended_controllerPause.yaml new file mode 100644 index 0000000..a3b9af3 --- /dev/null +++ b/pkg/resource_customizations/argoproj.io/Rollout/testdata/suspended_controllerPause.yaml @@ -0,0 +1,52 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Rollout +metadata: + name: canary-demo + namespace: default +spec: + replicas: 5 + revisionHistoryLimit: 1 + selector: + matchLabels: + app: canary-demo + strategy: + canary: + canaryService: canary-demo-preview + steps: + - setWeight: 20 + - pause: {} + template: + metadata: + creationTimestamp: null + labels: + app: canary-demo + spec: + containers: + - image: argoproj/rollouts-demo:yellow + imagePullPolicy: Always + name: canary-demo +status: + HPAReplicas: 5 + availableReplicas: 5 + blueGreen: {} + canary: + stableRS: 75c96899b8 + conditions: + - lastTransitionTime: "2019-12-09T18:34:12Z" + lastUpdateTime: "2019-12-09T18:34:12Z" + message: Rollout is paused + reason: RolloutPaused + status: Unknown + type: Progressing + controllerPause: true + currentPodHash: 6758949f55 + currentStepHash: f64cdc9d + currentStepIndex: 1 + observedGeneration: 5b9f9dd6c6 + pauseConditions: + - reason: CanaryPauseStep + startTime: "2019-12-09T18:34:12Z" + readyReplicas: 5 + replicas: 5 + selector: app=canary-demo + updatedReplicas: 1 \ No newline at end of file diff --git a/pkg/resource_customizations/argoproj.io/Rollout/testdata/suspended_userPause.yaml b/pkg/resource_customizations/argoproj.io/Rollout/testdata/suspended_userPause.yaml new file mode 100644 index 0000000..66c958f --- /dev/null +++ b/pkg/resource_customizations/argoproj.io/Rollout/testdata/suspended_userPause.yaml @@ -0,0 +1,46 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Rollout +metadata: + name: example-rollout-canary + namespace: default +spec: + paused: true + replicas: 5 + selector: + matchLabels: + app: guestbook + strategy: + canary: + steps: + - setWeight: 20 + - pause: {} + template: + metadata: + labels: + app: guestbook + spec: + containers: + - image: gcr.io/heptio-images/ks-guestbook-demo:0.2 + name: guestbook +status: + HPAReplicas: 5 + availableReplicas: 5 + blueGreen: {} + canary: + stableRS: df986d68 + conditions: + - lastTransitionTime: 2019-04-26T20:18:38Z + lastUpdateTime: 2019-04-26T20:18:38Z + message: Rollout is paused + reason: RolloutPaused + status: Unknown + type: Progressing + currentPodHash: 6b566f47b7 + currentStepHash: 6567fc959c + currentStepIndex: 1 + observedGeneration: 5c788f4484 + pauseStartTime: 2019-04-26T20:18:38Z + readyReplicas: 5 + replicas: 5 + selector: app=guestbook + updatedReplicas: 1 \ No newline at end of file diff --git a/pkg/resource_customizations/argoproj.io/Rollout/testdata/suspended_v1.0_pausedRollout.yaml b/pkg/resource_customizations/argoproj.io/Rollout/testdata/suspended_v1.0_pausedRollout.yaml new file mode 100644 index 0000000..9684a91 --- /dev/null +++ 
b/pkg/resource_customizations/argoproj.io/Rollout/testdata/suspended_v1.0_pausedRollout.yaml @@ -0,0 +1,97 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Rollout +metadata: + annotations: + rollout.argoproj.io/revision: '2' + creationTimestamp: '2021-06-30T04:40:44Z' + generation: 5 + labels: + app.kubernetes.io/instance: rollouts-demo + name: rollouts-demo + namespace: rollouts-demo + resourceVersion: '4838641' + uid: bf946046-d90e-49b9-863c-76f82bed3b31 +spec: + replicas: 5 + revisionHistoryLimit: 1 + selector: + matchLabels: + app: rollouts-demo + strategy: + canary: + canaryService: rollouts-demo-desired + stableService: rollouts-demo-stable + steps: + - setWeight: 21 + - pause: {} + - setWeight: 40 + - pause: + duration: 10 + - setWeight: 60 + - pause: + duration: 10 + - setWeight: 80 + - pause: + duration: 10 + template: + metadata: + labels: + app: rollouts-demo + spec: + containers: + - image: 'argoproj/rollouts-demo:yellow' + imagePullPolicy: Always + name: rollouts-demo + ports: + - containerPort: 8080 + name: http + protocol: TCP + resources: + requests: + cpu: 5m + memory: 32Mi +status: + HPAReplicas: 6 + availableReplicas: 6 + blueGreen: {} + canary: {} + conditions: + - lastTransitionTime: '2021-06-30T05:01:22Z' + lastUpdateTime: '2021-06-30T05:01:22Z' + message: RolloutCompleted + reason: RolloutCompleted + status: 'False' + type: Completed + - lastTransitionTime: '2021-06-30T05:01:27Z' + lastUpdateTime: '2021-06-30T05:01:27Z' + message: Rollout has minimum availability + reason: AvailableReason + status: 'True' + type: Available + - lastTransitionTime: '2021-07-15T21:08:55Z' + lastUpdateTime: '2021-07-15T21:08:55Z' + message: Rollout is paused + reason: RolloutPaused + status: Unknown + type: Progressing + - lastTransitionTime: '2021-07-15T21:08:55Z' + lastUpdateTime: '2021-07-15T21:08:55Z' + message: Rollout is paused + reason: RolloutPaused + status: 'True' + type: Paused + controllerPause: true + currentPodHash: 8566c77b97 + currentStepHash: 7d5979db69 + currentStepIndex: 1 + message: CanaryPauseStep + observedGeneration: '5' + pauseConditions: + - reason: CanaryPauseStep + startTime: '2021-07-15T21:08:55Z' + phase: Paused + readyReplicas: 6 + replicas: 6 + selector: app=rollouts-demo + stableRS: 77f4f8ff97 + updatedReplicas: 2 diff --git a/pkg/resource_customizations/argoproj.io/WorkflowTemplate/actions/action_test.yaml b/pkg/resource_customizations/argoproj.io/WorkflowTemplate/actions/action_test.yaml new file mode 100644 index 0000000..db503fe --- /dev/null +++ b/pkg/resource_customizations/argoproj.io/WorkflowTemplate/actions/action_test.yaml @@ -0,0 +1,4 @@ +actionTests: +- action: create-workflow + inputPath: testdata/workflowtemplate.yaml + expectedOutputPath: testdata/workflow.yaml diff --git a/pkg/resource_customizations/argoproj.io/WorkflowTemplate/actions/create-workflow/action.lua b/pkg/resource_customizations/argoproj.io/WorkflowTemplate/actions/create-workflow/action.lua new file mode 100644 index 0000000..1eaffc4 --- /dev/null +++ b/pkg/resource_customizations/argoproj.io/WorkflowTemplate/actions/create-workflow/action.lua @@ -0,0 +1,39 @@ +local os = require("os") + +-- This action constructs a Workflow resource from a WorkflowTemplate resource, to enable creating a WorkflowTemplate instance +-- on demand. +-- It returns an array with a single member - a table with the operation to perform (create) and the Workflow resource. +-- It mimics the output of "argo submit --from=workflowtemplate/" command, declaratively. 
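+-- Hypothetical illustration (the names below are made up): for a WorkflowTemplate named "build"
+-- in namespace "ci", running this action at 2023-06-22 17:35 UTC would create a Workflow named
+-- "build-202306221735" (os.date("!%Y%m%d%H%M") formats the current UTC time), with
+-- spec.workflowTemplateRef.name set to "build" and one ownerReference back to the WorkflowTemplate.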
+ +-- This code is written to mimic what the Argo Workflows API server does to create a Workflow from a WorkflowTemplate. +-- https://github.com/argoproj/argo-workflows/blob/873a58de7dd9dad76d5577b8c4294a58b52849b8/workflow/common/convert.go#L34 + +local workflow = {} +workflow.apiVersion = "argoproj.io/v1alpha1" +workflow.kind = "Workflow" + +workflow.metadata = {} +workflow.metadata.name = obj.metadata.name .. "-" ..os.date("!%Y%m%d%H%M") +workflow.metadata.namespace = obj.metadata.namespace +workflow.metadata.labels = {} +workflow.metadata.labels["workflows.argoproj.io/workflow-template"] = obj.metadata.name + +workflow.spec = {} +workflow.spec.workflowTemplateRef = {} +workflow.spec.workflowTemplateRef.name = obj.metadata.name + +local ownerRef = {} +ownerRef.apiVersion = obj.apiVersion +ownerRef.kind = obj.kind +ownerRef.name = obj.metadata.name +ownerRef.uid = obj.metadata.uid +workflow.metadata.ownerReferences = {} +workflow.metadata.ownerReferences[1] = ownerRef + +local impactedResource = {} +impactedResource.operation = "create" +impactedResource.resource = workflow +local result = {} +result[1] = impactedResource + +return result diff --git a/pkg/resource_customizations/argoproj.io/WorkflowTemplate/actions/discovery.lua b/pkg/resource_customizations/argoproj.io/WorkflowTemplate/actions/discovery.lua new file mode 100644 index 0000000..9a76d96 --- /dev/null +++ b/pkg/resource_customizations/argoproj.io/WorkflowTemplate/actions/discovery.lua @@ -0,0 +1,6 @@ +local actions = {} +actions["create-workflow"] = { + ["iconClass"] = "fa fa-fw fa-play", + ["displayName"] = "Create Workflow" +} +return actions \ No newline at end of file diff --git a/pkg/resource_customizations/argoproj.io/WorkflowTemplate/actions/testdata/workflow.yaml b/pkg/resource_customizations/argoproj.io/WorkflowTemplate/actions/testdata/workflow.yaml new file mode 100644 index 0000000..46063be --- /dev/null +++ b/pkg/resource_customizations/argoproj.io/WorkflowTemplate/actions/testdata/workflow.yaml @@ -0,0 +1,16 @@ +- k8sOperation: create + unstructuredObj: + apiVersion: argoproj.io/v1alpha1 + kind: Workflow + metadata: + labels: + workflows.argoproj.io/workflow-template: workflow-template-submittable + name: workflow-template-submittable-202306221735 + namespace: default + ownerReferences: + - apiVersion: argoproj.io/v1alpha1 + kind: WorkflowTemplate + name: workflow-template-submittable + spec: + workflowTemplateRef: + name: workflow-template-submittable diff --git a/pkg/resource_customizations/argoproj.io/WorkflowTemplate/actions/testdata/workflowtemplate.yaml b/pkg/resource_customizations/argoproj.io/WorkflowTemplate/actions/testdata/workflowtemplate.yaml new file mode 100644 index 0000000..5b7d231 --- /dev/null +++ b/pkg/resource_customizations/argoproj.io/WorkflowTemplate/actions/testdata/workflowtemplate.yaml @@ -0,0 +1,24 @@ +apiVersion: argoproj.io/v1alpha1 +kind: WorkflowTemplate +metadata: + labels: + app.kubernetes.io/instance: test + name: workflow-template-submittable + namespace: default +spec: + arguments: + parameters: + - name: message + value: hello world + entrypoint: whalesay-template + templates: + - container: + args: + - '{{inputs.parameters.message}}' + command: + - cowsay + image: docker/whalesay + inputs: + parameters: + - name: message + name: whalesay-template diff --git a/pkg/resource_customizations/batch/CronJob/actions/action_test.yaml b/pkg/resource_customizations/batch/CronJob/actions/action_test.yaml new file mode 100644 index 0000000..a9b5320 --- /dev/null +++ 
b/pkg/resource_customizations/batch/CronJob/actions/action_test.yaml @@ -0,0 +1,4 @@ +actionTests: +- action: create-job + inputPath: testdata/cronjob.yaml + expectedOutputPath: testdata/job.yaml diff --git a/pkg/resource_customizations/batch/CronJob/actions/create-job/action.lua b/pkg/resource_customizations/batch/CronJob/actions/create-job/action.lua new file mode 100644 index 0000000..a6f3253 --- /dev/null +++ b/pkg/resource_customizations/batch/CronJob/actions/create-job/action.lua @@ -0,0 +1,64 @@ +local os = require("os") + +-- This action constructs a Job resource from a CronJob resource, to enable creating a CronJob instance on demand. +-- It returns an array with a single member - a table with the operation to perform (create) and the Job resource. +-- It mimics the output of "kubectl create job --from=" command, declaratively. + +-- The deep-copy function below is ChatGPT-generated code. +-- Since empty tables are treated as empty arrays, the resulting k8s resource might be invalid (arrays instead of maps). +-- So empty tables are not cloned to the target object. +function deepCopy(object) + local lookup_table = {} + local function _copy(obj) + if type(obj) ~= "table" then + return obj + elseif lookup_table[obj] then + return lookup_table[obj] + elseif next(obj) == nil then + return nil + else + local new_table = {} + lookup_table[obj] = new_table + for key, value in pairs(obj) do + new_table[_copy(key)] = _copy(value) + end + return setmetatable(new_table, getmetatable(obj)) + end + end + return _copy(object) +end + +local job = {} +job.apiVersion = "batch/v1" +job.kind = "Job" + +job.metadata = deepCopy(obj.spec.jobTemplate.metadata) +if job.metadata == nil then + job.metadata = {} +end +job.metadata.name = obj.metadata.name .. "-" ..os.date("!%Y%m%d%H%M") +job.metadata.namespace = obj.metadata.namespace +if job.metadata.annotations == nil then + job.metadata.annotations = {} +end +job.metadata.annotations['cronjob.kubernetes.io/instantiate'] = "manual" + +local ownerRef = {} +ownerRef.apiVersion = obj.apiVersion +ownerRef.kind = obj.kind +ownerRef.name = obj.metadata.name +ownerRef.uid = obj.metadata.uid +ownerRef.blockOwnerDeletion = true +ownerRef.controller = true +job.metadata.ownerReferences = {} +job.metadata.ownerReferences[1] = ownerRef + +job.spec = deepCopy(obj.spec.jobTemplate.spec) + +local impactedResource = {} +impactedResource.operation = "create" +impactedResource.resource = job +local result = {} +result[1] = impactedResource + +return result diff --git a/pkg/resource_customizations/batch/CronJob/actions/discovery.lua b/pkg/resource_customizations/batch/CronJob/actions/discovery.lua new file mode 100644 index 0000000..61be2c3 --- /dev/null +++ b/pkg/resource_customizations/batch/CronJob/actions/discovery.lua @@ -0,0 +1,6 @@ +local actions = {} +actions["create-job"] = { + ["iconClass"] = "fa fa-fw fa-play", + ["displayName"] = "Create Job" +} +return actions \ No newline at end of file diff --git a/pkg/resource_customizations/batch/CronJob/actions/testdata/cronjob.yaml b/pkg/resource_customizations/batch/CronJob/actions/testdata/cronjob.yaml new file mode 100644 index 0000000..2c45c5e --- /dev/null +++ b/pkg/resource_customizations/batch/CronJob/actions/testdata/cronjob.yaml @@ -0,0 +1,33 @@ +apiVersion: batch/v1 +kind: CronJob +metadata: + name: hello + namespace: test-ns + uid: "123" +spec: + schedule: "* * * * *" + jobTemplate: + metadata: + labels: + my: label + annotations: + my: annotation + spec: + ttlSecondsAfterFinished: 100 + template: + metadata: +
labels: + pod: label + annotations: + pod: annotation + spec: + containers: + - name: hello + image: busybox:1.28 + imagePullPolicy: IfNotPresent + command: + - /bin/sh + - -c + - date; echo Hello from the Kubernetes cluster + resources: {} + restartPolicy: OnFailure \ No newline at end of file diff --git a/pkg/resource_customizations/batch/CronJob/actions/testdata/job.yaml b/pkg/resource_customizations/batch/CronJob/actions/testdata/job.yaml new file mode 100644 index 0000000..322ab04 --- /dev/null +++ b/pkg/resource_customizations/batch/CronJob/actions/testdata/job.yaml @@ -0,0 +1,30 @@ +- k8sOperation: create + unstructuredObj: + apiVersion: batch/v1 + kind: Job + metadata: + name: hello-00000000000 + namespace: test-ns + labels: + my: label + annotations: + cronjob.kubernetes.io/instantiate: manual + my: annotation + spec: + ttlSecondsAfterFinished: 100 + template: + metadata: + labels: + pod: label + annotations: + pod: annotation + spec: + containers: + - name: hello + image: busybox:1.28 + imagePullPolicy: IfNotPresent + command: + - /bin/sh + - -c + - date; echo Hello from the Kubernetes cluster + restartPolicy: OnFailure diff --git a/pkg/resource_customizations/beat.k8s.elastic.co/Beat/health.lua b/pkg/resource_customizations/beat.k8s.elastic.co/Beat/health.lua new file mode 100644 index 0000000..c7639db --- /dev/null +++ b/pkg/resource_customizations/beat.k8s.elastic.co/Beat/health.lua @@ -0,0 +1,31 @@ +local hs = {} + +if obj.status ~= nil and (obj.status.health ~= nil or obj.status.expectedNodes ~= nil) then + if obj.status.health == "red" then + hs.status = "Degraded" + hs.message = "Elastic Beat status is Red" + return hs + elseif obj.status.health == "green" then + hs.status = "Healthy" + hs.message = "Elastic Beat status is Green" + return hs + elseif obj.status.health == "yellow" then + if obj.status.availableNodes ~= nil and obj.status.expectedNodes ~= nil then + hs.status = "Progressing" + hs.message = "Elastic Beat status is deploying, there is " .. obj.status.availableNodes .. " instance(s) on " .. obj.status.expectedNodes .. " expected" + return hs + else + hs.status = "Progressing" + hs.message = "Elastic Beat phase is progressing" + return hs + end + elseif obj.status.health == nil then + hs.status = "Progressing" + hs.message = "Elastic Beat phase is progressing" + return hs + end +end + +hs.status = "Unknown" +hs.message = "Elastic Beat status is unknown. 
Ensure your ArgoCD is current and then check for/file a bug report: https://github.com/argoproj/argo-cd/issues" +return hs diff --git a/pkg/resource_customizations/beat.k8s.elastic.co/Beat/health_test.yaml b/pkg/resource_customizations/beat.k8s.elastic.co/Beat/health_test.yaml new file mode 100644 index 0000000..fb44e99 --- /dev/null +++ b/pkg/resource_customizations/beat.k8s.elastic.co/Beat/health_test.yaml @@ -0,0 +1,29 @@ +tests: +- healthStatus: + status: Healthy + message: "Elastic Beat status is Green" + inputPath: testdata/ready_green.yaml +- healthStatus: + status: Progressing + message: "Elastic Beat phase is progressing" + inputPath: testdata/ready_yellow_single_node.yaml +- healthStatus: + status: Progressing + message: "Elastic Beat status is deploying, there is 1 instance(s) on 2 expected" + inputPath: testdata/ready_yellow.yaml +- healthStatus: + status: Progressing + message: "Elastic Beat phase is progressing" + inputPath: testdata/progressing.yaml +- healthStatus: + status: Degraded + message: "Elastic Beat status is Red" + inputPath: testdata/ready_red.yaml +- healthStatus: + status: Unknown + message: "Elastic Beat status is unknown. Ensure your ArgoCD is current and then check for/file a bug report: https://github.com/argoproj/argo-cd/issues" + inputPath: testdata/unknown.yaml +- healthStatus: + status: Unknown + message: "Elastic Beat status is unknown. Ensure your ArgoCD is current and then check for/file a bug report: https://github.com/argoproj/argo-cd/issues" + inputPath: testdata/invalid.yaml diff --git a/pkg/resource_customizations/beat.k8s.elastic.co/Beat/testdata/invalid.yaml b/pkg/resource_customizations/beat.k8s.elastic.co/Beat/testdata/invalid.yaml new file mode 100644 index 0000000..3eca183 --- /dev/null +++ b/pkg/resource_customizations/beat.k8s.elastic.co/Beat/testdata/invalid.yaml @@ -0,0 +1,12 @@ +apiVersion: beat.k8s.elastic.co/v1beta1 +kind: Beat +metadata: + name: quickstart +spec: + version: 8.8.8 + type: metricbeat +status: + expectedNodes: 1 + health: invalid + observedGeneration: 1 + version: 8.8.1 diff --git a/pkg/resource_customizations/beat.k8s.elastic.co/Beat/testdata/progressing.yaml b/pkg/resource_customizations/beat.k8s.elastic.co/Beat/testdata/progressing.yaml new file mode 100644 index 0000000..b007ad7 --- /dev/null +++ b/pkg/resource_customizations/beat.k8s.elastic.co/Beat/testdata/progressing.yaml @@ -0,0 +1,11 @@ +apiVersion: beat.k8s.elastic.co/v1beta1 +kind: Beat +metadata: + name: quickstart +spec: + version: 8.8.8 + type: metricbeat +status: + expectedNodes: 1 + observedGeneration: 1 + version: 8.8.1 diff --git a/pkg/resource_customizations/beat.k8s.elastic.co/Beat/testdata/ready_green.yaml b/pkg/resource_customizations/beat.k8s.elastic.co/Beat/testdata/ready_green.yaml new file mode 100644 index 0000000..3f3c186 --- /dev/null +++ b/pkg/resource_customizations/beat.k8s.elastic.co/Beat/testdata/ready_green.yaml @@ -0,0 +1,13 @@ +apiVersion: beat.k8s.elastic.co/v1beta1 +kind: Beat +metadata: + name: quickstart +spec: + version: 8.8.8 + type: metricbeat +status: + expectedNodes: 1 + availableNodes: 1 + health: green + observedGeneration: 1 + version: 8.8.1 diff --git a/pkg/resource_customizations/beat.k8s.elastic.co/Beat/testdata/ready_red.yaml b/pkg/resource_customizations/beat.k8s.elastic.co/Beat/testdata/ready_red.yaml new file mode 100644 index 0000000..fc2433c --- /dev/null +++ b/pkg/resource_customizations/beat.k8s.elastic.co/Beat/testdata/ready_red.yaml @@ -0,0 +1,10 @@ +apiVersion: beat.k8s.elastic.co/v1beta1 +kind: Beat 
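+# Per health.lua above, health: red maps to status Degraded ("Elastic Beat status is Red").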
+metadata: + name: quickstart +spec: + version: 8.8.8 + type: metricbeat +status: + expectedNodes: 1 + health: red diff --git a/pkg/resource_customizations/beat.k8s.elastic.co/Beat/testdata/ready_yellow.yaml b/pkg/resource_customizations/beat.k8s.elastic.co/Beat/testdata/ready_yellow.yaml new file mode 100644 index 0000000..831ee28 --- /dev/null +++ b/pkg/resource_customizations/beat.k8s.elastic.co/Beat/testdata/ready_yellow.yaml @@ -0,0 +1,11 @@ +apiVersion: beat.k8s.elastic.co/v1beta1 +kind: Beat +metadata: + name: quickstart +spec: + version: 8.8.8 + type: metricbeat +status: + availableNodes: 1 + expectedNodes: 2 + health: yellow diff --git a/pkg/resource_customizations/beat.k8s.elastic.co/Beat/testdata/ready_yellow_single_node.yaml b/pkg/resource_customizations/beat.k8s.elastic.co/Beat/testdata/ready_yellow_single_node.yaml new file mode 100644 index 0000000..d652b5a --- /dev/null +++ b/pkg/resource_customizations/beat.k8s.elastic.co/Beat/testdata/ready_yellow_single_node.yaml @@ -0,0 +1,10 @@ +apiVersion: beat.k8s.elastic.co/v1beta1 +kind: Beat +metadata: + name: quickstart +spec: + version: 8.8.8 + type: metricbeat +status: + expectedNodes: 1 + health: yellow diff --git a/pkg/resource_customizations/beat.k8s.elastic.co/Beat/testdata/unknown.yaml b/pkg/resource_customizations/beat.k8s.elastic.co/Beat/testdata/unknown.yaml new file mode 100644 index 0000000..dbcca36 --- /dev/null +++ b/pkg/resource_customizations/beat.k8s.elastic.co/Beat/testdata/unknown.yaml @@ -0,0 +1,8 @@ +apiVersion: beat.k8s.elastic.co/v1beta1 +kind: Beat +metadata: + name: quickstart +spec: + version: 8.8.8 + type: metricbeat +status: {} diff --git a/pkg/resource_customizations/bitnami.com/SealedSecret/health.lua b/pkg/resource_customizations/bitnami.com/SealedSecret/health.lua new file mode 100644 index 0000000..e18a9a8 --- /dev/null +++ b/pkg/resource_customizations/bitnami.com/SealedSecret/health.lua @@ -0,0 +1,20 @@ +local health_status={} +if obj.status ~= nil then + if obj.status.conditions ~= nil then + for i, condition in ipairs(obj.status.conditions) do + if condition.type == "Synced" and condition.status == "False" then + health_status.status = "Degraded" + health_status.message = condition.message + return health_status + end + if condition.type == "Synced" and condition.status == "True" then + health_status.status = "Healthy" + health_status.message = condition.message + return health_status + end + end + end +end +health_status.status = "Progressing" +health_status.message = "Waiting for Sealed Secret to be decrypted" +return health_status diff --git a/pkg/resource_customizations/bitnami.com/SealedSecret/health_test.yaml b/pkg/resource_customizations/bitnami.com/SealedSecret/health_test.yaml new file mode 100644 index 0000000..8d26cf2 --- /dev/null +++ b/pkg/resource_customizations/bitnami.com/SealedSecret/health_test.yaml @@ -0,0 +1,13 @@ +tests: +- healthStatus: + status: Progressing + message: "Waiting for Sealed Secret to be decrypted" + inputPath: testdata/progressing.yaml +- healthStatus: + status: Degraded + message: "no key could decrypt secret (.dockerconfigjson)" + inputPath: testdata/degraded.yaml +- healthStatus: + status: Healthy + message: "" + inputPath: testdata/healthy.yaml diff --git a/pkg/resource_customizations/bitnami.com/SealedSecret/testdata/degraded.yaml b/pkg/resource_customizations/bitnami.com/SealedSecret/testdata/degraded.yaml new file mode 100644 index 0000000..01a2f6c --- /dev/null +++ b/pkg/resource_customizations/bitnami.com/SealedSecret/testdata/degraded.yaml @@ -0,0 
+1,24 @@ +apiVersion: bitnami.com/v1alpha1 +kind: SealedSecret +metadata: + labels: + app.kubernetes.io/instance: sealed-secrets + name: test + namespace: test +spec: + encryptedData: + .dockerconfigjson: xyz + template: + metadata: + creationTimestamp: null + name: test + namespace: test + type: kubernetes.io/dockerconfigjson +status: + conditions: + - lastTransitionTime: "2021-02-11T15:56:29Z" + lastUpdateTime: "2021-02-11T15:57:37Z" + message: no key could decrypt secret (.dockerconfigjson) + status: "False" + type: Synced + observedGeneration: 1 \ No newline at end of file diff --git a/pkg/resource_customizations/bitnami.com/SealedSecret/testdata/healthy.yaml b/pkg/resource_customizations/bitnami.com/SealedSecret/testdata/healthy.yaml new file mode 100644 index 0000000..b713453 --- /dev/null +++ b/pkg/resource_customizations/bitnami.com/SealedSecret/testdata/healthy.yaml @@ -0,0 +1,24 @@ +apiVersion: bitnami.com/v1alpha1 +kind: SealedSecret +metadata: + labels: + app.kubernetes.io/instance: sealed-secrets + name: test + namespace: test +spec: + encryptedData: + .dockerconfigjson: xyz + template: + metadata: + creationTimestamp: null + name: test + namespace: test + type: kubernetes.io/dockerconfigjson +status: + conditions: + - lastTransitionTime: "2021-02-11T15:56:29Z" + lastUpdateTime: "2021-02-11T15:57:37Z" + message: "" + status: "True" + type: Synced + observedGeneration: 1 \ No newline at end of file diff --git a/pkg/resource_customizations/bitnami.com/SealedSecret/testdata/progressing.yaml b/pkg/resource_customizations/bitnami.com/SealedSecret/testdata/progressing.yaml new file mode 100644 index 0000000..f8d51f3 --- /dev/null +++ b/pkg/resource_customizations/bitnami.com/SealedSecret/testdata/progressing.yaml @@ -0,0 +1,16 @@ +apiVersion: bitnami.com/v1alpha1 +kind: SealedSecret +metadata: + labels: + app.kubernetes.io/instance: sealed-secrets + name: test + namespace: test +spec: + encryptedData: + .dockerconfigjson: xyz + template: + metadata: + creationTimestamp: null + name: test + namespace: test + type: kubernetes.io/dockerconfigjson diff --git a/pkg/resource_customizations/cassandra.rook.io/Cluster/health.lua b/pkg/resource_customizations/cassandra.rook.io/Cluster/health.lua new file mode 100644 index 0000000..74979f8 --- /dev/null +++ b/pkg/resource_customizations/cassandra.rook.io/Cluster/health.lua @@ -0,0 +1,24 @@ +local hs = {} +if obj.status ~= nil then + if obj.status.racks ~= nil then + local all_racks_good = true + for key, value in pairs(obj.status.racks) do + if all_racks_good and value.members ~= nil and value.readyMembers ~= nil and value.members ~= value.readyMembers then + all_racks_good = false + break + end + end + if all_racks_good then + hs.status = "Healthy" + else + hs.status = "Progressing" + hs.message = "Waiting for Cassandra Cluster" + end + return hs + end +end + +hs.status = "Progressing" +hs.message = "Waiting for Cassandra Cluster" +return hs + diff --git a/pkg/resource_customizations/cassandra.rook.io/Cluster/health_test.yaml b/pkg/resource_customizations/cassandra.rook.io/Cluster/health_test.yaml new file mode 100644 index 0000000..2f7b7b5 --- /dev/null +++ b/pkg/resource_customizations/cassandra.rook.io/Cluster/health_test.yaml @@ -0,0 +1,9 @@ +tests: +- healthStatus: + status: Progressing + message: "Waiting for Cassandra Cluster" + inputPath: testdata/progressing.yaml +- healthStatus: + status: Healthy + message: "" + inputPath: testdata/healthy.yaml diff --git a/pkg/resource_customizations/cassandra.rook.io/Cluster/testdata/healthy.yaml 
b/pkg/resource_customizations/cassandra.rook.io/Cluster/testdata/healthy.yaml new file mode 100644 index 0000000..b42422a --- /dev/null +++ b/pkg/resource_customizations/cassandra.rook.io/Cluster/testdata/healthy.yaml @@ -0,0 +1,96 @@ +apiVersion: cassandra.rook.io/v1alpha1 +kind: Cluster +metadata: + name: rook-cassandra + namespace: rook-cassandra +spec: + version: 3.11.6 + repository: my-private-repo.io/cassandra + mode: cassandra + # A key/value list of annotations + annotations: + # key: value + datacenter: + name: us-east-1 + racks: + - name: us-east-1a + members: 3 + storage: + volumeClaimTemplates: + - metadata: + name: rook-cassandra-data + spec: + storageClassName: my-storage-class + resources: + requests: + storage: 200Gi + resources: + requests: + cpu: 8 + memory: 32Gi + limits: + cpu: 8 + memory: 32Gi + - name: us-east-1b + members: 3 + storage: + volumeClaimTemplates: + - metadata: + name: rook-cassandra-data + spec: + storageClassName: my-storage-class + resources: + requests: + storage: 200Gi + resources: + requests: + cpu: 8 + memory: 32Gi + limits: + cpu: 8 + memory: 32Gi + - name: us-east-1c + members: 3 + storage: + volumeClaimTemplates: + - metadata: + name: rook-cassandra-data + spec: + storageClassName: my-storage-class + resources: + requests: + storage: 200Gi + resources: + requests: + cpu: 8 + memory: 32Gi + limits: + cpu: 8 + memory: 32Gi + # A key/value list of annotations + annotations: + # key: value + placement: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: topology.kubernetes.io/region + operator: In + values: + - us-east-1 + - key: topology.kubernetes.io/zone + operator: In + values: + - us-east-1a +status: + racks: + us-east-1a: + members: 3 + readyMembers: 3 + us-east-1b: + members: 3 + readyMembers: 3 + us-east-1c: + members: 3 + readyMembers: 3 diff --git a/pkg/resource_customizations/cassandra.rook.io/Cluster/testdata/progressing.yaml b/pkg/resource_customizations/cassandra.rook.io/Cluster/testdata/progressing.yaml new file mode 100644 index 0000000..4034bec --- /dev/null +++ b/pkg/resource_customizations/cassandra.rook.io/Cluster/testdata/progressing.yaml @@ -0,0 +1,96 @@ +apiVersion: cassandra.rook.io/v1alpha1 +kind: Cluster +metadata: + name: rook-cassandra + namespace: rook-cassandra +spec: + version: 3.11.6 + repository: my-private-repo.io/cassandra + mode: cassandra + # A key/value list of annotations + annotations: + # key: value + datacenter: + name: us-east-1 + racks: + - name: us-east-1a + members: 3 + storage: + volumeClaimTemplates: + - metadata: + name: rook-cassandra-data + spec: + storageClassName: my-storage-class + resources: + requests: + storage: 200Gi + resources: + requests: + cpu: 8 + memory: 32Gi + limits: + cpu: 8 + memory: 32Gi + - name: us-east-1b + members: 3 + storage: + volumeClaimTemplates: + - metadata: + name: rook-cassandra-data + spec: + storageClassName: my-storage-class + resources: + requests: + storage: 200Gi + resources: + requests: + cpu: 8 + memory: 32Gi + limits: + cpu: 8 + memory: 32Gi + - name: us-east-1c + members: 3 + storage: + volumeClaimTemplates: + - metadata: + name: rook-cassandra-data + spec: + storageClassName: my-storage-class + resources: + requests: + storage: 200Gi + resources: + requests: + cpu: 8 + memory: 32Gi + limits: + cpu: 8 + memory: 32Gi + # A key/value list of annotations + annotations: + # key: value + placement: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - 
matchExpressions: + - key: topology.kubernetes.io/region + operator: In + values: + - us-east-1 + - key: topology.kubernetes.io/zone + operator: In + values: + - us-east-1a +status: + racks: + us-east-1a: + members: 3 + readyMembers: 1 + us-east-1b: + members: 3 + readyMembers: 0 + us-east-1c: + members: 3 + readyMembers: 3 diff --git a/pkg/resource_customizations/cdi.kubevirt.io/DataVolume/health.lua b/pkg/resource_customizations/cdi.kubevirt.io/DataVolume/health.lua new file mode 100644 index 0000000..10e3d1b --- /dev/null +++ b/pkg/resource_customizations/cdi.kubevirt.io/DataVolume/health.lua @@ -0,0 +1,25 @@ +local hs = { status="Progressing", message="No status available"} +if obj.status ~= nil then + if obj.status.phase ~= nil then + hs.message = obj.status.phase + if hs.message == "Succeeded" then + hs.status = "Healthy" + return hs + elseif hs.message == "Failed" or hs.message == "Unknown" then + hs.status = "Degraded" + elseif hs.message == "Paused" then + hs.status = "Suspended" + return hs + end + end + if obj.status.conditions ~= nil then + for i, condition in ipairs(obj.status.conditions) do + if condition.type == "Running" and condition.status == "False" and condition.reason == "Error" then + hs.status = "Degraded" + hs.message = condition.message + return hs + end + end + end +end +return hs diff --git a/pkg/resource_customizations/cdi.kubevirt.io/DataVolume/health_test.yaml b/pkg/resource_customizations/cdi.kubevirt.io/DataVolume/health_test.yaml new file mode 100644 index 0000000..4f6ad1d --- /dev/null +++ b/pkg/resource_customizations/cdi.kubevirt.io/DataVolume/health_test.yaml @@ -0,0 +1,21 @@ +tests: +- healthStatus: + status: Progressing + message: "No status available" + inputPath: testdata/progressing_nostatus.yaml +- healthStatus: + status: Progressing + message: "ImportInProgress" + inputPath: testdata/progressing_importing.yaml +- healthStatus: + status: Healthy + message: "Succeeded" + inputPath: testdata/healthy_succeeded.yaml +- healthStatus: + status: Progressing + message: "ImportScheduled" + inputPath: testdata/progressing_containercreating.yaml +- healthStatus: + status: Degraded + message: "Unable to connect to http data source: expected status code 200, got 404. 
Status: 404 Not Found" + inputPath: testdata/degraded_badurl.yaml diff --git a/pkg/resource_customizations/cdi.kubevirt.io/DataVolume/testdata/degraded_badurl.yaml b/pkg/resource_customizations/cdi.kubevirt.io/DataVolume/testdata/degraded_badurl.yaml new file mode 100644 index 0000000..409553c --- /dev/null +++ b/pkg/resource_customizations/cdi.kubevirt.io/DataVolume/testdata/degraded_badurl.yaml @@ -0,0 +1,41 @@ +apiVersion: cdi.kubevirt.io/v1beta1 +kind: DataVolume +metadata: + creationTimestamp: "2021-10-25T22:23:10Z" + generation: 14 + name: example-badimport-dv + namespace: default + resourceVersion: "44636962" + uid: 8d55b149-d418-47d6-b60b-48d823142418 +spec: + pvc: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + source: + http: + url: https://download.fedoraproject.org/pub/no-such-image.xz +status: + conditions: + - lastHeartbeatTime: "2021-10-25T22:23:15Z" + lastTransitionTime: "2021-10-25T22:23:15Z" + message: PVC example-badimport-dv Bound + reason: Bound + status: "True" + type: Bound + - lastHeartbeatTime: "2021-10-25T22:23:47Z" + lastTransitionTime: "2021-10-25T22:23:10Z" + status: "False" + type: Ready + - lastHeartbeatTime: "2021-10-25T22:23:47Z" + lastTransitionTime: "2021-10-25T22:23:47Z" + message: 'Unable to connect to http data source: expected status code 200, got + 404. Status: 404 Not Found' + reason: Error + status: "False" + type: Running + phase: ImportInProgress + progress: N/A + restartCount: 2 diff --git a/pkg/resource_customizations/cdi.kubevirt.io/DataVolume/testdata/healthy_succeeded.yaml b/pkg/resource_customizations/cdi.kubevirt.io/DataVolume/testdata/healthy_succeeded.yaml new file mode 100644 index 0000000..f15a5ae --- /dev/null +++ b/pkg/resource_customizations/cdi.kubevirt.io/DataVolume/testdata/healthy_succeeded.yaml @@ -0,0 +1,41 @@ +apiVersion: cdi.kubevirt.io/v1beta1 +kind: DataVolume +metadata: + annotations: + cdi.kubevirt.io/storage.bind.immediate.requested: "true" + kubevirt.ui/provider: fedora + labels: + app.kubernetes.io/instance: datavolumes + name: fedora + namespace: openshift-virtualization-os-images +spec: + pvc: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi + volumeMode: Filesystem + source: + http: + url: https://mirror.arizona.edu/fedora/linux/releases/34/Cloud/x86_64/images/Fedora-Cloud-Base-34-1.2.x86_64.raw.xz +status: + conditions: + - lastHeartbeatTime: "2021-08-30T21:31:52Z" + lastTransitionTime: "2021-08-30T21:31:52Z" + message: PVC fedora Bound + reason: Bound + status: "True" + type: Bound + - lastHeartbeatTime: "2021-08-30T21:34:36Z" + lastTransitionTime: "2021-08-30T21:34:36Z" + status: "True" + type: Ready + - lastHeartbeatTime: "2021-08-30T21:34:36Z" + lastTransitionTime: "2021-08-30T21:34:36Z" + message: Import Complete + reason: Completed + status: "False" + type: Running + phase: Succeeded + progress: 100.0% diff --git a/pkg/resource_customizations/cdi.kubevirt.io/DataVolume/testdata/progressing_containercreating.yaml b/pkg/resource_customizations/cdi.kubevirt.io/DataVolume/testdata/progressing_containercreating.yaml new file mode 100644 index 0000000..b4bf917 --- /dev/null +++ b/pkg/resource_customizations/cdi.kubevirt.io/DataVolume/testdata/progressing_containercreating.yaml @@ -0,0 +1,35 @@ +apiVersion: cdi.kubevirt.io/v1beta1 +kind: DataVolume +metadata: + creationTimestamp: "2021-10-25T22:23:10Z" + name: example-badimport-dv + namespace: default +spec: + pvc: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + source: + http: + url: 
https://download.fedoraproject.org/pub/no-such-image.xz +status: + conditions: + - lastHeartbeatTime: "2021-10-25T22:23:15Z" + lastTransitionTime: "2021-10-25T22:23:15Z" + message: PVC example-badimport-dv Bound + reason: Bound + status: "True" + type: Bound + - lastHeartbeatTime: "2021-10-25T22:23:10Z" + lastTransitionTime: "2021-10-25T22:23:10Z" + status: "False" + type: Ready + - lastHeartbeatTime: "2021-10-25T22:23:15Z" + lastTransitionTime: "2021-10-25T22:23:10Z" + reason: ContainerCreating + status: "False" + type: Running + phase: ImportScheduled + progress: N/A diff --git a/pkg/resource_customizations/cdi.kubevirt.io/DataVolume/testdata/progressing_importing.yaml b/pkg/resource_customizations/cdi.kubevirt.io/DataVolume/testdata/progressing_importing.yaml new file mode 100644 index 0000000..86da35f --- /dev/null +++ b/pkg/resource_customizations/cdi.kubevirt.io/DataVolume/testdata/progressing_importing.yaml @@ -0,0 +1,42 @@ +apiVersion: cdi.kubevirt.io/v1beta1 +kind: DataVolume +metadata: + annotations: + cdi.kubevirt.io/storage.bind.immediate.requested: "true" + kubevirt.ui/provider: centos + labels: + app.kubernetes.io/instance: datavolumes + name: centos8 + namespace: openshift-virtualization-os-images +spec: + pvc: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi + volumeMode: Filesystem + source: + http: + url: https://cloud.centos.org/centos/8-stream/x86_64/images/CentOS-Stream-GenericCloud-8-20210603.0.x86_64.qcow2 +status: + conditions: + - lastHeartbeatTime: "2021-09-07T15:24:46Z" + lastTransitionTime: "2021-09-07T15:24:46Z" + message: PVC centos8 Bound + reason: Bound + status: "True" + type: Bound + - lastHeartbeatTime: "2021-09-07T15:25:33Z" + lastTransitionTime: "2021-09-07T15:24:37Z" + reason: TransferRunning + status: "False" + type: Ready + - lastHeartbeatTime: "2021-09-07T15:24:55Z" + lastTransitionTime: "2021-09-07T15:24:55Z" + reason: Pod is running + status: "True" + type: Running + phase: ImportInProgress + progress: 2.00% + diff --git a/pkg/resource_customizations/cdi.kubevirt.io/DataVolume/testdata/progressing_nostatus.yaml b/pkg/resource_customizations/cdi.kubevirt.io/DataVolume/testdata/progressing_nostatus.yaml new file mode 100644 index 0000000..a7c6f0f --- /dev/null +++ b/pkg/resource_customizations/cdi.kubevirt.io/DataVolume/testdata/progressing_nostatus.yaml @@ -0,0 +1,22 @@ +apiVersion: cdi.kubevirt.io/v1beta1 +kind: DataVolume +metadata: + annotations: + cdi.kubevirt.io/storage.bind.immediate.requested: "true" + kubevirt.ui/provider: centos + labels: + app.kubernetes.io/instance: datavolumes + name: centos8 + namespace: openshift-virtualization-os-images +spec: + pvc: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi + volumeMode: Filesystem + source: + http: + url: https://cloud.centos.org/centos/8-stream/x86_64/images/CentOS-Stream-GenericCloud-8-20210603.0.x86_64.qcow2 +status: {} diff --git a/pkg/resource_customizations/cert-manager.io/Certificate/health.lua b/pkg/resource_customizations/cert-manager.io/Certificate/health.lua new file mode 100644 index 0000000..fce5bcb --- /dev/null +++ b/pkg/resource_customizations/cert-manager.io/Certificate/health.lua @@ -0,0 +1,31 @@ +local hs = {} +if obj.status ~= nil then + if obj.status.conditions ~= nil then + + -- Always Handle Issuing First to ensure consistent behaviour + for i, condition in ipairs(obj.status.conditions) do + if condition.type == "Issuing" and condition.status == "True" then + hs.status = "Progressing" + hs.message = condition.message + 
return hs + end + end + + for i, condition in ipairs(obj.status.conditions) do + if condition.type == "Ready" and condition.status == "False" then + hs.status = "Degraded" + hs.message = condition.message + return hs + end + if condition.type == "Ready" and condition.status == "True" then + hs.status = "Healthy" + hs.message = condition.message + return hs + end + end + end +end + +hs.status = "Progressing" +hs.message = "Waiting for certificate" +return hs diff --git a/pkg/resource_customizations/cert-manager.io/Certificate/health_test.yaml b/pkg/resource_customizations/cert-manager.io/Certificate/health_test.yaml new file mode 100644 index 0000000..1af7b1a --- /dev/null +++ b/pkg/resource_customizations/cert-manager.io/Certificate/health_test.yaml @@ -0,0 +1,26 @@ +tests: +- healthStatus: + status: Progressing + message: Waiting for certificate + inputPath: testdata/progressing_noStatus.yaml +- healthStatus: + status: Progressing + message: Issuing certificate as Secret does not exist + inputPath: testdata/progressing_issuing.yaml +- healthStatus: + status: Progressing + message: Issuing certificate as Secret does not exist + inputPath: testdata/progressing_issuing_last.yaml +- healthStatus: + status: Degraded + message: 'Resource validation failed: spec.acme.config: Required value: no ACME + solver configuration specified for domain "cd.apps.argoproj.io"' + inputPath: testdata/degraded_configError.yaml +- healthStatus: + status: Healthy + message: 'Certificate issued successfully' + inputPath: testdata/healthy_issued.yaml +- healthStatus: + status: Healthy + message: 'Certificate renewed successfully' + inputPath: testdata/healthy_renewed.yaml diff --git a/pkg/resource_customizations/cert-manager.io/Certificate/testdata/degraded_configError.yaml b/pkg/resource_customizations/cert-manager.io/Certificate/testdata/degraded_configError.yaml new file mode 100644 index 0000000..335d42f --- /dev/null +++ b/pkg/resource_customizations/cert-manager.io/Certificate/testdata/degraded_configError.yaml @@ -0,0 +1,35 @@ +apiVersion: cert-manager.io/v1alpha2 +kind: Certificate +metadata: + annotations: + kubectl.kubernetes.io/last-applied-configuration: | + {"apiVersion":"cert-manager.io/v1alpha2","kind":"Certificate","metadata":{"annotations":{},"name":"test-cert","namespace":"argocd"},"spec":{"acme":{"config":[{"domains":["cd.apps.argoproj.io"],"http01":{"ingress":"http01"}}]},"commonName":"cd.apps.argoproj.io","dnsNames":["cd.apps.argoproj.io"],"issuerRef":{"kind":"Issuer","name":"argo-cd-issuer"}}} + creationTimestamp: "2019-02-15T18:17:06Z" + generation: 1 + name: test-cert + namespace: argocd + resourceVersion: "68338442" + selfLink: /apis/cert-manager.io/v1alpha2/namespaces/argocd/certificates/test-cert + uid: e6cfba50-314d-11e9-be3f-42010a800011 +spec: + acme: + config: + - domains: + - cd.apps.argoproj.io123 + http01: + ingress: http01 + commonName: cd.apps.argoproj.io + dnsNames: + - cd.apps.argoproj.io + issuerRef: + kind: Issuer + name: argo-cd-issuer + secretName: test-secret +status: + conditions: + - lastTransitionTime: "2019-02-15T18:26:37Z" + message: 'Resource validation failed: spec.acme.config: Required value: no ACME + solver configuration specified for domain "cd.apps.argoproj.io"' + reason: ConfigError + status: "False" + type: Ready diff --git a/pkg/resource_customizations/cert-manager.io/Certificate/testdata/healthy_issued.yaml b/pkg/resource_customizations/cert-manager.io/Certificate/testdata/healthy_issued.yaml new file mode 100644 index 0000000..c6c2b90 --- /dev/null +++ 
b/pkg/resource_customizations/cert-manager.io/Certificate/testdata/healthy_issued.yaml @@ -0,0 +1,39 @@ +apiVersion: cert-manager.io/v1alpha2 +kind: Certificate +metadata: + creationTimestamp: "2019-02-15T18:17:06Z" + generation: 1 + name: test-cert + namespace: argocd + resourceVersion: "68337322" + selfLink: /apis/cert-manager.io/v1alpha2/namespaces/argocd/certificates/test-cert + uid: e6cfba50-314d-11e9-be3f-42010a800011 +spec: + acme: + config: + - domains: + - cd.apps.argoproj.io + http01: + ingress: http01 + commonName: cd.apps.argoproj.io + dnsNames: + - cd.apps.argoproj.io + issuerRef: + kind: Issuer + name: argo-cd-issuer + secretName: test-secret +status: + acme: + order: + url: https://acme-v02.api.letsencrypt.org/acme/order/45250083/316944902 + conditions: + - lastTransitionTime: "2019-02-15T18:21:10Z" + message: Order validated + reason: OrderValidated + status: "False" + type: ValidateFailed + - lastTransitionTime: null + message: Certificate issued successfully + reason: CertIssued + status: "True" + type: Ready diff --git a/pkg/resource_customizations/cert-manager.io/Certificate/testdata/healthy_renewed.yaml b/pkg/resource_customizations/cert-manager.io/Certificate/testdata/healthy_renewed.yaml new file mode 100644 index 0000000..7bc66f2 --- /dev/null +++ b/pkg/resource_customizations/cert-manager.io/Certificate/testdata/healthy_renewed.yaml @@ -0,0 +1,39 @@ +apiVersion: cert-manager.io/v1alpha2 +kind: Certificate +metadata: + creationTimestamp: '2018-11-07T00:06:12Z' + generation: 1 + name: test-cert + namespace: argocd + resourceVersion: '64763033' + selfLink: /apis/cert-manager.io/v1alpha2/namespaces/argocd/certificates/test-cert + uid: e6cfba50-314d-11e9-be3f-42010a800011 +spec: + acme: + config: + - domains: + - cd.apps.argoproj.io + http01: + ingress: http01 + commonName: cd.apps.argoproj.io + dnsNames: + - cd.apps.argoproj.io + issuerRef: + kind: Issuer + name: argo-cd-issuer + secretName: test-secret +status: + acme: + order: + url: 'https://acme-v02.api.letsencrypt.org/acme/order/45250083/298963150' + conditions: + - lastTransitionTime: '2019-02-03T09:48:13Z' + message: Certificate renewed successfully + reason: CertRenewed + status: 'True' + type: Ready + - lastTransitionTime: '2019-02-03T09:48:11Z' + message: Order validated + reason: OrderValidated + status: 'False' + type: ValidateFailed diff --git a/pkg/resource_customizations/cert-manager.io/Certificate/testdata/progressing_issuing.yaml b/pkg/resource_customizations/cert-manager.io/Certificate/testdata/progressing_issuing.yaml new file mode 100644 index 0000000..acde21e --- /dev/null +++ b/pkg/resource_customizations/cert-manager.io/Certificate/testdata/progressing_issuing.yaml @@ -0,0 +1,37 @@ +apiVersion: cert-manager.io/v1alpha2 +kind: Certificate +metadata: + creationTimestamp: '2018-11-07T00:06:12Z' + generation: 1 + name: test-cert + namespace: argocd + resourceVersion: '64763033' + selfLink: /apis/cert-manager.io/v1alpha2/namespaces/argocd/certificates/test-cert + uid: e6cfba50-314d-11e9-be3f-42010a800011 +spec: + acme: + config: + - domains: + - cd.apps.argoproj.io + http01: + ingress: http01 + commonName: cd.apps.argoproj.io + dnsNames: + - cd.apps.argoproj.io + issuerRef: + kind: Issuer + name: argo-cd-issuer + secretName: test-secret +status: + conditions: + - lastTransitionTime: '2021-09-15T02:10:00Z' + message: Issuing certificate as Secret does not exist + reason: DoesNotExist + status: 'True' + type: Issuing + - lastTransitionTime: '2021-09-15T02:10:00Z' + message: Issuing certificate as Secret 
does not exist + reason: DoesNotExist + status: 'False' + type: Ready + diff --git a/pkg/resource_customizations/cert-manager.io/Certificate/testdata/progressing_issuing_last.yaml b/pkg/resource_customizations/cert-manager.io/Certificate/testdata/progressing_issuing_last.yaml new file mode 100644 index 0000000..4d21a9b --- /dev/null +++ b/pkg/resource_customizations/cert-manager.io/Certificate/testdata/progressing_issuing_last.yaml @@ -0,0 +1,36 @@ +apiVersion: cert-manager.io/v1alpha2 +kind: Certificate +metadata: + creationTimestamp: '2018-11-07T00:06:12Z' + generation: 1 + name: test-cert + namespace: argocd + resourceVersion: '64763033' + selfLink: /apis/cert-manager.io/v1alpha2/namespaces/argocd/certificates/test-cert + uid: e6cfba50-314d-11e9-be3f-42010a800011 +spec: + acme: + config: + - domains: + - cd.apps.argoproj.io + http01: + ingress: http01 + commonName: cd.apps.argoproj.io + dnsNames: + - cd.apps.argoproj.io + issuerRef: + kind: Issuer + name: argo-cd-issuer + secretName: test-secret +status: + conditions: + - lastTransitionTime: '2021-09-15T02:10:00Z' + message: Issuing certificate as Secret does not exist + reason: DoesNotExist + status: 'False' + type: Ready + - lastTransitionTime: '2021-09-15T02:10:00Z' + message: Issuing certificate as Secret does not exist + reason: DoesNotExist + status: 'True' + type: Issuing diff --git a/pkg/resource_customizations/cert-manager.io/Certificate/testdata/progressing_noStatus.yaml b/pkg/resource_customizations/cert-manager.io/Certificate/testdata/progressing_noStatus.yaml new file mode 100644 index 0000000..1d8edfc --- /dev/null +++ b/pkg/resource_customizations/cert-manager.io/Certificate/testdata/progressing_noStatus.yaml @@ -0,0 +1,24 @@ +apiVersion: cert-manager.io/v1alpha2 +kind: Certificate +metadata: + creationTimestamp: '2018-11-07T00:06:12Z' + generation: 1 + name: test-cert + namespace: argocd + resourceVersion: '64763033' + selfLink: /apis/cert-manager.io/v1alpha2/namespaces/argocd/certificates/test-cert + uid: e6cfba50-314d-11e9-be3f-42010a800011 +spec: + acme: + config: + - domains: + - cd.apps.argoproj.io + http01: + ingress: http01 + commonName: cd.apps.argoproj.io + dnsNames: + - cd.apps.argoproj.io + issuerRef: + kind: Issuer + name: argo-cd-issuer + secretName: test-secret diff --git a/pkg/resource_customizations/cert-manager.io/ClusterIssuer/health.lua b/pkg/resource_customizations/cert-manager.io/ClusterIssuer/health.lua new file mode 100644 index 0000000..b6eb208 --- /dev/null +++ b/pkg/resource_customizations/cert-manager.io/ClusterIssuer/health.lua @@ -0,0 +1,21 @@ +local hs = {} +if obj.status ~= nil then + if obj.status.conditions ~= nil then + for i, condition in ipairs(obj.status.conditions) do + if condition.type == "Ready" and condition.status == "False" then + hs.status = "Degraded" + hs.message = condition.message + return hs + end + if condition.type == "Ready" and condition.status == "True" then + hs.status = "Healthy" + hs.message = condition.message + return hs + end + end + end +end + +hs.status = "Progressing" +hs.message = "Initializing ClusterIssuer" +return hs diff --git a/pkg/resource_customizations/cert-manager.io/ClusterIssuer/health_test.yaml b/pkg/resource_customizations/cert-manager.io/ClusterIssuer/health_test.yaml new file mode 100644 index 0000000..1eda5cf --- /dev/null +++ b/pkg/resource_customizations/cert-manager.io/ClusterIssuer/health_test.yaml @@ -0,0 +1,14 @@ +tests: +- healthStatus: + status: Progressing + message: Initializing ClusterIssuer + inputPath: 
testdata/progressing_noStatus.yaml +- healthStatus: + status: Healthy + message: The ACME account was registered with the ACME server + inputPath: testdata/healthy_registered.yaml +- healthStatus: + status: Degraded + message: "Failed to verify ACME account: acme: : 404 page not found\n" + inputPath: testdata/degraded_acmeFailed.yaml + diff --git a/pkg/resource_customizations/cert-manager.io/ClusterIssuer/testdata/degraded_acmeFailed.yaml b/pkg/resource_customizations/cert-manager.io/ClusterIssuer/testdata/degraded_acmeFailed.yaml new file mode 100644 index 0000000..c99c1f4 --- /dev/null +++ b/pkg/resource_customizations/cert-manager.io/ClusterIssuer/testdata/degraded_acmeFailed.yaml @@ -0,0 +1,26 @@ +apiVersion: cert-manager.io/v1 +kind: ClusterIssuer +metadata: + creationTimestamp: "2019-02-15T19:23:48Z" + generation: 1 + name: test-issuer + resourceVersion: "68352438" + uid: 37f408e3-3157-11e9-be3f-42010a800011 +spec: + acme: + email: myemail@example.com + http01: {} + privateKeySecretRef: + key: "" + name: letsencrypt + server: https://acme-v02.api.letsencrypt.org/directory124 +status: + acme: + uri: "" + conditions: + - lastTransitionTime: "2019-02-15T19:23:53Z" + message: | + Failed to verify ACME account: acme: : 404 page not found + reason: ErrRegisterACMEAccount + status: "False" + type: Ready diff --git a/pkg/resource_customizations/cert-manager.io/ClusterIssuer/testdata/healthy_registered.yaml b/pkg/resource_customizations/cert-manager.io/ClusterIssuer/testdata/healthy_registered.yaml new file mode 100644 index 0000000..e883b51 --- /dev/null +++ b/pkg/resource_customizations/cert-manager.io/ClusterIssuer/testdata/healthy_registered.yaml @@ -0,0 +1,25 @@ +apiVersion: cert-manager.io/v1 +kind: ClusterIssuer +metadata: + creationTimestamp: "2018-11-06T23:14:18Z" + generation: 1 + name: test-issuer + resourceVersion: "48889060" + uid: b0045219-e219-11e8-9f93-42010a80021d +spec: + acme: + email: myemail@example.com + http01: {} + privateKeySecretRef: + key: "" + name: letsencrypt + server: https://acme-v02.api.letsencrypt.org/directory +status: + acme: + uri: https://acme-v02.api.letsencrypt.org/acme/acct/45250083 + conditions: + - lastTransitionTime: "2018-12-06T06:42:59Z" + message: The ACME account was registered with the ACME server + reason: ACMEAccountRegistered + status: "True" + type: Ready diff --git a/pkg/resource_customizations/cert-manager.io/ClusterIssuer/testdata/progressing_noStatus.yaml b/pkg/resource_customizations/cert-manager.io/ClusterIssuer/testdata/progressing_noStatus.yaml new file mode 100644 index 0000000..4571d22 --- /dev/null +++ b/pkg/resource_customizations/cert-manager.io/ClusterIssuer/testdata/progressing_noStatus.yaml @@ -0,0 +1,16 @@ +apiVersion: cert-manager.io/v1 +kind: ClusterIssuer +metadata: + creationTimestamp: "2018-11-06T23:14:18Z" + generation: 1 + name: test-issuer + resourceVersion: "48889060" + uid: b0045219-e219-11e8-9f93-42010a80021d +spec: + acme: + email: myemail@example.com + http01: {} + privateKeySecretRef: + key: "" + name: letsencrypt + server: https://acme-v02.api.letsencrypt.org/directory diff --git a/pkg/resource_customizations/cert-manager.io/Issuer/health.lua b/pkg/resource_customizations/cert-manager.io/Issuer/health.lua new file mode 100644 index 0000000..497a6a7 --- /dev/null +++ b/pkg/resource_customizations/cert-manager.io/Issuer/health.lua @@ -0,0 +1,21 @@ +local hs = {} +if obj.status ~= nil then + if obj.status.conditions ~= nil then + for i, condition in ipairs(obj.status.conditions) do + if condition.type == "Ready" 
and condition.status == "False" then + hs.status = "Degraded" + hs.message = condition.message + return hs + end + if condition.type == "Ready" and condition.status == "True" then + hs.status = "Healthy" + hs.message = condition.message + return hs + end + end + end +end + +hs.status = "Progressing" +hs.message = "Initializing issuer" +return hs diff --git a/pkg/resource_customizations/cert-manager.io/Issuer/health_test.yaml b/pkg/resource_customizations/cert-manager.io/Issuer/health_test.yaml new file mode 100644 index 0000000..e48df5e --- /dev/null +++ b/pkg/resource_customizations/cert-manager.io/Issuer/health_test.yaml @@ -0,0 +1,14 @@ +tests: +- healthStatus: + status: Progressing + message: Initializing issuer + inputPath: testdata/progressing_noStatus.yaml +- healthStatus: + status: Healthy + message: The ACME account was registered with the ACME server + inputPath: testdata/healthy_registered.yaml +- healthStatus: + status: Degraded + message: "Failed to verify ACME account: acme: : 404 page not found\n" + inputPath: testdata/degraded_acmeFailed.yaml + diff --git a/pkg/resource_customizations/cert-manager.io/Issuer/testdata/degraded_acmeFailed.yaml b/pkg/resource_customizations/cert-manager.io/Issuer/testdata/degraded_acmeFailed.yaml new file mode 100644 index 0000000..a5abcf5 --- /dev/null +++ b/pkg/resource_customizations/cert-manager.io/Issuer/testdata/degraded_acmeFailed.yaml @@ -0,0 +1,28 @@ +apiVersion: cert-manager.io/v1alpha2 +kind: Issuer +metadata: + creationTimestamp: "2019-02-15T19:23:48Z" + generation: 1 + name: test-issuer + namespace: argocd + resourceVersion: "68352438" + selfLink: /apis/cert-manager.io/v1alpha2/namespaces/argocd/issuers/test-issuer + uid: 37f408e3-3157-11e9-be3f-42010a800011 +spec: + acme: + email: myemail@example.com + http01: {} + privateKeySecretRef: + key: "" + name: letsencrypt + server: https://acme-v02.api.letsencrypt.org/directory124 +status: + acme: + uri: "" + conditions: + - lastTransitionTime: "2019-02-15T19:23:53Z" + message: | + Failed to verify ACME account: acme: : 404 page not found + reason: ErrRegisterACMEAccount + status: "False" + type: Ready diff --git a/pkg/resource_customizations/cert-manager.io/Issuer/testdata/healthy_registered.yaml b/pkg/resource_customizations/cert-manager.io/Issuer/testdata/healthy_registered.yaml new file mode 100644 index 0000000..0718156 --- /dev/null +++ b/pkg/resource_customizations/cert-manager.io/Issuer/testdata/healthy_registered.yaml @@ -0,0 +1,27 @@ +apiVersion: cert-manager.io/v1alpha2 +kind: Issuer +metadata: + creationTimestamp: "2018-11-06T23:14:18Z" + generation: 1 + name: test-issuer + namespace: argocd + resourceVersion: "48889060" + selfLink: /apis/cert-manager.io/v1alpha2/namespaces/argocd/issuers/argo-cd-issuer + uid: b0045219-e219-11e8-9f93-42010a80021d +spec: + acme: + email: myemail@example.com + http01: {} + privateKeySecretRef: + key: "" + name: letsencrypt + server: https://acme-v02.api.letsencrypt.org/directory +status: + acme: + uri: https://acme-v02.api.letsencrypt.org/acme/acct/45250083 + conditions: + - lastTransitionTime: "2018-12-06T06:42:59Z" + message: The ACME account was registered with the ACME server + reason: ACMEAccountRegistered + status: "True" + type: Ready diff --git a/pkg/resource_customizations/cert-manager.io/Issuer/testdata/progressing_noStatus.yaml b/pkg/resource_customizations/cert-manager.io/Issuer/testdata/progressing_noStatus.yaml new file mode 100644 index 0000000..f2e7b80 --- /dev/null +++ 
b/pkg/resource_customizations/cert-manager.io/Issuer/testdata/progressing_noStatus.yaml @@ -0,0 +1,18 @@ +apiVersion: cert-manager.io/v1alpha2 +kind: Issuer +metadata: + creationTimestamp: "2018-11-06T23:14:18Z" + generation: 1 + name: test-issuer + namespace: argocd + resourceVersion: "48889060" + selfLink: /apis/cert-manager.io/v1alpha2/namespaces/argocd/issuers/argo-cd-issuer + uid: b0045219-e219-11e8-9f93-42010a80021d +spec: + acme: + email: myemail@example.com + http01: {} + privateKeySecretRef: + key: "" + name: letsencrypt + server: https://acme-v02.api.letsencrypt.org/directory diff --git a/pkg/resource_customizations/certmanager.k8s.io/Certificate/health.lua b/pkg/resource_customizations/certmanager.k8s.io/Certificate/health.lua new file mode 100644 index 0000000..d512d2d --- /dev/null +++ b/pkg/resource_customizations/certmanager.k8s.io/Certificate/health.lua @@ -0,0 +1,21 @@ +local hs = {} +if obj.status ~= nil then + if obj.status.conditions ~= nil then + for i, condition in ipairs(obj.status.conditions) do + if condition.type == "Ready" and condition.status == "False" then + hs.status = "Degraded" + hs.message = condition.message + return hs + end + if condition.type == "Ready" and condition.status == "True" then + hs.status = "Healthy" + hs.message = condition.message + return hs + end + end + end +end + +hs.status = "Progressing" +hs.message = "Waiting for certificate" +return hs diff --git a/pkg/resource_customizations/certmanager.k8s.io/Certificate/health_test.yaml b/pkg/resource_customizations/certmanager.k8s.io/Certificate/health_test.yaml new file mode 100644 index 0000000..9b00429 --- /dev/null +++ b/pkg/resource_customizations/certmanager.k8s.io/Certificate/health_test.yaml @@ -0,0 +1,18 @@ +tests: +- healthStatus: + status: Progressing + message: Waiting for certificate + inputPath: testdata/progressing_noStatus.yaml +- healthStatus: + status: Degraded + message: 'Resource validation failed: spec.acme.config: Required value: no ACME + solver configuration specified for domain "cd.apps.argoproj.io"' + inputPath: testdata/degraded_configError.yaml +- healthStatus: + status: Healthy + message: 'Certificate issued successfully' + inputPath: testdata/healthy_issued.yaml +- healthStatus: + status: Healthy + message: 'Certificate renewed successfully' + inputPath: testdata/healthy_renewed.yaml diff --git a/pkg/resource_customizations/certmanager.k8s.io/Certificate/testdata/degraded_configError.yaml b/pkg/resource_customizations/certmanager.k8s.io/Certificate/testdata/degraded_configError.yaml new file mode 100644 index 0000000..1680571 --- /dev/null +++ b/pkg/resource_customizations/certmanager.k8s.io/Certificate/testdata/degraded_configError.yaml @@ -0,0 +1,35 @@ +apiVersion: certmanager.k8s.io/v1alpha1 +kind: Certificate +metadata: + annotations: + kubectl.kubernetes.io/last-applied-configuration: | + {"apiVersion":"certmanager.k8s.io/v1alpha1","kind":"Certificate","metadata":{"annotations":{},"name":"test-cert","namespace":"argocd"},"spec":{"acme":{"config":[{"domains":["cd.apps.argoproj.io"],"http01":{"ingress":"http01"}}]},"commonName":"cd.apps.argoproj.io","dnsNames":["cd.apps.argoproj.io"],"issuerRef":{"kind":"Issuer","name":"argo-cd-issuer"}}} + creationTimestamp: "2019-02-15T18:17:06Z" + generation: 1 + name: test-cert + namespace: argocd + resourceVersion: "68338442" + selfLink: /apis/certmanager.k8s.io/v1alpha1/namespaces/argocd/certificates/test-cert + uid: e6cfba50-314d-11e9-be3f-42010a800011 +spec: + acme: + config: + - domains: + - cd.apps.argoproj.io123 + 
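# the trailing "123" intentionally mismatches the dnsNames entry below, matching the ConfigError scenario +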
http01: + ingress: http01 + commonName: cd.apps.argoproj.io + dnsNames: + - cd.apps.argoproj.io + issuerRef: + kind: Issuer + name: argo-cd-issuer + secretName: test-secret +status: + conditions: + - lastTransitionTime: "2019-02-15T18:26:37Z" + message: 'Resource validation failed: spec.acme.config: Required value: no ACME + solver configuration specified for domain "cd.apps.argoproj.io"' + reason: ConfigError + status: "False" + type: Ready diff --git a/pkg/resource_customizations/certmanager.k8s.io/Certificate/testdata/healthy_issued.yaml b/pkg/resource_customizations/certmanager.k8s.io/Certificate/testdata/healthy_issued.yaml new file mode 100644 index 0000000..d61f84f --- /dev/null +++ b/pkg/resource_customizations/certmanager.k8s.io/Certificate/testdata/healthy_issued.yaml @@ -0,0 +1,39 @@ +apiVersion: certmanager.k8s.io/v1alpha1 +kind: Certificate +metadata: + creationTimestamp: "2019-02-15T18:17:06Z" + generation: 1 + name: test-cert + namespace: argocd + resourceVersion: "68337322" + selfLink: /apis/certmanager.k8s.io/v1alpha1/namespaces/argocd/certificates/test-cert + uid: e6cfba50-314d-11e9-be3f-42010a800011 +spec: + acme: + config: + - domains: + - cd.apps.argoproj.io + http01: + ingress: http01 + commonName: cd.apps.argoproj.io + dnsNames: + - cd.apps.argoproj.io + issuerRef: + kind: Issuer + name: argo-cd-issuer + secretName: test-secret +status: + acme: + order: + url: https://acme-v02.api.letsencrypt.org/acme/order/45250083/316944902 + conditions: + - lastTransitionTime: "2019-02-15T18:21:10Z" + message: Order validated + reason: OrderValidated + status: "False" + type: ValidateFailed + - lastTransitionTime: null + message: Certificate issued successfully + reason: CertIssued + status: "True" + type: Ready diff --git a/pkg/resource_customizations/certmanager.k8s.io/Certificate/testdata/healthy_renewed.yaml b/pkg/resource_customizations/certmanager.k8s.io/Certificate/testdata/healthy_renewed.yaml new file mode 100644 index 0000000..877f050 --- /dev/null +++ b/pkg/resource_customizations/certmanager.k8s.io/Certificate/testdata/healthy_renewed.yaml @@ -0,0 +1,39 @@ +apiVersion: certmanager.k8s.io/v1alpha1 +kind: Certificate +metadata: + creationTimestamp: '2018-11-07T00:06:12Z' + generation: 1 + name: test-cert + namespace: argocd + resourceVersion: '64763033' + selfLink: /apis/certmanager.k8s.io/v1alpha1/namespaces/argocd/certificates/test-cert + uid: e6cfba50-314d-11e9-be3f-42010a800011 +spec: + acme: + config: + - domains: + - cd.apps.argoproj.io + http01: + ingress: http01 + commonName: cd.apps.argoproj.io + dnsNames: + - cd.apps.argoproj.io + issuerRef: + kind: Issuer + name: argo-cd-issuer + secretName: test-secret +status: + acme: + order: + url: 'https://acme-v02.api.letsencrypt.org/acme/order/45250083/298963150' + conditions: + - lastTransitionTime: '2019-02-03T09:48:13Z' + message: Certificate renewed successfully + reason: CertRenewed + status: 'True' + type: Ready + - lastTransitionTime: '2019-02-03T09:48:11Z' + message: Order validated + reason: OrderValidated + status: 'False' + type: ValidateFailed diff --git a/pkg/resource_customizations/certmanager.k8s.io/Certificate/testdata/progressing_noStatus.yaml b/pkg/resource_customizations/certmanager.k8s.io/Certificate/testdata/progressing_noStatus.yaml new file mode 100644 index 0000000..4fa2fea --- /dev/null +++ b/pkg/resource_customizations/certmanager.k8s.io/Certificate/testdata/progressing_noStatus.yaml @@ -0,0 +1,24 @@ +apiVersion: certmanager.k8s.io/v1alpha1 +kind: Certificate +metadata: + creationTimestamp: 
'2018-11-07T00:06:12Z' + generation: 1 + name: test-cert + namespace: argocd + resourceVersion: '64763033' + selfLink: /apis/certmanager.k8s.io/v1alpha1/namespaces/argocd/certificates/test-cert + uid: e6cfba50-314d-11e9-be3f-42010a800011 +spec: + acme: + config: + - domains: + - cd.apps.argoproj.io + http01: + ingress: http01 + commonName: cd.apps.argoproj.io + dnsNames: + - cd.apps.argoproj.io + issuerRef: + kind: Issuer + name: argo-cd-issuer + secretName: test-secret diff --git a/pkg/resource_customizations/certmanager.k8s.io/Issuer/health.lua b/pkg/resource_customizations/certmanager.k8s.io/Issuer/health.lua new file mode 100644 index 0000000..497a6a7 --- /dev/null +++ b/pkg/resource_customizations/certmanager.k8s.io/Issuer/health.lua @@ -0,0 +1,21 @@ +local hs = {} +if obj.status ~= nil then + if obj.status.conditions ~= nil then + for i, condition in ipairs(obj.status.conditions) do + if condition.type == "Ready" and condition.status == "False" then + hs.status = "Degraded" + hs.message = condition.message + return hs + end + if condition.type == "Ready" and condition.status == "True" then + hs.status = "Healthy" + hs.message = condition.message + return hs + end + end + end +end + +hs.status = "Progressing" +hs.message = "Initializing issuer" +return hs diff --git a/pkg/resource_customizations/certmanager.k8s.io/Issuer/health_test.yaml b/pkg/resource_customizations/certmanager.k8s.io/Issuer/health_test.yaml new file mode 100644 index 0000000..e48df5e --- /dev/null +++ b/pkg/resource_customizations/certmanager.k8s.io/Issuer/health_test.yaml @@ -0,0 +1,14 @@ +tests: +- healthStatus: + status: Progressing + message: Initializing issuer + inputPath: testdata/progressing_noStatus.yaml +- healthStatus: + status: Healthy + message: The ACME account was registered with the ACME server + inputPath: testdata/healthy_registered.yaml +- healthStatus: + status: Degraded + message: "Failed to verify ACME account: acme: : 404 page not found\n" + inputPath: testdata/degraded_acmeFailed.yaml + diff --git a/pkg/resource_customizations/certmanager.k8s.io/Issuer/testdata/degraded_acmeFailed.yaml b/pkg/resource_customizations/certmanager.k8s.io/Issuer/testdata/degraded_acmeFailed.yaml new file mode 100644 index 0000000..5f0dbec --- /dev/null +++ b/pkg/resource_customizations/certmanager.k8s.io/Issuer/testdata/degraded_acmeFailed.yaml @@ -0,0 +1,28 @@ +apiVersion: certmanager.k8s.io/v1alpha1 +kind: Issuer +metadata: + creationTimestamp: "2019-02-15T19:23:48Z" + generation: 1 + name: test-issuer + namespace: argocd + resourceVersion: "68352438" + selfLink: /apis/certmanager.k8s.io/v1alpha1/namespaces/argocd/issuers/test-issuer + uid: 37f408e3-3157-11e9-be3f-42010a800011 +spec: + acme: + email: myemail@example.com + http01: {} + privateKeySecretRef: + key: "" + name: letsencrypt + server: https://acme-v02.api.letsencrypt.org/directory124 +status: + acme: + uri: "" + conditions: + - lastTransitionTime: "2019-02-15T19:23:53Z" + message: | + Failed to verify ACME account: acme: : 404 page not found + reason: ErrRegisterACMEAccount + status: "False" + type: Ready diff --git a/pkg/resource_customizations/certmanager.k8s.io/Issuer/testdata/healthy_registered.yaml b/pkg/resource_customizations/certmanager.k8s.io/Issuer/testdata/healthy_registered.yaml new file mode 100644 index 0000000..a5f6aa1 --- /dev/null +++ b/pkg/resource_customizations/certmanager.k8s.io/Issuer/testdata/healthy_registered.yaml @@ -0,0 +1,27 @@ +apiVersion: certmanager.k8s.io/v1alpha1 +kind: Issuer +metadata: + creationTimestamp: 
"2018-11-06T23:14:18Z" + generation: 1 + name: test-issuer + namespace: argocd + resourceVersion: "48889060" + selfLink: /apis/certmanager.k8s.io/v1alpha1/namespaces/argocd/issuers/argo-cd-issuer + uid: b0045219-e219-11e8-9f93-42010a80021d +spec: + acme: + email: myemail@example.com + http01: {} + privateKeySecretRef: + key: "" + name: letsencrypt + server: https://acme-v02.api.letsencrypt.org/directory +status: + acme: + uri: https://acme-v02.api.letsencrypt.org/acme/acct/45250083 + conditions: + - lastTransitionTime: "2018-12-06T06:42:59Z" + message: The ACME account was registered with the ACME server + reason: ACMEAccountRegistered + status: "True" + type: Ready diff --git a/pkg/resource_customizations/certmanager.k8s.io/Issuer/testdata/progressing_noStatus.yaml b/pkg/resource_customizations/certmanager.k8s.io/Issuer/testdata/progressing_noStatus.yaml new file mode 100644 index 0000000..501b7aa --- /dev/null +++ b/pkg/resource_customizations/certmanager.k8s.io/Issuer/testdata/progressing_noStatus.yaml @@ -0,0 +1,18 @@ +apiVersion: certmanager.k8s.io/v1alpha1 +kind: Issuer +metadata: + creationTimestamp: "2018-11-06T23:14:18Z" + generation: 1 + name: test-issuer + namespace: argocd + resourceVersion: "48889060" + selfLink: /apis/certmanager.k8s.io/v1alpha1/namespaces/argocd/issuers/argo-cd-issuer + uid: b0045219-e219-11e8-9f93-42010a80021d +spec: + acme: + email: myemail@example.com + http01: {} + privateKeySecretRef: + key: "" + name: letsencrypt + server: https://acme-v02.api.letsencrypt.org/directory diff --git a/pkg/resource_customizations/cloudfront.aws.crossplane.io/Distribution/health.lua b/pkg/resource_customizations/cloudfront.aws.crossplane.io/Distribution/health.lua new file mode 100644 index 0000000..3e07226 --- /dev/null +++ b/pkg/resource_customizations/cloudfront.aws.crossplane.io/Distribution/health.lua @@ -0,0 +1,42 @@ +local hs = {} +if obj.status ~= nil then + if obj.status.conditions ~= nil then + local ready = false + local synced = false + local suspended = false + + for i, condition in ipairs(obj.status.conditions) do + + if condition.type == "Ready" then + ready = condition.status == "True" + ready_message = condition.reason + elseif condition.type == "Synced" then + synced = condition.status == "True" + if condition.reason == "ReconcileError" then + synced_message = condition.message + elseif condition.reason == "ReconcilePaused" then + suspended = true + suspended_message = condition.reason + end + end + end + if ready and synced then + hs.status = "Healthy" + hs.message = ready_message + elseif synced == false and suspended == true then + hs.status = "Suspended" + hs.message = suspended_message + elseif ready == false and synced == true and suspended == false then + hs.status = "Progressing" + hs.message = "Waiting for distribution to be available" + else + hs.status = "Degraded" + hs.message = synced_message + end + return hs + end +end + +hs.status = "Progressing" +hs.message = "Waiting for distribution to be created" +return hs \ No newline at end of file diff --git a/pkg/resource_customizations/cloudfront.aws.crossplane.io/Distribution/health_test.yaml b/pkg/resource_customizations/cloudfront.aws.crossplane.io/Distribution/health_test.yaml new file mode 100644 index 0000000..981a600 --- /dev/null +++ b/pkg/resource_customizations/cloudfront.aws.crossplane.io/Distribution/health_test.yaml @@ -0,0 +1,37 @@ +tests: +- healthStatus: + status: Progressing + message: Waiting for distribution to be available + inputPath: testdata/progressing_creating.yaml +- 
healthStatus: + status: Progressing + message: Waiting for distribution to be available + inputPath: testdata/progressing_noavailable.yaml +- healthStatus: + status: Progressing + message: Waiting for distribution to be available + inputPath: testdata/progressing.yaml +- healthStatus: + status: Progressing + message: Waiting for distribution to be created + inputPath: testdata/progressing_noStatus.yaml +- healthStatus: + status: Degraded + message: > + update failed: cannot update Distribution in AWS: InvalidParameter: 2 + validation error(s) found. + + - missing required field, + UpdateDistributionInput.DistributionConfig.Origins.Items[0].DomainName. + + - missing required field, + UpdateDistributionInput.DistributionConfig.Origins.Items[0].Id. + inputPath: testdata/degraded_reconcileError.yaml +- healthStatus: + status: Suspended + message: ReconcilePaused + inputPath: testdata/suspended.yaml +- healthStatus: + status: Healthy + message: Available + inputPath: testdata/healthy.yaml diff --git a/pkg/resource_customizations/cloudfront.aws.crossplane.io/Distribution/testdata/degraded_reconcileError.yaml b/pkg/resource_customizations/cloudfront.aws.crossplane.io/Distribution/testdata/degraded_reconcileError.yaml new file mode 100644 index 0000000..80ea793 --- /dev/null +++ b/pkg/resource_customizations/cloudfront.aws.crossplane.io/Distribution/testdata/degraded_reconcileError.yaml @@ -0,0 +1,96 @@ +apiVersion: cloudfront.aws.crossplane.io/v1alpha1 +kind: Distribution +metadata: + creationTimestamp: '2024-01-17T07:26:02Z' + generation: 2 + name: crossplane.io + resourceVersion: '261942288' + uid: 4b50c88b-165c-4176-be8e-aa28fdec0a94 +spec: + deletionPolicy: Orphan + forProvider: + distributionConfig: + comment: 'crossplane' + customErrorResponses: + items: [] + defaultCacheBehavior: + allowedMethods: + cachedMethods: + items: + - HEAD + - GET + items: + - HEAD + - GET + compress: false + defaultTTL: 600 + fieldLevelEncryptionID: '' + forwardedValues: + cookies: + forward: none + headers: + items: [] + queryString: false + queryStringCacheKeys: {} + functionAssociations: {} + lambdaFunctionAssociations: {} + maxTTL: 600 + minTTL: 0 + smoothStreaming: false + targetOriginID: crossplane.io + trustedKeyGroups: + enabled: false + trustedSigners: + enabled: false + viewerProtocolPolicy: allow-all + defaultRootObject: index.html + enabled: true + httpVersion: http2 + isIPV6Enabled: true + logging: + bucket: '' + enabled: false + includeCookies: false + prefix: '' + originGroups: {} + origins: + items: + - connectionAttempts: 3 + connectionTimeout: 10 + customOriginConfig: + httpPort: 8080 + httpSPort: 443 + originKeepaliveTimeout: 5 + originProtocolPolicy: http-only + originReadTimeout: 10 + originSSLProtocols: + items: + - TLSv1 + - TLSv1.1 + - TLSv1.2 + priceClass: PriceClass_200 + restrictions: + geoRestriction: + restrictionType: none + region: ap-northeast-2 + providerConfigRef: + name: crossplane +status: + conditions: + - lastTransitionTime: '2024-01-17T07:26:02Z' + message: > + update failed: cannot update Distribution in AWS: InvalidParameter: 2 + validation error(s) found. + + - missing required field, + UpdateDistributionInput.DistributionConfig.Origins.Items[0].DomainName. + + - missing required field, + UpdateDistributionInput.DistributionConfig.Origins.Items[0].Id. 
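+ # this folded message is compared verbatim against the expected Degraded message in health_test.yaml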
+ reason: ReconcileError + status: 'False' + type: Synced + - lastTransitionTime: '2024-01-17T07:26:03Z' + reason: Available + status: 'True' + type: Ready diff --git a/pkg/resource_customizations/cloudfront.aws.crossplane.io/Distribution/testdata/healthy.yaml b/pkg/resource_customizations/cloudfront.aws.crossplane.io/Distribution/testdata/healthy.yaml new file mode 100644 index 0000000..23d0287 --- /dev/null +++ b/pkg/resource_customizations/cloudfront.aws.crossplane.io/Distribution/testdata/healthy.yaml @@ -0,0 +1,92 @@ +apiVersion: cloudfront.aws.crossplane.io/v1alpha1 +kind: Distribution +metadata: + creationTimestamp: "2023-09-07T01:01:16Z" + generation: 121 + name: crossplane.io + resourceVersion: "254225966" + uid: 531d989c-a3d2-4ab4-841d-ab380cce0bdb +spec: + deletionPolicy: Orphan + forProvider: + distributionConfig: + comment: 'crossplane' + customErrorResponses: + items: [] + defaultCacheBehavior: + allowedMethods: + cachedMethods: + items: + - HEAD + - GET + items: + - HEAD + - GET + compress: false + defaultTTL: 600 + fieldLevelEncryptionID: '' + forwardedValues: + cookies: + forward: none + headers: + items: [] + queryString: false + queryStringCacheKeys: {} + functionAssociations: {} + lambdaFunctionAssociations: {} + maxTTL: 600 + minTTL: 0 + smoothStreaming: false + targetOriginID: crossplane.io + trustedKeyGroups: + enabled: false + trustedSigners: + enabled: false + viewerProtocolPolicy: allow-all + defaultRootObject: index.html + enabled: true + httpVersion: http2 + isIPV6Enabled: true + logging: + bucket: '' + enabled: false + includeCookies: false + prefix: '' + originGroups: {} + origins: + items: + - connectionAttempts: 3 + connectionTimeout: 10 + customHeaders: {} + customOriginConfig: + httpPort: 8080 + httpSPort: 443 + originKeepaliveTimeout: 5 + originProtocolPolicy: http-only + originReadTimeout: 10 + originSSLProtocols: + items: + - TLSv1 + - TLSv1.1 + - TLSv1.2 + domainName: crossplane.io + id: crossplane.io + originShield: + enabled: false + priceClass: PriceClass_200 + restrictions: + geoRestriction: + restrictionType: none + region: ap-northeast-2 + providerConfigRef: + name: crossplane +status: + conditions: + - lastTransitionTime: "2024-01-11T06:23:18Z" + reason: ReconcileSuccess + status: "True" + type: Synced + - lastTransitionTime: "2024-01-10T03:23:02Z" + reason: Available + status: "True" + type: Ready diff --git a/pkg/resource_customizations/cloudfront.aws.crossplane.io/Distribution/testdata/progressing.yaml b/pkg/resource_customizations/cloudfront.aws.crossplane.io/Distribution/testdata/progressing.yaml new file mode 100644 index 0000000..3dbde7e --- /dev/null +++ b/pkg/resource_customizations/cloudfront.aws.crossplane.io/Distribution/testdata/progressing.yaml @@ -0,0 +1,92 @@ +apiVersion: cloudfront.aws.crossplane.io/v1alpha1 +kind: Distribution +metadata: + creationTimestamp: '2023-06-16T04:42:04Z' + generation: 37 + name: crossplane.io + resourceVersion: '254326453' + uid: fd357670-b762-4285-ae83-00859c40dd6b +spec: + deletionPolicy: Orphan + forProvider: + distributionConfig: + comment: 'crossplane' + customErrorResponses: + items: [] + defaultCacheBehavior: + allowedMethods: + cachedMethods: + items: + - HEAD + - GET + items: + - GET + - HEAD + compress: false + defaultTTL: 600 + fieldLevelEncryptionID: "" + forwardedValues: + cookies: + forward: none + headers: + items: [] + queryString: false + queryStringCacheKeys: {} + functionAssociations: {} + lambdaFunctionAssociations: {} + maxTTL: 600 + minTTL: 0 + smoothStreaming: false + 
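# targetOriginID must reference the id of an entry under origins.items +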
targetOriginID: crossplane.io + trustedKeyGroups: + enabled: false + trustedSigners: + enabled: false + viewerProtocolPolicy: allow-all + defaultRootObject: index.html + enabled: true + httpVersion: http2 + isIPV6Enabled: true + logging: + bucket: "" + enabled: false + includeCookies: false + prefix: "" + originGroups: {} + origins: + items: + - connectionAttempts: 3 + connectionTimeout: 10 + customHeaders: {} + customOriginConfig: + httpPort: 8080 + httpSPort: 443 + originKeepaliveTimeout: 5 + originProtocolPolicy: http-only + originReadTimeout: 10 + originSSLProtocols: + items: + - TLSv1 + - TLSv1.1 + - TLSv1.2 + domainName: crossplane.io + id: crossplane.io + originShield: + enabled: false + priceClass: PriceClass_200 + restrictions: + geoRestriction: + restrictionType: none + region: ap-northeast-2 + providerConfigRef: + name: crossplane +status: + conditions: + - lastTransitionTime: '2024-01-11T08:11:27Z' + reason: Unavailable + status: 'False' + type: Ready + - lastTransitionTime: '2024-01-11T08:11:02Z' + reason: ReconcileSuccess + status: 'True' + type: Synced diff --git a/pkg/resource_customizations/cloudfront.aws.crossplane.io/Distribution/testdata/progressing_creating.yaml b/pkg/resource_customizations/cloudfront.aws.crossplane.io/Distribution/testdata/progressing_creating.yaml new file mode 100644 index 0000000..122ab33 --- /dev/null +++ b/pkg/resource_customizations/cloudfront.aws.crossplane.io/Distribution/testdata/progressing_creating.yaml @@ -0,0 +1,92 @@ +apiVersion: cloudfront.aws.crossplane.io/v1alpha1 +kind: Distribution +metadata: + creationTimestamp: "2023-09-07T01:01:16Z" + generation: 121 + name: crossplane.io + resourceVersion: "254225966" + uid: 531d989c-a3d2-4ab4-841d-ab380cce0bdb +spec: + deletionPolicy: Orphan + forProvider: + distributionConfig: + comment: 'crossplane' + customErrorResponses: + items: [] + defaultCacheBehavior: + allowedMethods: + cachedMethods: + items: + - HEAD + - GET + items: + - GET + - HEAD + compress: false + defaultTTL: 600 + fieldLevelEncryptionID: "" + forwardedValues: + cookies: + forward: none + headers: + items: [] + queryString: false + queryStringCacheKeys: {} + functionAssociations: {} + lambdaFunctionAssociations: {} + maxTTL: 600 + minTTL: 0 + smoothStreaming: false + targetOriginID: crossplane.io + trustedKeyGroups: + enabled: false + trustedSigners: + enabled: false + viewerProtocolPolicy: allow-all + defaultRootObject: index.html + enabled: true + httpVersion: http2 + isIPV6Enabled: true + logging: + bucket: "" + enabled: false + includeCookies: false + prefix: "" + originGroups: {} + origins: + items: + - connectionAttempts: 3 + connectionTimeout: 10 + customHeaders: {} + customOriginConfig: + httpPort: 8080 + httpSPort: 443 + originKeepaliveTimeout: 5 + originProtocolPolicy: http-only + originReadTimeout: 10 + originSSLProtocols: + items: + - TLSv1 + - TLSv1.1 + - TLSv1.2 + domainName: crossplane.io + id: crossplane.io + originShield: + enabled: false + priceClass: PriceClass_200 + restrictions: + geoRestriction: + restrictionType: none + region: ap-northeast-2 + providerConfigRef: + name: crossplane +status: + conditions: + - lastTransitionTime: "2023-11-16T04:44:27Z" + reason: Creating + status: "False" + type: Ready + - lastTransitionTime: "2023-11-16T04:44:25Z" + reason: ReconcileSuccess + status: "True" + type: Synced diff --git a/pkg/resource_customizations/cloudfront.aws.crossplane.io/Distribution/testdata/progressing_noStatus.yaml 
b/pkg/resource_customizations/cloudfront.aws.crossplane.io/Distribution/testdata/progressing_noStatus.yaml new file mode 100644 index 0000000..2985ec2 --- /dev/null +++ b/pkg/resource_customizations/cloudfront.aws.crossplane.io/Distribution/testdata/progressing_noStatus.yaml @@ -0,0 +1,82 @@ +apiVersion: cloudfront.aws.crossplane.io/v1alpha1 +kind: Distribution +metadata: + creationTimestamp: "2023-09-07T01:01:16Z" + generation: 121 + name: crossplane.io + resourceVersion: "254225966" + uid: 531d989c-a3d2-4ab4-841d-ab380cce0bdb +spec: + deletionPolicy: Orphan + forProvider: + distributionConfig: + comment: 'crossplane' + customErrorResponses: + items: [] + defaultCacheBehavior: + allowedMethods: + cachedMethods: + items: + - HEAD + - GET + items: + - GET + - HEAD + compress: false + defaultTTL: 600 + fieldLevelEncryptionID: "" + forwardedValues: + cookies: + forward: none + headers: + items: [] + queryString: false + queryStringCacheKeys: {} + functionAssociations: {} + lambdaFunctionAssociations: {} + maxTTL: 600 + minTTL: 0 + smoothStreaming: false + targetOriginID: crossplane.io + trustedKeyGroups: + enabled: false + trustedSigners: + enabled: false + viewerProtocolPolicy: allow-all + defaultRootObject: index.html + enabled: true + httpVersion: http2 + isIPV6Enabled: true + logging: + bucket: "" + enabled: false + includeCookies: false + prefix: "" + originGroups: {} + origins: + items: + - connectionAttempts: 3 + connectionTimeout: 10 + customHeaders: {} + customOriginConfig: + httpPort: 8080 + httpSPort: 443 + originKeepaliveTimeout: 5 + originProtocolPolicy: http-only + originReadTimeout: 10 + originSSLProtocols: + items: + - TLSv1 + - TLSv1.1 + - TLSv1.2 + domainName: crossplane.io + id: crossplane.io + originShield: + enabled: false + priceClass: PriceClass_200 + restrictions: + geoRestriction: + restrictionType: none + region: ap-northeast-2 + providerConfigRef: + name: crossplane diff --git a/pkg/resource_customizations/cloudfront.aws.crossplane.io/Distribution/testdata/progressing_noavailable.yaml b/pkg/resource_customizations/cloudfront.aws.crossplane.io/Distribution/testdata/progressing_noavailable.yaml new file mode 100644 index 0000000..7a47b0f --- /dev/null +++ b/pkg/resource_customizations/cloudfront.aws.crossplane.io/Distribution/testdata/progressing_noavailable.yaml @@ -0,0 +1,88 @@ +apiVersion: cloudfront.aws.crossplane.io/v1alpha1 +kind: Distribution +metadata: + generation: 1 + name: crossplane.io + resourceVersion: "261937039" + uid: a52c105f-b0e1-4027-aa19-7e93f269f2a6 +spec: + deletionPolicy: Orphan + forProvider: + distributionConfig: + comment: 'crossplane' + customErrorResponses: + items: [] + defaultCacheBehavior: + allowedMethods: + cachedMethods: + items: + - HEAD + - GET + items: + - GET + - HEAD + compress: false + defaultTTL: 600 + fieldLevelEncryptionID: "" + forwardedValues: + cookies: + forward: none + headers: + items: [] + queryString: false + queryStringCacheKeys: {} + functionAssociations: {} + lambdaFunctionAssociations: {} + maxTTL: 600 + minTTL: 0 + smoothStreaming: false + targetOriginID: crossplane.io + trustedKeyGroups: + enabled: false + trustedSigners: + enabled: false + viewerProtocolPolicy: allow-all + defaultRootObject: index.html + enabled: true + httpVersion: http2 + isIPV6Enabled: true + logging: + bucket: "" + enabled: false + includeCookies: false + prefix: "" + originGroups: {} + origins: + items: + - connectionAttempts: 3 + connectionTimeout: 10 + customHeaders: {} + customOriginConfig: + httpPort: 8080 + httpSPort: 443 + 
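# httpSPort is this CRD's field name for the origin's HTTPS port +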
originKeepaliveTimeout: 5 + originProtocolPolicy: http-only + originReadTimeout: 10 + originSSLProtocols: + items: + - TLSv1 + - TLSv1.1 + - TLSv1.2 + domainName: crossplane.io + id: crossplane.io + originShield: + enabled: false + priceClass: PriceClass_200 + restrictions: + geoRestriction: + restrictionType: none + region: ap-northeast-2 + providerConfigRef: + name: crossplane +status: + atProvider: {} + conditions: + - lastTransitionTime: "2024-01-17T07:20:35Z" + reason: ReconcileSuccess + status: "True" + type: Synced diff --git a/pkg/resource_customizations/cloudfront.aws.crossplane.io/Distribution/testdata/suspended.yaml b/pkg/resource_customizations/cloudfront.aws.crossplane.io/Distribution/testdata/suspended.yaml new file mode 100644 index 0000000..d157137 --- /dev/null +++ b/pkg/resource_customizations/cloudfront.aws.crossplane.io/Distribution/testdata/suspended.yaml @@ -0,0 +1,94 @@ +apiVersion: cloudfront.aws.crossplane.io/v1alpha1 +kind: Distribution +metadata: + annotations: + crossplane.io/paused: "true" + creationTimestamp: "2023-06-16T04:42:04Z" + generation: 34 + name: crossplane.io + resourceVersion: "254259056" + uid: fd357670-b762-4285-ae83-00859c40dd6b +spec: + deletionPolicy: Orphan + forProvider: + distributionConfig: + comment: 'crossplane' + customErrorResponses: + items: [] + defaultCacheBehavior: + allowedMethods: + cachedMethods: + items: + - HEAD + - GET + items: + - GET + - HEAD + compress: false + defaultTTL: 600 + fieldLevelEncryptionID: "" + forwardedValues: + cookies: + forward: none + headers: + items: [] + queryString: false + queryStringCacheKeys: {} + functionAssociations: {} + lambdaFunctionAssociations: {} + maxTTL: 600 + minTTL: 0 + smoothStreaming: false + targetOriginID: crossplane.io + trustedKeyGroups: + enabled: false + trustedSigners: + enabled: false + viewerProtocolPolicy: allow-all + defaultRootObject: index.html + enabled: true + httpVersion: http2 + isIPV6Enabled: true + logging: + bucket: "" + enabled: false + includeCookies: false + prefix: "" + originGroups: {} + origins: + items: + - connectionAttempts: 3 + connectionTimeout: 10 + customHeaders: {} + customOriginConfig: + httpPort: 8080 + httpSPort: 443 + originKeepaliveTimeout: 5 + originProtocolPolicy: http-only + originReadTimeout: 10 + originSSLProtocols: + items: + - TLSv1 + - TLSv1.1 + - TLSv1.2 + domainName: crossplane.io + id: crossplane.io + originShield: + enabled: false + priceClass: PriceClass_200 + restrictions: + geoRestriction: + restrictionType: none + region: ap-northeast-2 + providerConfigRef: + name: crossplane +status: + conditions: + - lastTransitionTime: "2023-10-16T07:40:47Z" + reason: Available + status: "True" + type: Ready + - lastTransitionTime: "2024-01-11T06:59:47Z" + reason: ReconcilePaused + status: "False" + type: Synced diff --git a/pkg/resource_customizations/cloudfunctions.cnrm.cloud.google.com/CloudFunctionsFunction/health.lua b/pkg/resource_customizations/cloudfunctions.cnrm.cloud.google.com/CloudFunctionsFunction/health.lua new file mode 100644 index 0000000..585b5e2 --- /dev/null +++ b/pkg/resource_customizations/cloudfunctions.cnrm.cloud.google.com/CloudFunctionsFunction/health.lua @@ -0,0 +1,39 @@ +local hs = { + status = "Progressing", + message = "Update in progress" +} +if obj.status ~= nil then + if obj.status.conditions ~= nil then + for i, condition in ipairs(obj.status.conditions) do + + -- Up To Date + if condition.reason == "UpToDate" and condition.status == "True" then + hs.status = "Healthy" + hs.message = condition.message + return 
hs + end + + -- Update Failed + if condition.reason == "UpdateFailed" then + hs.status = "Degraded" + hs.message = condition.message + return hs + end + + -- Dependency Not Found + if condition.reason == "DependencyNotFound" then + hs.status = "Degraded" + hs.message = condition.message + return hs + end + + -- Dependency Not Ready + if condition.reason == "DependencyNotReady" then + hs.status = "Suspended" + hs.message = condition.message + return hs + end + end + end +end +return hs \ No newline at end of file diff --git a/pkg/resource_customizations/cloudfunctions.cnrm.cloud.google.com/CloudFunctionsFunction/health_test.yaml b/pkg/resource_customizations/cloudfunctions.cnrm.cloud.google.com/CloudFunctionsFunction/health_test.yaml new file mode 100644 index 0000000..8354cfb --- /dev/null +++ b/pkg/resource_customizations/cloudfunctions.cnrm.cloud.google.com/CloudFunctionsFunction/health_test.yaml @@ -0,0 +1,21 @@ +tests: +- healthStatus: + status: Degraded + message: "Dependency not found" + inputPath: testdata/dependency_not_found.yaml +- healthStatus: + status: Suspended + message: "Dependency not ready" + inputPath: testdata/dependency_not_ready.yaml +- healthStatus: + status: Healthy + message: "The resource is up to date" + inputPath: testdata/up_to_date.yaml +- healthStatus: + status: Degraded + message: "Update failed" + inputPath: testdata/update_failed.yaml +- healthStatus: + status: Progressing + message: "Update in progress" + inputPath: testdata/update_in_progress.yaml diff --git a/pkg/resource_customizations/cloudfunctions.cnrm.cloud.google.com/CloudFunctionsFunction/testdata/dependency_not_found.yaml b/pkg/resource_customizations/cloudfunctions.cnrm.cloud.google.com/CloudFunctionsFunction/testdata/dependency_not_found.yaml new file mode 100644 index 0000000..75887c8 --- /dev/null +++ b/pkg/resource_customizations/cloudfunctions.cnrm.cloud.google.com/CloudFunctionsFunction/testdata/dependency_not_found.yaml @@ -0,0 +1,9 @@ +apiVersion: cloudfunctions.cnrm.cloud.google.com/v1beta1 +kind: CloudFunctionsFunction +status: + conditions: + - lastTransitionTime: '2022-07-01T12:56:21Z' + message: Dependency not found + reason: DependencyNotFound + status: 'False' + type: Ready diff --git a/pkg/resource_customizations/cloudfunctions.cnrm.cloud.google.com/CloudFunctionsFunction/testdata/dependency_not_ready.yaml b/pkg/resource_customizations/cloudfunctions.cnrm.cloud.google.com/CloudFunctionsFunction/testdata/dependency_not_ready.yaml new file mode 100644 index 0000000..ef86c23 --- /dev/null +++ b/pkg/resource_customizations/cloudfunctions.cnrm.cloud.google.com/CloudFunctionsFunction/testdata/dependency_not_ready.yaml @@ -0,0 +1,9 @@ +apiVersion: cloudfunctions.cnrm.cloud.google.com/v1beta1 +kind: CloudFunctionsFunction +status: + conditions: + - lastTransitionTime: '2022-07-01T12:56:21Z' + message: Dependency not ready + reason: DependencyNotReady + status: 'False' + type: Ready diff --git a/pkg/resource_customizations/cloudfunctions.cnrm.cloud.google.com/CloudFunctionsFunction/testdata/up_to_date.yaml b/pkg/resource_customizations/cloudfunctions.cnrm.cloud.google.com/CloudFunctionsFunction/testdata/up_to_date.yaml new file mode 100644 index 0000000..d889f3a --- /dev/null +++ b/pkg/resource_customizations/cloudfunctions.cnrm.cloud.google.com/CloudFunctionsFunction/testdata/up_to_date.yaml @@ -0,0 +1,9 @@ +apiVersion: cloudfunctions.cnrm.cloud.google.com/v1beta1 +kind: CloudFunctionsFunction +status: + conditions: + - lastTransitionTime: '2022-05-09T08:49:18Z' + message: The resource 
is up to date + reason: UpToDate + status: 'True' + type: Ready diff --git a/pkg/resource_customizations/cloudfunctions.cnrm.cloud.google.com/CloudFunctionsFunction/testdata/update_failed.yaml b/pkg/resource_customizations/cloudfunctions.cnrm.cloud.google.com/CloudFunctionsFunction/testdata/update_failed.yaml new file mode 100644 index 0000000..59b953e --- /dev/null +++ b/pkg/resource_customizations/cloudfunctions.cnrm.cloud.google.com/CloudFunctionsFunction/testdata/update_failed.yaml @@ -0,0 +1,9 @@ +apiVersion: cloudfunctions.cnrm.cloud.google.com/v1beta1 +kind: CloudFunctionsFunction +status: + conditions: + - lastTransitionTime: '2022-07-01T12:56:21Z' + message: Update failed + reason: UpdateFailed + status: 'False' + type: Ready diff --git a/pkg/resource_customizations/cloudfunctions.cnrm.cloud.google.com/CloudFunctionsFunction/testdata/update_in_progress.yaml b/pkg/resource_customizations/cloudfunctions.cnrm.cloud.google.com/CloudFunctionsFunction/testdata/update_in_progress.yaml new file mode 100644 index 0000000..4fa4cd1 --- /dev/null +++ b/pkg/resource_customizations/cloudfunctions.cnrm.cloud.google.com/CloudFunctionsFunction/testdata/update_in_progress.yaml @@ -0,0 +1,9 @@ +apiVersion: cloudfunctions.cnrm.cloud.google.com/v1beta1 +kind: CloudFunctionsFunction +status: + conditions: + - lastTransitionTime: '2022-07-01T12:56:21Z' + message: Update in progress + reason: Updating + status: 'False' + type: Ready diff --git a/pkg/resource_customizations/cloudscheduler.cnrm.cloud.google.com/CloudSchedulerJob/health.lua b/pkg/resource_customizations/cloudscheduler.cnrm.cloud.google.com/CloudSchedulerJob/health.lua new file mode 100644 index 0000000..585b5e2 --- /dev/null +++ b/pkg/resource_customizations/cloudscheduler.cnrm.cloud.google.com/CloudSchedulerJob/health.lua @@ -0,0 +1,39 @@ +local hs = { + status = "Progressing", + message = "Update in progress" +} +if obj.status ~= nil then + if obj.status.conditions ~= nil then + for i, condition in ipairs(obj.status.conditions) do + + -- Up To Date + if condition.reason == "UpToDate" and condition.status == "True" then + hs.status = "Healthy" + hs.message = condition.message + return hs + end + + -- Update Failed + if condition.reason == "UpdateFailed" then + hs.status = "Degraded" + hs.message = condition.message + return hs + end + + -- Dependency Not Found + if condition.reason == "DependencyNotFound" then + hs.status = "Degraded" + hs.message = condition.message + return hs + end + + -- Dependency Not Ready + if condition.reason == "DependencyNotReady" then + hs.status = "Suspended" + hs.message = condition.message + return hs + end + end + end +end +return hs \ No newline at end of file diff --git a/pkg/resource_customizations/cloudscheduler.cnrm.cloud.google.com/CloudSchedulerJob/health_test.yaml b/pkg/resource_customizations/cloudscheduler.cnrm.cloud.google.com/CloudSchedulerJob/health_test.yaml new file mode 100644 index 0000000..8354cfb --- /dev/null +++ b/pkg/resource_customizations/cloudscheduler.cnrm.cloud.google.com/CloudSchedulerJob/health_test.yaml @@ -0,0 +1,21 @@ +tests: +- healthStatus: + status: Degraded + message: "Dependency not found" + inputPath: testdata/dependency_not_found.yaml +- healthStatus: + status: Suspended + message: "Dependency not ready" + inputPath: testdata/dependency_not_ready.yaml +- healthStatus: + status: Healthy + message: "The resource is up to date" + inputPath: testdata/up_to_date.yaml +- healthStatus: + status: Degraded + message: "Update failed" + inputPath: testdata/update_failed.yaml +- 
healthStatus: + status: Progressing + message: "Update in progress" + inputPath: testdata/update_in_progress.yaml diff --git a/pkg/resource_customizations/cloudscheduler.cnrm.cloud.google.com/CloudSchedulerJob/testdata/dependency_not_found.yaml b/pkg/resource_customizations/cloudscheduler.cnrm.cloud.google.com/CloudSchedulerJob/testdata/dependency_not_found.yaml new file mode 100644 index 0000000..7dd2e9b --- /dev/null +++ b/pkg/resource_customizations/cloudscheduler.cnrm.cloud.google.com/CloudSchedulerJob/testdata/dependency_not_found.yaml @@ -0,0 +1,9 @@ +apiVersion: cloudscheduler.cnrm.cloud.google.com/v1beta1 +kind: CloudSchedulerJob +status: + conditions: + - lastTransitionTime: '2022-07-01T12:56:21Z' + message: Dependency not found + reason: DependencyNotFound + status: 'False' + type: Ready diff --git a/pkg/resource_customizations/cloudscheduler.cnrm.cloud.google.com/CloudSchedulerJob/testdata/dependency_not_ready.yaml b/pkg/resource_customizations/cloudscheduler.cnrm.cloud.google.com/CloudSchedulerJob/testdata/dependency_not_ready.yaml new file mode 100644 index 0000000..f66433e --- /dev/null +++ b/pkg/resource_customizations/cloudscheduler.cnrm.cloud.google.com/CloudSchedulerJob/testdata/dependency_not_ready.yaml @@ -0,0 +1,9 @@ +apiVersion: cloudscheduler.cnrm.cloud.google.com/v1beta1 +kind: CloudSchedulerJob +status: + conditions: + - lastTransitionTime: '2022-07-01T12:56:21Z' + message: Dependency not ready + reason: DependencyNotReady + status: 'False' + type: Ready diff --git a/pkg/resource_customizations/cloudscheduler.cnrm.cloud.google.com/CloudSchedulerJob/testdata/up_to_date.yaml b/pkg/resource_customizations/cloudscheduler.cnrm.cloud.google.com/CloudSchedulerJob/testdata/up_to_date.yaml new file mode 100644 index 0000000..63803d1 --- /dev/null +++ b/pkg/resource_customizations/cloudscheduler.cnrm.cloud.google.com/CloudSchedulerJob/testdata/up_to_date.yaml @@ -0,0 +1,9 @@ +apiVersion: cloudscheduler.cnrm.cloud.google.com/v1beta1 +kind: CloudSchedulerJob +status: + conditions: + - lastTransitionTime: '2022-05-09T08:49:18Z' + message: The resource is up to date + reason: UpToDate + status: 'True' + type: Ready diff --git a/pkg/resource_customizations/cloudscheduler.cnrm.cloud.google.com/CloudSchedulerJob/testdata/update_failed.yaml b/pkg/resource_customizations/cloudscheduler.cnrm.cloud.google.com/CloudSchedulerJob/testdata/update_failed.yaml new file mode 100644 index 0000000..b568387 --- /dev/null +++ b/pkg/resource_customizations/cloudscheduler.cnrm.cloud.google.com/CloudSchedulerJob/testdata/update_failed.yaml @@ -0,0 +1,9 @@ +apiVersion: cloudscheduler.cnrm.cloud.google.com/v1beta1 +kind: CloudSchedulerJob +status: + conditions: + - lastTransitionTime: '2022-07-01T12:56:21Z' + message: Update failed + reason: UpdateFailed + status: 'False' + type: Ready diff --git a/pkg/resource_customizations/cloudscheduler.cnrm.cloud.google.com/CloudSchedulerJob/testdata/update_in_progress.yaml b/pkg/resource_customizations/cloudscheduler.cnrm.cloud.google.com/CloudSchedulerJob/testdata/update_in_progress.yaml new file mode 100644 index 0000000..3b90692 --- /dev/null +++ b/pkg/resource_customizations/cloudscheduler.cnrm.cloud.google.com/CloudSchedulerJob/testdata/update_in_progress.yaml @@ -0,0 +1,9 @@ +apiVersion: cloudscheduler.cnrm.cloud.google.com/v1beta1 +kind: CloudSchedulerJob +status: + conditions: + - lastTransitionTime: '2022-07-01T12:56:21Z' + message: Update in progress + reason: Updating + status: 'False' + type: Ready diff --git 
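The condition-to-status mapping above (UpToDate to Healthy, UpdateFailed and DependencyNotFound to Degraded, DependencyNotReady to Suspended) is repeated byte-for-byte for several Config Connector kinds in this patch. Outside Argo CD it can be exercised directly, since each script relies only on a global `obj` and returns a table with `status` and `message`. The harness below is a hypothetical sketch built on that contract; the file name and helper are not part of the patch:

  -- Hypothetical standalone harness for a health.lua script, assuming it
  -- is saved as "health.lua", reads the global `obj`, and returns `hs`.
  local function evaluate(script_path, resource)
    local chunk = assert(loadfile(script_path))  -- compile health.lua
    obj = resource                               -- scripts read the global `obj`
    return chunk()                               -- returns the `hs` table
  end

  -- A Ready condition with reason UpToDate should map to Healthy.
  local hs = evaluate("health.lua", {
    status = {
      conditions = {
        { type = "Ready", status = "True", reason = "UpToDate",
          message = "The resource is up to date" },
      },
    },
  })
  assert(hs.status == "Healthy" and hs.message == "The resource is up to date")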
a/pkg/resource_customizations/cluster.x-k8s.io/Cluster/health.lua b/pkg/resource_customizations/cluster.x-k8s.io/Cluster/health.lua new file mode 100644 index 0000000..3f02513 --- /dev/null +++ b/pkg/resource_customizations/cluster.x-k8s.io/Cluster/health.lua @@ -0,0 +1,40 @@ +function getStatusBasedOnPhase(obj, hs) + hs.status = "Progressing" + hs.message = "Waiting for clusters" + if obj.status ~= nil and obj.status.phase ~= nil then + if obj.status.phase == "Provisioned" then + hs.status = "Healthy" + hs.message = "Cluster is running" + end + if obj.status.phase == "Failed" then + hs.status = "Degraded" + hs.message = "" + end + end + return hs +end + +function getReadyContitionStatus(obj, hs) + if obj.status ~= nil and obj.status.conditions ~= nil then + for i, condition in ipairs(obj.status.conditions) do + if condition.type == "Ready" and condition.status == "False" then + hs.status = "Degraded" + hs.message = condition.message + return hs + end + end + end + return hs +end + +local hs = {} +if obj.spec.paused ~= nil and obj.spec.paused then + hs.status = "Suspended" + hs.message = "Cluster is paused" + return hs +end + +getStatusBasedOnPhase(obj, hs) +getReadyContitionStatus(obj, hs) + +return hs diff --git a/pkg/resource_customizations/cluster.x-k8s.io/Cluster/health_test.yaml b/pkg/resource_customizations/cluster.x-k8s.io/Cluster/health_test.yaml new file mode 100644 index 0000000..f147587 --- /dev/null +++ b/pkg/resource_customizations/cluster.x-k8s.io/Cluster/health_test.yaml @@ -0,0 +1,21 @@ +tests: +- healthStatus: + status: Progressing + message: 'Waiting for clusters' + inputPath: testdata/progressing_provisioning.yaml +- healthStatus: + status: Degraded + message: 'Error message' + inputPath: testdata/degraded_failed.yaml +- healthStatus: + status: Suspended + message: 'Cluster is paused' + inputPath: testdata/suspended_paused.yaml +- healthStatus: + status: Healthy + message: 'Cluster is running' + inputPath: testdata/healthy_provisioned.yaml +- healthStatus: + status: Degraded + message: 'Post "https://tvc01.foo.bar/sdk": host "tvc01.foo.bar:443" thumbprint does not match "0A:21:BD:FC:71:40:BD:96"' + inputPath: testdata/error_provisioned.yaml diff --git a/pkg/resource_customizations/cluster.x-k8s.io/Cluster/testdata/degraded_failed.yaml b/pkg/resource_customizations/cluster.x-k8s.io/Cluster/testdata/degraded_failed.yaml new file mode 100644 index 0000000..1ee7467 --- /dev/null +++ b/pkg/resource_customizations/cluster.x-k8s.io/Cluster/testdata/degraded_failed.yaml @@ -0,0 +1,41 @@ +apiVersion: cluster.x-k8s.io/v1alpha3 +kind: Cluster +metadata: + labels: + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/version: 0.3.11 + argocd.argoproj.io/instance: test + cluster.x-k8s.io/cluster-name: test + name: test + namespace: test +spec: + clusterNetwork: + pods: + cidrBlocks: + - 10.20.10.0/19 + services: + cidrBlocks: + - 10.10.10.0/19 + controlPlaneRef: + apiVersion: controlplane.cluster.x-k8s.io/v1alpha3 + kind: KubeadmControlPlane + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 + kind: VSphereCluster +status: + conditions: + - lastTransitionTime: '2020-12-21T07:41:10Z' + status: 'False' + message: "Error message" + type: Ready + - lastTransitionTime: '2020-12-29T09:16:28Z' + status: 'True' + type: ControlPlaneReady + - lastTransitionTime: '2020-11-24T09:15:24Z' + status: 'True' + type: InfrastructureReady + controlPlaneInitialized: true + controlPlaneReady: true + infrastructureReady: true + observedGeneration: 4 + phase: Failed diff --git 
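Evaluation order is the point of this Cluster script: a pause short-circuits everything, the phase then sets a baseline, and a False Ready condition downgrades the verdict last. That ordering is why degraded_failed.yaml yields the condition's "Error message" rather than the empty string assigned for the Failed phase, and why the error_provisioned.yaml case in health_test.yaml is Degraded despite phase Provisioned. A condensed restatement (a sketch, not the shipped script):

  -- Condensed, hypothetical restatement of the evaluation order above:
  -- paused wins, phase sets a baseline, a False Ready condition overrides.
  local function clusterHealth(obj)
    if obj.spec.paused then
      return { status = "Suspended", message = "Cluster is paused" }
    end
    local hs = { status = "Progressing", message = "Waiting for clusters" }
    local phase = obj.status and obj.status.phase
    if phase == "Provisioned" then hs = { status = "Healthy", message = "Cluster is running" } end
    if phase == "Failed" then hs = { status = "Degraded", message = "" } end
    for _, c in ipairs((obj.status and obj.status.conditions) or {}) do
      if c.type == "Ready" and c.status == "False" then
        return { status = "Degraded", message = c.message }
      end
    end
    return hs
  end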
a/pkg/resource_customizations/cluster.x-k8s.io/Cluster/testdata/error_provisioned.yaml b/pkg/resource_customizations/cluster.x-k8s.io/Cluster/testdata/error_provisioned.yaml new file mode 100644 index 0000000..e58123a --- /dev/null +++ b/pkg/resource_customizations/cluster.x-k8s.io/Cluster/testdata/error_provisioned.yaml @@ -0,0 +1,54 @@ +apiVersion: cluster.x-k8s.io/v1alpha3 +kind: Cluster +metadata: + labels: + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/version: 0.3.11 + argocd.argoproj.io/instance: test + cluster.x-k8s.io/cluster-name: test + name: test + namespace: test +spec: + clusterNetwork: + pods: + cidrBlocks: + - 10.20.10.0/19 + services: + cidrBlocks: + - 10.10.10.0/19 + controlPlaneRef: + apiVersion: controlplane.cluster.x-k8s.io/v1alpha3 + kind: KubeadmControlPlane + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 + kind: VSphereCluster +status: + conditions: + - lastTransitionTime: '2022-12-14T07:45:14Z' + message: >- + Post "https://tvc01.foo.bar/sdk": host "tvc01.foo.bar:443" + thumbprint does not match + "0A:21:BD:FC:71:40:BD:96" + reason: VCenterUnreachable + severity: Error + status: 'False' + type: Ready + - lastTransitionTime: '2022-11-30T12:04:22Z' + status: 'True' + type: ControlPlaneInitialized + - lastTransitionTime: '2022-11-30T12:10:30Z' + status: 'True' + type: ControlPlaneReady + - lastTransitionTime: '2022-12-14T07:45:14Z' + message: >- + Post "https://tvc01.foo.bar/sdk": host "tvc01.foo.bar:443" + thumbprint does not match + "0A:21:BD:FC:71:40:BD:96" + reason: VCenterUnreachable + severity: Error + status: 'False' + type: InfrastructureReady + controlPlaneReady: true + infrastructureReady: true + observedGeneration: 2 + phase: Provisioned diff --git a/pkg/resource_customizations/cluster.x-k8s.io/Cluster/testdata/healthy_provisioned.yaml b/pkg/resource_customizations/cluster.x-k8s.io/Cluster/testdata/healthy_provisioned.yaml new file mode 100644 index 0000000..43c6f76 --- /dev/null +++ b/pkg/resource_customizations/cluster.x-k8s.io/Cluster/testdata/healthy_provisioned.yaml @@ -0,0 +1,40 @@ +apiVersion: cluster.x-k8s.io/v1alpha3 +kind: Cluster +metadata: + labels: + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/version: 0.3.11 + argocd.argoproj.io/instance: test + cluster.x-k8s.io/cluster-name: test + name: test + namespace: test +spec: + clusterNetwork: + pods: + cidrBlocks: + - 10.20.10.0/19 + services: + cidrBlocks: + - 10.10.10.0/19 + controlPlaneRef: + apiVersion: controlplane.cluster.x-k8s.io/v1alpha3 + kind: KubeadmControlPlane + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 + kind: VSphereCluster +status: + conditions: + - lastTransitionTime: '2020-12-29T09:16:28Z' + status: 'True' + type: Ready + - lastTransitionTime: '2020-12-29T09:16:28Z' + status: 'True' + type: ControlPlaneReady + - lastTransitionTime: '2020-11-24T09:15:24Z' + status: 'True' + type: InfrastructureReady + controlPlaneInitialized: true + controlPlaneReady: true + infrastructureReady: true + observedGeneration: 4 + phase: Provisioned diff --git a/pkg/resource_customizations/cluster.x-k8s.io/Cluster/testdata/progressing_provisioning.yaml b/pkg/resource_customizations/cluster.x-k8s.io/Cluster/testdata/progressing_provisioning.yaml new file mode 100644 index 0000000..f0befbe --- /dev/null +++ b/pkg/resource_customizations/cluster.x-k8s.io/Cluster/testdata/progressing_provisioning.yaml @@ -0,0 +1,40 @@ +apiVersion: cluster.x-k8s.io/v1alpha3 +kind: Cluster +metadata: + labels: + app.kubernetes.io/managed-by: Helm + 
app.kubernetes.io/version: 0.3.11 + argocd.argoproj.io/instance: test + cluster.x-k8s.io/cluster-name: test + name: test + namespace: test +spec: + clusterNetwork: + pods: + cidrBlocks: + - 10.20.10.0/19 + services: + cidrBlocks: + - 10.10.10.0/19 + controlPlaneRef: + apiVersion: controlplane.cluster.x-k8s.io/v1alpha3 + kind: KubeadmControlPlane + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 + kind: VSphereCluster +status: + conditions: + - lastTransitionTime: '2020-12-29T09:16:28Z' + status: 'True' + type: Ready + - lastTransitionTime: '2020-12-29T09:16:28Z' + status: 'True' + type: ControlPlaneReady + - lastTransitionTime: '2020-11-24T09:15:24Z' + status: 'True' + type: InfrastructureReady + controlPlaneInitialized: true + controlPlaneReady: true + infrastructureReady: true + observedGeneration: 4 + phase: Provisioning diff --git a/pkg/resource_customizations/cluster.x-k8s.io/Cluster/testdata/suspended_paused.yaml b/pkg/resource_customizations/cluster.x-k8s.io/Cluster/testdata/suspended_paused.yaml new file mode 100644 index 0000000..97db878 --- /dev/null +++ b/pkg/resource_customizations/cluster.x-k8s.io/Cluster/testdata/suspended_paused.yaml @@ -0,0 +1,42 @@ +apiVersion: cluster.x-k8s.io/v1alpha3 +kind: Cluster +metadata: + labels: + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/version: 0.3.11 + argocd.argoproj.io/instance: test + cluster.x-k8s.io/cluster-name: test + name: test + namespace: test +spec: + paused: true + clusterNetwork: + pods: + cidrBlocks: + - 10.20.10.0/19 + services: + cidrBlocks: + - 10.10.10.0/19 + controlPlaneRef: + apiVersion: controlplane.cluster.x-k8s.io/v1alpha3 + kind: KubeadmControlPlane + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 + kind: VSphereCluster +status: + conditions: + - lastTransitionTime: '2020-12-21T07:41:10Z' + status: 'False' + message: "Error message" + type: Ready + - lastTransitionTime: '2020-12-29T09:16:28Z' + status: 'True' + type: ControlPlaneReady + - lastTransitionTime: '2020-11-24T09:15:24Z' + status: 'True' + type: InfrastructureReady + controlPlaneInitialized: true + controlPlaneReady: true + infrastructureReady: true + observedGeneration: 4 + phase: Failed diff --git a/pkg/resource_customizations/cluster.x-k8s.io/Machine/health.lua b/pkg/resource_customizations/cluster.x-k8s.io/Machine/health.lua new file mode 100644 index 0000000..146b795 --- /dev/null +++ b/pkg/resource_customizations/cluster.x-k8s.io/Machine/health.lua @@ -0,0 +1,34 @@ +function getStatusBasedOnPhase(obj) + local hs = {} + hs.status = "Progressing" + hs.message = "Waiting for machines" + if obj.status ~= nil and obj.status.phase ~= nil then + if obj.status.phase == "Running" then + hs.status = "Healthy" + hs.message = "Machine is running" + end + if obj.status.phase == "Failed" then + hs.status = "Degraded" + hs.message = "" + end + end + return hs +end + +function getReadyContitionMessage(obj) + if obj.status ~= nil and obj.status.conditions ~= nil then + for i, condition in ipairs(obj.status.conditions) do + if condition.type == "Ready" and condition.status == "False" then + return condition.message + end + end + end + return "Condition is unknown" +end + +local hs = getStatusBasedOnPhase(obj) +if hs.status ~= "Healthy" then + hs.message = getReadyContitionMessage(obj) +end + +return hs \ No newline at end of file diff --git a/pkg/resource_customizations/cluster.x-k8s.io/Machine/health_test.yaml b/pkg/resource_customizations/cluster.x-k8s.io/Machine/health_test.yaml new file mode 100644 
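For Machine, only the Healthy outcome keeps the phase-derived message; anything else borrows the False Ready condition's message via getReadyContitionMessage, falling back to "Condition is unknown", which is how progressing_boot.yaml ends up reporting "1 of 2 completed". A self-contained sketch of that fallback (the function name here is hypothetical):

  -- Hypothetical restatement of the message fallback used above.
  local function readyMessage(obj)
    for _, c in ipairs((obj.status and obj.status.conditions) or {}) do
      if c.type == "Ready" and c.status == "False" then return c.message end
    end
    return "Condition is unknown"
  end

  local machine = {
    status = {
      phase = "Provisioning",
      conditions = {
        { type = "Ready", status = "False", message = "1 of 2 completed" },
      },
    },
  }
  -- The phase baseline would say "Waiting for machines"; the Ready
  -- condition's message replaces it, matching progressing_boot.yaml.
  assert(readyMessage(machine) == "1 of 2 completed")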
index 0000000..bd1a878 --- /dev/null +++ b/pkg/resource_customizations/cluster.x-k8s.io/Machine/health_test.yaml @@ -0,0 +1,13 @@ +tests: +- healthStatus: + status: Progressing + message: '1 of 2 completed' + inputPath: testdata/progressing_boot.yaml +- healthStatus: + status: Degraded + message: 'Error message' + inputPath: testdata/degraded_failed.yaml +- healthStatus: + status: Healthy + message: 'Machine is running' + inputPath: testdata/healthy_running.yaml diff --git a/pkg/resource_customizations/cluster.x-k8s.io/Machine/testdata/degraded_failed.yaml b/pkg/resource_customizations/cluster.x-k8s.io/Machine/testdata/degraded_failed.yaml new file mode 100644 index 0000000..472f967 --- /dev/null +++ b/pkg/resource_customizations/cluster.x-k8s.io/Machine/testdata/degraded_failed.yaml @@ -0,0 +1,44 @@ +apiVersion: cluster.x-k8s.io/v1alpha3 +kind: Machine +metadata: + labels: + cluster.x-k8s.io/cluster-name: test + name: test-md-0-6cb7d48f56-frtdw + namespace: test +spec: + bootstrap: + configRef: + apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3 + kind: KubeadmConfig + clusterName: test + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 + kind: VSphereMachine + version: v1.18.10 +status: + addresses: + - address: 10.10.10.10 + type: ExternalIP + bootstrapReady: true + conditions: + - lastTransitionTime: '2020-12-21T07:41:10Z' + status: 'False' + message: "Error message" + type: Ready + - lastTransitionTime: '2020-12-21T07:37:54Z' + status: 'True' + type: BootstrapReady + - lastTransitionTime: '2020-12-21T07:41:10Z' + status: 'True' + type: InfrastructureReady + - lastTransitionTime: '2020-12-21T07:42:11Z' + status: 'True' + type: NodeHealthy + infrastructureReady: true + lastUpdated: '2020-12-21T07:41:32Z' + nodeRef: + apiVersion: v1 + kind: Node + name: test-md-0-6cb7d48f56-frtdw + observedGeneration: 3 + phase: Failed diff --git a/pkg/resource_customizations/cluster.x-k8s.io/Machine/testdata/healthy_running.yaml b/pkg/resource_customizations/cluster.x-k8s.io/Machine/testdata/healthy_running.yaml new file mode 100644 index 0000000..acd9b90 --- /dev/null +++ b/pkg/resource_customizations/cluster.x-k8s.io/Machine/testdata/healthy_running.yaml @@ -0,0 +1,43 @@ +apiVersion: cluster.x-k8s.io/v1alpha3 +kind: Machine +metadata: + labels: + cluster.x-k8s.io/cluster-name: test + name: test-md-0-6cb7d48f56-frtdw + namespace: test +spec: + bootstrap: + configRef: + apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3 + kind: KubeadmConfig + clusterName: test-cl2 + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 + kind: VSphereMachine + version: v1.18.10 +status: + addresses: + - address: 10.10.10.10 + type: ExternalIP + bootstrapReady: true + conditions: + - lastTransitionTime: '2020-12-21T07:41:10Z' + status: 'True' + type: Ready + - lastTransitionTime: '2020-12-21T07:37:54Z' + status: 'True' + type: BootstrapReady + - lastTransitionTime: '2020-12-21T07:41:10Z' + status: 'True' + type: InfrastructureReady + - lastTransitionTime: '2020-12-21T07:42:11Z' + status: 'True' + type: NodeHealthy + infrastructureReady: true + lastUpdated: '2020-12-21T07:41:32Z' + nodeRef: + apiVersion: v1 + kind: Node + name: test-md-0-6cb7d48f56-frtdw + observedGeneration: 3 + phase: Running diff --git a/pkg/resource_customizations/cluster.x-k8s.io/Machine/testdata/progressing_boot.yaml b/pkg/resource_customizations/cluster.x-k8s.io/Machine/testdata/progressing_boot.yaml new file mode 100644 index 0000000..bd6cb55 --- /dev/null +++ 
b/pkg/resource_customizations/cluster.x-k8s.io/Machine/testdata/progressing_boot.yaml @@ -0,0 +1,46 @@ +apiVersion: cluster.x-k8s.io/v1alpha3 +kind: Machine +metadata: + labels: + cluster.x-k8s.io/cluster-name: test + name: test-md-0-6cb7d48f56-frtdw + namespace: test +spec: + bootstrap: + configRef: + apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3 + kind: KubeadmConfig + clusterName: test-cl2 + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 + kind: VSphereMachine + version: v1.18.10 +status: + addresses: + - address: 10.10.10.10 + type: ExternalIP + bootstrapReady: true + conditions: + - lastTransitionTime: '2020-12-29T09:41:28Z' + message: 1 of 2 completed + reason: Cloning + severity: Info + status: 'False' + type: Ready + - lastTransitionTime: '2020-12-21T07:37:54Z' + status: 'True' + type: BootstrapReady + - lastTransitionTime: '2020-12-21T07:41:10Z' + status: 'True' + type: InfrastructureReady + - lastTransitionTime: '2020-12-21T07:42:11Z' + status: 'True' + type: NodeHealthy + infrastructureReady: true + lastUpdated: '2020-12-21T07:41:32Z' + nodeRef: + apiVersion: v1 + kind: Node + name: test-md-0-6cb7d48f56-frtdw + observedGeneration: 3 + phase: Provisioning diff --git a/pkg/resource_customizations/cluster.x-k8s.io/MachineDeployment/health.lua b/pkg/resource_customizations/cluster.x-k8s.io/MachineDeployment/health.lua new file mode 100644 index 0000000..14b0103 --- /dev/null +++ b/pkg/resource_customizations/cluster.x-k8s.io/MachineDeployment/health.lua @@ -0,0 +1,30 @@ +local hs = {} +hs.status = "Progressing" +hs.message = "Waiting for machines" + +if obj.spec.paused ~= nil and obj.spec.paused then + hs.status = "Suspended" + hs.message = "MachineDeployment is paused" + return hs +end + +if obj.status ~= nil and obj.status.phase ~= nil then + if obj.status.phase == "Running" then + hs.status = "Healthy" + hs.message = "Machines are running under this deployment" + end + if obj.status.phase == "ScalingUp" then + hs.status = "Progressing" + hs.message = "Cluster is spawning machines" + end + if obj.status.phase == "ScalingDown" then + hs.status = "Progressing" + hs.message = "Cluster is stopping machines" + end + if obj.status.phase == "Failed" then + hs.status = "Degraded" + hs.message = "MachineDeployment is failed" + end +end + +return hs \ No newline at end of file diff --git a/pkg/resource_customizations/cluster.x-k8s.io/MachineDeployment/health_test.yaml b/pkg/resource_customizations/cluster.x-k8s.io/MachineDeployment/health_test.yaml new file mode 100644 index 0000000..351811e --- /dev/null +++ b/pkg/resource_customizations/cluster.x-k8s.io/MachineDeployment/health_test.yaml @@ -0,0 +1,21 @@ +tests: +- healthStatus: + status: Progressing + message: 'Cluster is spawning machines' + inputPath: testdata/progressing_ScalingUp.yaml +- healthStatus: + status: Progressing + message: 'Cluster is stopping machines' + inputPath: testdata/progressing_ScalingDown.yaml +- healthStatus: + status: Degraded + message: 'MachineDeployment is failed' + inputPath: testdata/degraded_failed.yaml +- healthStatus: + status: Suspended + message: 'MachineDeployment is paused' + inputPath: testdata/suspended_paused.yaml +- healthStatus: + status: Healthy + message: 'Machines are running under this deployment' + inputPath: testdata/healthy_provisioned.yaml diff --git a/pkg/resource_customizations/cluster.x-k8s.io/MachineDeployment/testdata/degraded_failed.yaml b/pkg/resource_customizations/cluster.x-k8s.io/MachineDeployment/testdata/degraded_failed.yaml new file mode 100644 
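The MachineDeployment check is a pure phase dispatch with a paused override; no replica counts are consulted. An equivalent table-driven form (a refactoring sketch, not the shipped script) makes the mapping easier to scan:

  -- Hypothetical table-driven equivalent of the phase mapping above;
  -- paused still takes priority over any phase, as in suspended_paused.yaml.
  local phaseMap = {
    Running     = { status = "Healthy",     message = "Machines are running under this deployment" },
    ScalingUp   = { status = "Progressing", message = "Cluster is spawning machines" },
    ScalingDown = { status = "Progressing", message = "Cluster is stopping machines" },
    Failed      = { status = "Degraded",    message = "MachineDeployment is failed" },
  }
  local function mdHealth(obj)
    if obj.spec.paused then
      return { status = "Suspended", message = "MachineDeployment is paused" }
    end
    local phase = obj.status and obj.status.phase
    return phaseMap[phase] or { status = "Progressing", message = "Waiting for machines" }
  end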
index 0000000..ba10af5 --- /dev/null +++ b/pkg/resource_customizations/cluster.x-k8s.io/MachineDeployment/testdata/degraded_failed.yaml @@ -0,0 +1,48 @@ +apiVersion: cluster.x-k8s.io/v1alpha3 +kind: MachineDeployment +metadata: + generation: 3 + labels: + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/version: 0.3.11 + argocd.argoproj.io/instance: test + cluster.x-k8s.io/cluster-name: test + name: test-md-0 + namespace: test +spec: + clusterName: test + minReadySeconds: 0 + progressDeadlineSeconds: 600 + replicas: 5 + revisionHistoryLimit: 1 + selector: + matchLabels: + cluster.x-k8s.io/cluster-name: test + cluster.x-k8s.io/deployment-name: teszst-md-0 + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 0 + type: RollingUpdate + template: + metadata: + labels: + cluster.x-k8s.io/cluster-name: test + cluster.x-k8s.io/deployment-name: teszst-md-0 + spec: + bootstrap: + configRef: + apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3 + kind: KubeadmConfigTemplate + clusterName: test + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 + kind: VSphereMachineTemplate + version: v1.18.10 +status: + availableReplicas: 5 + observedGeneration: 3 + phase: Failed + readyReplicas: 5 + replicas: 5 + updatedReplicas: 5 diff --git a/pkg/resource_customizations/cluster.x-k8s.io/MachineDeployment/testdata/healthy_provisioned.yaml b/pkg/resource_customizations/cluster.x-k8s.io/MachineDeployment/testdata/healthy_provisioned.yaml new file mode 100644 index 0000000..c165d49 --- /dev/null +++ b/pkg/resource_customizations/cluster.x-k8s.io/MachineDeployment/testdata/healthy_provisioned.yaml @@ -0,0 +1,48 @@ +apiVersion: cluster.x-k8s.io/v1alpha3 +kind: MachineDeployment +metadata: + generation: 3 + labels: + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/version: 0.3.11 + argocd.argoproj.io/instance: test + cluster.x-k8s.io/cluster-name: test + name: test-md-0 + namespace: test +spec: + clusterName: test + minReadySeconds: 0 + progressDeadlineSeconds: 600 + replicas: 5 + revisionHistoryLimit: 1 + selector: + matchLabels: + cluster.x-k8s.io/cluster-name: test + cluster.x-k8s.io/deployment-name: teszst-md-0 + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 0 + type: RollingUpdate + template: + metadata: + labels: + cluster.x-k8s.io/cluster-name: test + cluster.x-k8s.io/deployment-name: teszst-md-0 + spec: + bootstrap: + configRef: + apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3 + kind: KubeadmConfigTemplate + clusterName: test + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 + kind: VSphereMachineTemplate + version: v1.18.10 +status: + availableReplicas: 5 + observedGeneration: 3 + phase: Running + readyReplicas: 5 + replicas: 5 + updatedReplicas: 5 diff --git a/pkg/resource_customizations/cluster.x-k8s.io/MachineDeployment/testdata/progressing_ScalingDown.yaml b/pkg/resource_customizations/cluster.x-k8s.io/MachineDeployment/testdata/progressing_ScalingDown.yaml new file mode 100644 index 0000000..ccbb34e --- /dev/null +++ b/pkg/resource_customizations/cluster.x-k8s.io/MachineDeployment/testdata/progressing_ScalingDown.yaml @@ -0,0 +1,48 @@ +apiVersion: cluster.x-k8s.io/v1alpha3 +kind: MachineDeployment +metadata: + generation: 3 + labels: + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/version: 0.3.11 + argocd.argoproj.io/instance: test + cluster.x-k8s.io/cluster-name: test + name: test-md-0 + namespace: test +spec: + clusterName: test + minReadySeconds: 0 + progressDeadlineSeconds: 600 + replicas: 5 + 
revisionHistoryLimit: 1 + selector: + matchLabels: + cluster.x-k8s.io/cluster-name: test + cluster.x-k8s.io/deployment-name: teszst-md-0 + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 0 + type: RollingUpdate + template: + metadata: + labels: + cluster.x-k8s.io/cluster-name: test + cluster.x-k8s.io/deployment-name: teszst-md-0 + spec: + bootstrap: + configRef: + apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3 + kind: KubeadmConfigTemplate + clusterName: test + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 + kind: VSphereMachineTemplate + version: v1.18.10 +status: + availableReplicas: 6 + observedGeneration: 3 + phase: ScalingDown + readyReplicas: 6 + replicas: 5 + updatedReplicas: 5 diff --git a/pkg/resource_customizations/cluster.x-k8s.io/MachineDeployment/testdata/progressing_ScalingUp.yaml b/pkg/resource_customizations/cluster.x-k8s.io/MachineDeployment/testdata/progressing_ScalingUp.yaml new file mode 100644 index 0000000..af4af99 --- /dev/null +++ b/pkg/resource_customizations/cluster.x-k8s.io/MachineDeployment/testdata/progressing_ScalingUp.yaml @@ -0,0 +1,48 @@ +apiVersion: cluster.x-k8s.io/v1alpha3 +kind: MachineDeployment +metadata: + generation: 3 + labels: + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/version: 0.3.11 + argocd.argoproj.io/instance: test + cluster.x-k8s.io/cluster-name: test + name: test-md-0 + namespace: test +spec: + clusterName: test + minReadySeconds: 0 + progressDeadlineSeconds: 600 + replicas: 5 + revisionHistoryLimit: 1 + selector: + matchLabels: + cluster.x-k8s.io/cluster-name: test + cluster.x-k8s.io/deployment-name: teszst-md-0 + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 0 + type: RollingUpdate + template: + metadata: + labels: + cluster.x-k8s.io/cluster-name: test + cluster.x-k8s.io/deployment-name: teszst-md-0 + spec: + bootstrap: + configRef: + apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3 + kind: KubeadmConfigTemplate + clusterName: test + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 + kind: VSphereMachineTemplate + version: v1.18.10 +status: + availableReplicas: 4 + observedGeneration: 3 + phase: ScalingUp + readyReplicas: 4 + replicas: 5 + updatedReplicas: 5 diff --git a/pkg/resource_customizations/cluster.x-k8s.io/MachineDeployment/testdata/suspended_paused.yaml b/pkg/resource_customizations/cluster.x-k8s.io/MachineDeployment/testdata/suspended_paused.yaml new file mode 100644 index 0000000..d297f57 --- /dev/null +++ b/pkg/resource_customizations/cluster.x-k8s.io/MachineDeployment/testdata/suspended_paused.yaml @@ -0,0 +1,49 @@ +apiVersion: cluster.x-k8s.io/v1alpha3 +kind: MachineDeployment +metadata: + generation: 3 + labels: + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/version: 0.3.11 + argocd.argoproj.io/instance: test + cluster.x-k8s.io/cluster-name: test + name: test-md-0 + namespace: test +spec: + paused: true + clusterName: test + minReadySeconds: 0 + progressDeadlineSeconds: 600 + replicas: 5 + revisionHistoryLimit: 1 + selector: + matchLabels: + cluster.x-k8s.io/cluster-name: test + cluster.x-k8s.io/deployment-name: teszst-md-0 + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 0 + type: RollingUpdate + template: + metadata: + labels: + cluster.x-k8s.io/cluster-name: test + cluster.x-k8s.io/deployment-name: teszst-md-0 + spec: + bootstrap: + configRef: + apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3 + kind: KubeadmConfigTemplate + clusterName: test + infrastructureRef: + apiVersion: 
infrastructure.cluster.x-k8s.io/v1alpha3 + kind: VSphereMachineTemplate + version: v1.18.10 +status: + availableReplicas: 5 + observedGeneration: 3 + phase: Running + readyReplicas: 5 + replicas: 5 + updatedReplicas: 5 diff --git a/pkg/resource_customizations/cluster.x-k8s.io/MachineHealthCheck/health.lua b/pkg/resource_customizations/cluster.x-k8s.io/MachineHealthCheck/health.lua new file mode 100644 index 0000000..b8cff71 --- /dev/null +++ b/pkg/resource_customizations/cluster.x-k8s.io/MachineHealthCheck/health.lua @@ -0,0 +1,14 @@ +local hs = {} + +hs.status = "Progressing" +hs.message = "" + +if obj.status ~= nil and obj.status.currentHealthy ~= nil then + if obj.status.expectedMachines == obj.status.currentHealthy then + hs.status = "Healthy" + else + hs.status = "Degraded" + end +end + +return hs diff --git a/pkg/resource_customizations/cluster.x-k8s.io/MachineHealthCheck/health_test.yaml b/pkg/resource_customizations/cluster.x-k8s.io/MachineHealthCheck/health_test.yaml new file mode 100644 index 0000000..af03a34 --- /dev/null +++ b/pkg/resource_customizations/cluster.x-k8s.io/MachineHealthCheck/health_test.yaml @@ -0,0 +1,9 @@ +tests: +- healthStatus: + status: Degraded + message: '' + inputPath: testdata/degraded_expectedMachines.yaml +- healthStatus: + status: Healthy + message: '' + inputPath: testdata/healthy.yaml diff --git a/pkg/resource_customizations/cluster.x-k8s.io/MachineHealthCheck/testdata/degraded_expectedMachines.yaml b/pkg/resource_customizations/cluster.x-k8s.io/MachineHealthCheck/testdata/degraded_expectedMachines.yaml new file mode 100644 index 0000000..8e1bc9b --- /dev/null +++ b/pkg/resource_customizations/cluster.x-k8s.io/MachineHealthCheck/testdata/degraded_expectedMachines.yaml @@ -0,0 +1,33 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: MachineHealthCheck +metadata: + labels: + cluster.x-k8s.io/cluster-name: test + name: test-node-unhealthy-5m +spec: + clusterName: test + maxUnhealthy: 100% + nodeStartupTimeout: 10m0s + selector: + matchLabels: + cluster.x-k8s.io/deployment-name: test-md-workers-0 + unhealthyConditions: + - status: Unknown + timeout: 5m + type: Ready + - status: "False" + timeout: 5m + type: Ready +status: + conditions: + - lastTransitionTime: "2022-10-07T10:33:46Z" + status: "True" + type: RemediationAllowed + currentHealthy: 1 + expectedMachines: 3 + observedGeneration: 3 + remediationsAllowed: 1 + targets: + - test-md-workers-0-76f7db5786-8nl6m + - test-md-workers-0-76f7db5786-jjzvf + - test-md-workers-0-76f7db5786-l4vfb diff --git a/pkg/resource_customizations/cluster.x-k8s.io/MachineHealthCheck/testdata/healthy.yaml b/pkg/resource_customizations/cluster.x-k8s.io/MachineHealthCheck/testdata/healthy.yaml new file mode 100644 index 0000000..cc2e490 --- /dev/null +++ b/pkg/resource_customizations/cluster.x-k8s.io/MachineHealthCheck/testdata/healthy.yaml @@ -0,0 +1,33 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: MachineHealthCheck +metadata: + labels: + cluster.x-k8s.io/cluster-name: test + name: test-node-unhealthy-5m +spec: + clusterName: test + maxUnhealthy: 100% + nodeStartupTimeout: 10m0s + selector: + matchLabels: + cluster.x-k8s.io/deployment-name: test-md-workers-0 + unhealthyConditions: + - status: Unknown + timeout: 5m + type: Ready + - status: "False" + timeout: 5m + type: Ready +status: + conditions: + - lastTransitionTime: "2022-10-07T10:33:46Z" + status: "True" + type: RemediationAllowed + currentHealthy: 3 + expectedMachines: 3 + observedGeneration: 3 + remediationsAllowed: 1 + targets: + - 
test-md-workers-0-76f7db5786-8nl6m + - test-md-workers-0-76f7db5786-jjzvf + - test-md-workers-0-76f7db5786-l4vfb diff --git a/pkg/resource_customizations/compute.cnrm.cloud.google.com/ComputeDisk/health.lua b/pkg/resource_customizations/compute.cnrm.cloud.google.com/ComputeDisk/health.lua new file mode 100644 index 0000000..585b5e2 --- /dev/null +++ b/pkg/resource_customizations/compute.cnrm.cloud.google.com/ComputeDisk/health.lua @@ -0,0 +1,39 @@ +local hs = { + status = "Progressing", + message = "Update in progress" +} +if obj.status ~= nil then + if obj.status.conditions ~= nil then + for i, condition in ipairs(obj.status.conditions) do + + -- Up To Date + if condition.reason == "UpToDate" and condition.status == "True" then + hs.status = "Healthy" + hs.message = condition.message + return hs + end + + -- Update Failed + if condition.reason == "UpdateFailed" then + hs.status = "Degraded" + hs.message = condition.message + return hs + end + + -- Dependency Not Found + if condition.reason == "DependencyNotFound" then + hs.status = "Degraded" + hs.message = condition.message + return hs + end + + -- Dependency Not Ready + if condition.reason == "DependencyNotReady" then + hs.status = "Suspended" + hs.message = condition.message + return hs + end + end + end +end +return hs \ No newline at end of file diff --git a/pkg/resource_customizations/compute.cnrm.cloud.google.com/ComputeDisk/health_test.yaml b/pkg/resource_customizations/compute.cnrm.cloud.google.com/ComputeDisk/health_test.yaml new file mode 100644 index 0000000..8354cfb --- /dev/null +++ b/pkg/resource_customizations/compute.cnrm.cloud.google.com/ComputeDisk/health_test.yaml @@ -0,0 +1,21 @@ +tests: +- healthStatus: + status: Degraded + message: "Dependency not found" + inputPath: testdata/dependency_not_found.yaml +- healthStatus: + status: Suspended + message: "Dependency not ready" + inputPath: testdata/dependency_not_ready.yaml +- healthStatus: + status: Healthy + message: "The resource is up to date" + inputPath: testdata/up_to_date.yaml +- healthStatus: + status: Degraded + message: "Update failed" + inputPath: testdata/update_failed.yaml +- healthStatus: + status: Progressing + message: "Update in progress" + inputPath: testdata/update_in_progress.yaml diff --git a/pkg/resource_customizations/compute.cnrm.cloud.google.com/ComputeDisk/testdata/dependency_not_found.yaml b/pkg/resource_customizations/compute.cnrm.cloud.google.com/ComputeDisk/testdata/dependency_not_found.yaml new file mode 100644 index 0000000..0a20522 --- /dev/null +++ b/pkg/resource_customizations/compute.cnrm.cloud.google.com/ComputeDisk/testdata/dependency_not_found.yaml @@ -0,0 +1,9 @@ +apiVersion: compute.cnrm.cloud.google.com/v1beta1 +kind: ComputeDisk +status: + conditions: + - lastTransitionTime: '2022-07-01T12:56:21Z' + message: Dependency not found + reason: DependencyNotFound + status: 'False' + type: Ready diff --git a/pkg/resource_customizations/compute.cnrm.cloud.google.com/ComputeDisk/testdata/dependency_not_ready.yaml b/pkg/resource_customizations/compute.cnrm.cloud.google.com/ComputeDisk/testdata/dependency_not_ready.yaml new file mode 100644 index 0000000..abc75e5 --- /dev/null +++ b/pkg/resource_customizations/compute.cnrm.cloud.google.com/ComputeDisk/testdata/dependency_not_ready.yaml @@ -0,0 +1,9 @@ +apiVersion: compute.cnrm.cloud.google.com/v1beta1 +kind: ComputeDisk +status: + conditions: + - lastTransitionTime: '2022-07-01T12:56:21Z' + message: Dependency not ready + reason: DependencyNotReady + status: 'False' + type: Ready diff 
--git a/pkg/resource_customizations/compute.cnrm.cloud.google.com/ComputeDisk/testdata/up_to_date.yaml b/pkg/resource_customizations/compute.cnrm.cloud.google.com/ComputeDisk/testdata/up_to_date.yaml new file mode 100644 index 0000000..42d206c --- /dev/null +++ b/pkg/resource_customizations/compute.cnrm.cloud.google.com/ComputeDisk/testdata/up_to_date.yaml @@ -0,0 +1,9 @@ +apiVersion: compute.cnrm.cloud.google.com/v1beta1 +kind: ComputeDisk +status: + conditions: + - lastTransitionTime: '2022-05-09T08:49:18Z' + message: The resource is up to date + reason: UpToDate + status: 'True' + type: Ready diff --git a/pkg/resource_customizations/compute.cnrm.cloud.google.com/ComputeDisk/testdata/update_failed.yaml b/pkg/resource_customizations/compute.cnrm.cloud.google.com/ComputeDisk/testdata/update_failed.yaml new file mode 100644 index 0000000..e2bb1e6 --- /dev/null +++ b/pkg/resource_customizations/compute.cnrm.cloud.google.com/ComputeDisk/testdata/update_failed.yaml @@ -0,0 +1,9 @@ +apiVersion: compute.cnrm.cloud.google.com/v1beta1 +kind: ComputeDisk +status: + conditions: + - lastTransitionTime: '2022-07-01T12:56:21Z' + message: Update failed + reason: UpdateFailed + status: 'False' + type: Ready diff --git a/pkg/resource_customizations/compute.cnrm.cloud.google.com/ComputeDisk/testdata/update_in_progress.yaml b/pkg/resource_customizations/compute.cnrm.cloud.google.com/ComputeDisk/testdata/update_in_progress.yaml new file mode 100644 index 0000000..6149df1 --- /dev/null +++ b/pkg/resource_customizations/compute.cnrm.cloud.google.com/ComputeDisk/testdata/update_in_progress.yaml @@ -0,0 +1,9 @@ +apiVersion: compute.cnrm.cloud.google.com/v1beta1 +kind: ComputeDisk +status: + conditions: + - lastTransitionTime: '2022-07-01T12:56:21Z' + message: Update in progress + reason: Updating + status: 'False' + type: Ready diff --git a/pkg/resource_customizations/db.atlasgo.io/AtlasMigration/health.lua b/pkg/resource_customizations/db.atlasgo.io/AtlasMigration/health.lua new file mode 100644 index 0000000..332b43e --- /dev/null +++ b/pkg/resource_customizations/db.atlasgo.io/AtlasMigration/health.lua @@ -0,0 +1,37 @@ +hs = {} + +local function readyCond(obj) + if obj.status ~= nil and obj.status.conditions ~= nil then + for _, condition in ipairs(obj.status.conditions) do + if condition.type == "Ready" then + return condition + end + end + end + return nil +end + +local ready = readyCond(obj) + +if ready == nil then + hs.status = "Progressing" + hs.message = "Waiting for Atlas Operator" + return hs +end + +if ready.status == "True" then + hs.status = "Healthy" + hs.message = ready.reason + return hs +end + +if ready.reason == "Reconciling" then + hs.status = "Progressing" +else + hs.status = "Degraded" +end + +hs.message = ready.reason + +return hs + diff --git a/pkg/resource_customizations/db.atlasgo.io/AtlasMigration/health_test.yaml b/pkg/resource_customizations/db.atlasgo.io/AtlasMigration/health_test.yaml new file mode 100644 index 0000000..b827f89 --- /dev/null +++ b/pkg/resource_customizations/db.atlasgo.io/AtlasMigration/health_test.yaml @@ -0,0 +1,13 @@ +tests: +- healthStatus: + status: Progressing + message: "Reconciling" + inputPath: testdata/progressing.yaml +- healthStatus: + status: Degraded + message: "Migrating" + inputPath: testdata/degraded.yaml +- healthStatus: + status: Healthy + message: "Applied" + inputPath: testdata/healthy.yaml diff --git a/pkg/resource_customizations/db.atlasgo.io/AtlasMigration/testdata/degraded.yaml 
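The AtlasMigration script reduces to a small dispatch on the Ready condition: absent means the operator has not reported yet (Progressing), status True means Healthy, reason Reconciling means Progressing, and anything else is Degraded, with hs.message set to the reason in every reported case. A condensed, runnable restatement (the function name is hypothetical):

  -- Hypothetical condensed dispatch mirroring the script above; it takes
  -- the already-located Ready condition rather than the whole object.
  local function atlasHealth(ready)
    if ready == nil then return "Progressing" end  -- operator not reporting yet
    if ready.status == "True" then return "Healthy" end
    if ready.reason == "Reconciling" then return "Progressing" end
    return "Degraded"
  end
  assert(atlasHealth(nil) == "Progressing")
  assert(atlasHealth({ status = "True",  reason = "Applied"     }) == "Healthy")
  assert(atlasHealth({ status = "False", reason = "Reconciling" }) == "Progressing")
  assert(atlasHealth({ status = "False", reason = "Migrating"   }) == "Degraded")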
b/pkg/resource_customizations/db.atlasgo.io/AtlasMigration/testdata/degraded.yaml new file mode 100644 index 0000000..ee51f15 --- /dev/null +++ b/pkg/resource_customizations/db.atlasgo.io/AtlasMigration/testdata/degraded.yaml @@ -0,0 +1,29 @@ +apiVersion: db.atlasgo.io/v1alpha1 +kind: AtlasMigration +metadata: + annotations: + kubectl.kubernetes.io/last-applied-configuration: | + {"apiVersion":"db.atlasgo.io/v1alpha1","kind":"AtlasMigration","metadata":{"annotations":{},"name":"atlasmigration-sample","namespace":"default"},"spec":{"dir":{"configMapRef":{"name":"migration-dir"}},"urlFrom":{"secretKeyRef":{"key":"url","name":"mysql-credentials"}}}} + creationTimestamp: "2023-11-16T08:37:23Z" + generation: 1 + name: atlasmigration-sample + namespace: default + resourceVersion: "49923" + uid: 0d5bc3d6-750e-4f5a-82a3-8b9173106ef4 +spec: + dir: + configMapRef: + name: migration-dir + urlFrom: + secretKeyRef: + key: url + name: mysql-credentials +status: + conditions: + - lastTransitionTime: "2023-11-16T08:37:23Z" + message: 'Error: checksum mismatch' + reason: Migrating + status: "False" + type: Ready + lastApplied: 0 + observed_hash: "" diff --git a/pkg/resource_customizations/db.atlasgo.io/AtlasMigration/testdata/healthy.yaml b/pkg/resource_customizations/db.atlasgo.io/AtlasMigration/testdata/healthy.yaml new file mode 100644 index 0000000..4a7a913 --- /dev/null +++ b/pkg/resource_customizations/db.atlasgo.io/AtlasMigration/testdata/healthy.yaml @@ -0,0 +1,30 @@ +apiVersion: db.atlasgo.io/v1alpha1 +kind: AtlasMigration +metadata: + annotations: + kubectl.kubernetes.io/last-applied-configuration: | + {"apiVersion":"db.atlasgo.io/v1alpha1","kind":"AtlasMigration","metadata":{"annotations":{},"name":"atlasmigration-sample","namespace":"default"},"spec":{"dir":{"configMapRef":{"name":"migration-dir"}},"urlFrom":{"secretKeyRef":{"key":"url","name":"mysql-credentials"}}}} + creationTimestamp: "2023-11-16T08:37:23Z" + generation: 1 + name: atlasmigration-sample + namespace: default + resourceVersion: "50387" + uid: 0d5bc3d6-750e-4f5a-82a3-8b9173106ef4 +spec: + dir: + configMapRef: + name: migration-dir + urlFrom: + secretKeyRef: + key: url + name: mysql-credentials +status: + conditions: + - lastTransitionTime: "2023-11-16T08:46:27Z" + message: "" + reason: Applied + status: "True" + type: Ready + lastApplied: 1700124387 + lastAppliedVersion: "20230316085611" + observed_hash: 4969b3c84c097ff61a9f9722b595a66c1a4473bd85fdd282107b98a92db8a43b diff --git a/pkg/resource_customizations/db.atlasgo.io/AtlasMigration/testdata/progressing.yaml b/pkg/resource_customizations/db.atlasgo.io/AtlasMigration/testdata/progressing.yaml new file mode 100644 index 0000000..024f9f7 --- /dev/null +++ b/pkg/resource_customizations/db.atlasgo.io/AtlasMigration/testdata/progressing.yaml @@ -0,0 +1,30 @@ +apiVersion: db.atlasgo.io/v1alpha1 +kind: AtlasMigration +metadata: + annotations: + kubectl.kubernetes.io/last-applied-configuration: | + {"apiVersion":"db.atlasgo.io/v1alpha1","kind":"AtlasMigration","metadata":{"annotations":{},"name":"atlasmigration-sample","namespace":"default"},"spec":{"dir":{"configMapRef":{"name":"migration-dir"}},"urlFrom":{"secretKeyRef":{"key":"url","name":"mysql-credentials"}}}} + creationTimestamp: "2023-11-16T08:37:23Z" + generation: 1 + name: atlasmigration-sample + namespace: default + resourceVersion: "50387" + uid: 0d5bc3d6-750e-4f5a-82a3-8b9173106ef4 +spec: + dir: + configMapRef: + name: migration-dir + urlFrom: + secretKeyRef: + key: url + name: mysql-credentials +status: + conditions: + - 
lastTransitionTime: "2023-11-16T08:46:27Z" + message: "Current migration data has changed" + reason: "Reconciling" + status: "False" + type: Ready + lastApplied: 1700124387 + lastAppliedVersion: "20230316085611" + observed_hash: 4969b3c84c097ff61a9f9722b595a66c1a4473bd85fdd282107b98a92db8a43b diff --git a/pkg/resource_customizations/db.atlasgo.io/AtlasSchema/health.lua b/pkg/resource_customizations/db.atlasgo.io/AtlasSchema/health.lua new file mode 100644 index 0000000..c66d66d --- /dev/null +++ b/pkg/resource_customizations/db.atlasgo.io/AtlasSchema/health.lua @@ -0,0 +1,37 @@ +hs = {} + +local function readyCond(obj) + if obj.status ~= nil and obj.status.conditions ~= nil then + for _, condition in ipairs(obj.status.conditions) do + if condition.type == "Ready" then + return condition + end + end + end + return nil +end + +local ready = readyCond(obj) + +if ready == nil then + hs.status = "Progressing" + hs.message = "Waiting for Atlas Operator" + return hs +end + +if ready.status == "True" then + hs.status = "Healthy" + hs.message = ready.reason + return hs +end + +if ready.message == "Reconciling" or ready.message == "GettingDevDB" then + hs.status = "Progressing" +else + hs.status = "Degraded" +end + +hs.message = ready.reason + +return hs + diff --git a/pkg/resource_customizations/db.atlasgo.io/AtlasSchema/health_test.yaml b/pkg/resource_customizations/db.atlasgo.io/AtlasSchema/health_test.yaml new file mode 100644 index 0000000..0fe102f --- /dev/null +++ b/pkg/resource_customizations/db.atlasgo.io/AtlasSchema/health_test.yaml @@ -0,0 +1,13 @@ +tests: +- healthStatus: + status: Progressing + message: "Reconciling" + inputPath: testdata/progressing.yaml +- healthStatus: + status: Degraded + message: "ApplyingSchema" + inputPath: testdata/degraded.yaml +- healthStatus: + status: Healthy + message: "Applied" + inputPath: testdata/healthy.yaml diff --git a/pkg/resource_customizations/db.atlasgo.io/AtlasSchema/testdata/degraded.yaml b/pkg/resource_customizations/db.atlasgo.io/AtlasSchema/testdata/degraded.yaml new file mode 100644 index 0000000..0838398 --- /dev/null +++ b/pkg/resource_customizations/db.atlasgo.io/AtlasSchema/testdata/degraded.yaml @@ -0,0 +1,38 @@ +apiVersion: db.atlasgo.io/v1alpha1 +kind: AtlasSchema +metadata: + annotations: + kubectl.kubernetes.io/last-applied-configuration: | + {"apiVersion":"db.atlasgo.io/v1alpha1","kind":"AtlasSchema","metadata":{"annotations":{},"name":"atlasschema-mysql","namespace":"default"},"spec":{"schema":{"sql":"create table users (\n id int not null auto_increment,\n name varchar(255) not null,\n email varchar(255) unique not null,\n short_bio varchar(255) not null,\n primary key (id)\n);\n"},"urlFrom":{"secretKeyRef":{"key":"url","name":"mysql-credentials"}}}} + creationTimestamp: "2023-11-15T14:33:18Z" + generation: 2 + name: atlasschema-mysql + namespace: default + resourceVersion: "46659" + uid: 54a4cdfc-e4f9-4c3d-934c-e08b6122e38a +spec: + schema: + sql: | + xcreate table users ( + id int not null auto_increment, + name varchar(255) not null, + email varchar(255) unique not null, + short_bio varchar(255) not null, + primary key (id) + ); + urlFrom: + secretKeyRef: + key: url + name: mysql-credentials +status: + conditions: + - lastTransitionTime: "2023-11-15T14:38:41Z" + message: |- + Error: sql/migrate: read migration directory state: sql/migrate: execute: executing statement "xcreate table users (\n id int not null auto_increment,\n name varchar(255) not null,\n email varchar(255) unique not null,\n short_bio varchar(255) not 
null,\n primary key (id)\n);" from version "schema": Error 1064 (42000): You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'xcreate table users ( + id int not null auto_increment, + name varchar(255) not ' at line 1 + reason: ApplyingSchema + status: "False" + type: Ready + last_applied: 1700058814 + observed_hash: ddfe666707ddf5c2cc7625c2a0de89da51e54fc7caa6403db307146430d20d85 diff --git a/pkg/resource_customizations/db.atlasgo.io/AtlasSchema/testdata/healthy.yaml b/pkg/resource_customizations/db.atlasgo.io/AtlasSchema/testdata/healthy.yaml new file mode 100644 index 0000000..eca8ec4 --- /dev/null +++ b/pkg/resource_customizations/db.atlasgo.io/AtlasSchema/testdata/healthy.yaml @@ -0,0 +1,39 @@ +apiVersion: db.atlasgo.io/v1alpha1 +kind: AtlasSchema +metadata: + annotations: + kubectl.kubernetes.io/last-applied-configuration: | + {"apiVersion":"db.atlasgo.io/v1alpha1","kind":"AtlasSchema","metadata":{"annotations":{},"name":"atlasschema-mysql","namespace":"default"},"spec":{"schema":{"sql":"create table users (\n id int not null auto_increment,\n name varchar(255) not null,\n email varchar(255) unique not null,\n short_bio varchar(255) not null,\n primary key (id)\n);\n"},"urlFrom":{"secretKeyRef":{"key":"url","name":"mysql-credentials"}}}} + creationTimestamp: "2023-11-15T14:33:18Z" + generation: 1 + name: atlasschema-mysql + namespace: default + resourceVersion: "46390" + uid: 54a4cdfc-e4f9-4c3d-934c-e08b6122e38a +spec: + schema: + sql: | + create table users ( + id int not null auto_increment, + name varchar(255) not null, + email varchar(255) unique not null, + short_bio varchar(255) not null, + primary key (id) + ); + urlFrom: + secretKeyRef: + key: url + name: mysql-credentials +status: + conditions: + - lastTransitionTime: "2023-11-15T14:33:34Z" + message: 'The schema has been applied successfully. 
Apply response: {"Driver":"mysql","URL":{"Scheme":"mysql","Opaque":"","User":{},"Host":"mysql.default:3306","Path":"/myapp","RawPath":"","OmitHost":false,"ForceQuery":false,"RawQuery":"parseTime=true","Fragment":"","RawFragment":"","Schema":"myapp"},"Changes":{"Applied":["CREATE + TABLE `users` (\n `id` int NOT NULL AUTO_INCREMENT,\n `name` varchar(255) + NOT NULL,\n `email` varchar(255) NOT NULL,\n `short_bio` varchar(255) NOT + NULL,\n PRIMARY KEY (`id`),\n UNIQUE INDEX `email` (`email`)\n) CHARSET utf8mb4 + COLLATE utf8mb4_0900_ai_ci"]}}' + reason: Applied + status: "True" + type: Ready + last_applied: 1700058814 + observed_hash: ddfe666707ddf5c2cc7625c2a0de89da51e54fc7caa6403db307146430d20d85 diff --git a/pkg/resource_customizations/db.atlasgo.io/AtlasSchema/testdata/progressing.yaml b/pkg/resource_customizations/db.atlasgo.io/AtlasSchema/testdata/progressing.yaml new file mode 100644 index 0000000..79d59ca --- /dev/null +++ b/pkg/resource_customizations/db.atlasgo.io/AtlasSchema/testdata/progressing.yaml @@ -0,0 +1,35 @@ +apiVersion: db.atlasgo.io/v1alpha1 +kind: AtlasSchema +metadata: + annotations: + kubectl.kubernetes.io/last-applied-configuration: | + {"apiVersion":"db.atlasgo.io/v1alpha1","kind":"AtlasSchema","metadata":{"annotations":{},"name":"atlasschema-mysql","namespace":"default"},"spec":{"schema":{"sql":"create table users (\n id int not null auto_increment,\n name varchar(255) not null,\n email varchar(255) unique not null,\n short_bio varchar(255) not null,\n primary key (id)\n);\n"},"urlFrom":{"secretKeyRef":{"key":"url","name":"mysql-credentials"}}}} + creationTimestamp: "2023-11-15T14:33:18Z" + generation: 1 + name: atlasschema-mysql + namespace: default + resourceVersion: "46390" + uid: 54a4cdfc-e4f9-4c3d-934c-e08b6122e38a +spec: + schema: + sql: | + create table users ( + id int not null auto_increment, + name varchar(255) not null, + email varchar(255) unique not null, + short_bio varchar(255) not null, + primary key (id) + ); + urlFrom: + secretKeyRef: + key: url + name: mysql-credentials +status: + conditions: + - lastTransitionTime: "2023-11-15T14:33:34Z" + message: 'Reconciling' + reason: Reconciling + status: "False" + type: Ready + last_applied: 1700058814 + observed_hash: ddfe666707ddf5c2cc7625c2a0de89da51e54fc7caa6403db307146430d20d85 diff --git a/pkg/resource_customizations/elasticsearch.k8s.elastic.co/Elasticsearch/health.lua b/pkg/resource_customizations/elasticsearch.k8s.elastic.co/Elasticsearch/health.lua new file mode 100644 index 0000000..241413e --- /dev/null +++ b/pkg/resource_customizations/elasticsearch.k8s.elastic.co/Elasticsearch/health.lua @@ -0,0 +1,48 @@ +local hs = {} +if obj.status ~= nil then + if obj.status.availableNodes ~= nil then + local sum = 0 + for _,node in pairs(obj.spec.nodeSets) do + sum = sum + node.count + end + if obj.status.availableNodes < sum then + hs.status = "Progressing" + hs.message = "The desired amount of availableNodes is " .. sum .. " but the current amount is " .. obj.status.availableNodes + return hs + elseif obj.status.availableNodes == sum then + if obj.status.phase ~= nil and obj.status.health ~= nil then + if obj.status.phase == "Ready" then + if obj.status.health == "green" then + hs.status = "Healthy" + hs.message = "Elasticsearch Cluster status is Green" + return hs + elseif obj.status.health == "yellow" then + hs.status = "Progressing" + hs.message = "Elasticsearch Cluster status is Yellow. 
Check the status of indices, replicas and shards" + return hs + elseif obj.status.health == "red" then + hs.status = "Degraded" + hs.message = "Elasticsearch Cluster status is Red. Check the status of indices, replicas and shards" + return hs + end + elseif obj.status.phase == "ApplyingChanges" then + hs.status = "Progressing" + hs.message = "Elasticsearch phase is ApplyingChanges" + return hs + elseif obj.status.phase == "MigratingData" then + hs.status = "Progressing" + hs.message = "Elasticsearch phase is MigratingData" + return hs + elseif obj.status.phase == "Invalid" then + hs.status = "Degraded" + hs.message = "Elasticsearch phase is Invalid" + return hs + end + end + end + end +end + +hs.status = "Unknown" +hs.message = "Elasticsearch Cluster status is unknown. Ensure your ArgoCD is current and then check for/file a bug report: https://github.com/argoproj/argo-cd/issues" +return hs diff --git a/pkg/resource_customizations/elasticsearch.k8s.elastic.co/Elasticsearch/health_test.yaml b/pkg/resource_customizations/elasticsearch.k8s.elastic.co/Elasticsearch/health_test.yaml new file mode 100644 index 0000000..015bc14 --- /dev/null +++ b/pkg/resource_customizations/elasticsearch.k8s.elastic.co/Elasticsearch/health_test.yaml @@ -0,0 +1,33 @@ +tests: +- healthStatus: + status: Healthy + message: "Elasticsearch Cluster status is Green" + inputPath: testdata/ready_green.yaml +- healthStatus: + status: Progressing + message: "Elasticsearch Cluster status is Yellow. Check the status of indices, replicas and shards" + inputPath: testdata/ready_yellow.yaml +- healthStatus: + status: Degraded + message: "Elasticsearch Cluster status is Red. Check the status of indices, replicas and shards" + inputPath: testdata/ready_red.yaml +- healthStatus: + status: Progressing + message: "Elasticsearch phase is ApplyingChanges" + inputPath: testdata/applyingchanges.yaml +- healthStatus: + status: Progressing + message: "Elasticsearch phase is MigratingData" + inputPath: testdata/migratingdata.yaml +- healthStatus: + status: Degraded + message: "Elasticsearch phase is Invalid" + inputPath: testdata/invalid.yaml +- healthStatus: + status: Progressing + message: "The desired amount of availableNodes is 5 but the current amount is 3" + inputPath: testdata/progressing.yaml +- healthStatus: + status: Unknown + message: "Elasticsearch Cluster status is unknown. 
Ensure your ArgoCD is current and then check for/file a bug report: https://github.com/argoproj/argo-cd/issues" + inputPath: testdata/unknown.yaml diff --git a/pkg/resource_customizations/elasticsearch.k8s.elastic.co/Elasticsearch/testdata/applyingchanges.yaml b/pkg/resource_customizations/elasticsearch.k8s.elastic.co/Elasticsearch/testdata/applyingchanges.yaml new file mode 100644 index 0000000..af0a98e --- /dev/null +++ b/pkg/resource_customizations/elasticsearch.k8s.elastic.co/Elasticsearch/testdata/applyingchanges.yaml @@ -0,0 +1,15 @@ +apiVersion: elasticsearch.k8s.elastic.co/v1 +kind: Elasticsearch +metadata: + name: quickstart +spec: + version: 7.12.0 + nodeSets: + - name: default + count: 3 + config: + node.store.allow_mmap: false +status: + availableNodes: 3 + health: green + phase: ApplyingChanges diff --git a/pkg/resource_customizations/elasticsearch.k8s.elastic.co/Elasticsearch/testdata/invalid.yaml b/pkg/resource_customizations/elasticsearch.k8s.elastic.co/Elasticsearch/testdata/invalid.yaml new file mode 100644 index 0000000..3636f1a --- /dev/null +++ b/pkg/resource_customizations/elasticsearch.k8s.elastic.co/Elasticsearch/testdata/invalid.yaml @@ -0,0 +1,15 @@ +apiVersion: elasticsearch.k8s.elastic.co/v1 +kind: Elasticsearch +metadata: + name: quickstart +spec: + version: 7.12.0 + nodeSets: + - name: default + count: 3 + config: + node.store.allow_mmap: false +status: + availableNodes: 3 + health: red + phase: Invalid diff --git a/pkg/resource_customizations/elasticsearch.k8s.elastic.co/Elasticsearch/testdata/migratingdata.yaml b/pkg/resource_customizations/elasticsearch.k8s.elastic.co/Elasticsearch/testdata/migratingdata.yaml new file mode 100644 index 0000000..6fc34cb --- /dev/null +++ b/pkg/resource_customizations/elasticsearch.k8s.elastic.co/Elasticsearch/testdata/migratingdata.yaml @@ -0,0 +1,15 @@ +apiVersion: elasticsearch.k8s.elastic.co/v1 +kind: Elasticsearch +metadata: + name: quickstart +spec: + version: 7.12.0 + nodeSets: + - name: default + count: 3 + config: + node.store.allow_mmap: false +status: + availableNodes: 3 + health: green + phase: MigratingData diff --git a/pkg/resource_customizations/elasticsearch.k8s.elastic.co/Elasticsearch/testdata/progressing.yaml b/pkg/resource_customizations/elasticsearch.k8s.elastic.co/Elasticsearch/testdata/progressing.yaml new file mode 100644 index 0000000..43de512 --- /dev/null +++ b/pkg/resource_customizations/elasticsearch.k8s.elastic.co/Elasticsearch/testdata/progressing.yaml @@ -0,0 +1,22 @@ +apiVersion: elasticsearch.k8s.elastic.co/v1 +kind: Elasticsearch +metadata: + name: quickstart +spec: + version: 7.12.0 + nodeSets: + # 3 dedicated master nodes + - name: master + count: 3 + config: + node.roles: ["master"] + node.remote_cluster_client: false + # 2 ingest-data nodes + - name: ingest-data + count: 2 + config: + node.roles: ["data", "ingest"] +status: + availableNodes: 3 + health: green + phase: Ready diff --git a/pkg/resource_customizations/elasticsearch.k8s.elastic.co/Elasticsearch/testdata/ready_green.yaml b/pkg/resource_customizations/elasticsearch.k8s.elastic.co/Elasticsearch/testdata/ready_green.yaml new file mode 100644 index 0000000..3db55bb --- /dev/null +++ b/pkg/resource_customizations/elasticsearch.k8s.elastic.co/Elasticsearch/testdata/ready_green.yaml @@ -0,0 +1,15 @@ +apiVersion: elasticsearch.k8s.elastic.co/v1 +kind: Elasticsearch +metadata: + name: quickstart +spec: + version: 7.12.0 + nodeSets: + - name: default + count: 3 + config: + node.store.allow_mmap: false +status: + availableNodes: 3 + 
health: green + phase: Ready diff --git a/pkg/resource_customizations/elasticsearch.k8s.elastic.co/Elasticsearch/testdata/ready_red.yaml b/pkg/resource_customizations/elasticsearch.k8s.elastic.co/Elasticsearch/testdata/ready_red.yaml new file mode 100644 index 0000000..35b0aa0 --- /dev/null +++ b/pkg/resource_customizations/elasticsearch.k8s.elastic.co/Elasticsearch/testdata/ready_red.yaml @@ -0,0 +1,15 @@ +apiVersion: elasticsearch.k8s.elastic.co/v1 +kind: Elasticsearch +metadata: + name: quickstart +spec: + version: 7.12.0 + nodeSets: + - name: default + count: 3 + config: + node.store.allow_mmap: false +status: + availableNodes: 3 + health: red + phase: Ready diff --git a/pkg/resource_customizations/elasticsearch.k8s.elastic.co/Elasticsearch/testdata/ready_yellow.yaml b/pkg/resource_customizations/elasticsearch.k8s.elastic.co/Elasticsearch/testdata/ready_yellow.yaml new file mode 100644 index 0000000..c655f19 --- /dev/null +++ b/pkg/resource_customizations/elasticsearch.k8s.elastic.co/Elasticsearch/testdata/ready_yellow.yaml @@ -0,0 +1,15 @@ +apiVersion: elasticsearch.k8s.elastic.co/v1 +kind: Elasticsearch +metadata: + name: quickstart +spec: + version: 7.12.0 + nodeSets: + - name: default + count: 3 + config: + node.store.allow_mmap: false +status: + availableNodes: 3 + health: yellow + phase: Ready diff --git a/pkg/resource_customizations/elasticsearch.k8s.elastic.co/Elasticsearch/testdata/unknown.yaml b/pkg/resource_customizations/elasticsearch.k8s.elastic.co/Elasticsearch/testdata/unknown.yaml new file mode 100644 index 0000000..9aac9e0 --- /dev/null +++ b/pkg/resource_customizations/elasticsearch.k8s.elastic.co/Elasticsearch/testdata/unknown.yaml @@ -0,0 +1,12 @@ +apiVersion: elasticsearch.k8s.elastic.co/v1 +kind: Elasticsearch +metadata: + name: quickstart +spec: + version: 7.12.0 + nodeSets: + - name: default + count: 3 + config: + node.store.allow_mmap: false +status: {} diff --git a/pkg/resource_customizations/embed.go b/pkg/resource_customizations/embed.go new file mode 100644 index 0000000..8a4d531 --- /dev/null +++ b/pkg/resource_customizations/embed.go @@ -0,0 +1,10 @@ +package resource_customizations + +import ( + "embed" +) + +// Embedded contains embedded resource customization +// +//go:embed * +var Embedded embed.FS diff --git a/pkg/resource_customizations/external-secrets.io/ClusterExternalSecret/health.lua b/pkg/resource_customizations/external-secrets.io/ClusterExternalSecret/health.lua new file mode 100644 index 0000000..b89fbd5 --- /dev/null +++ b/pkg/resource_customizations/external-secrets.io/ClusterExternalSecret/health.lua @@ -0,0 +1,25 @@ +local hs = {} +if obj.status ~= nil then + if obj.status.conditions ~= nil then + -- For ClusterExternalSecret, new statuses are appended to the end of the list + local lastStatus = obj.status.conditions[#obj.status.conditions] + if lastStatus.type == "Ready" and lastStatus.status == "True" then + hs.status = "Healthy" + hs.message = lastStatus.message + return hs + end + if lastStatus.type == "PartiallyReady" and lastStatus.status == "True" then + hs.status = "Degraded" + hs.message = lastStatus.message + return hs + end + if lastStatus.type == "NotReady" and lastStatus.status == "True" then + hs.status = "Degraded" + hs.message = lastStatus.message + return hs + end + end +end +hs.status = "Progressing" +hs.message = "Waiting for ClusterExternalSecret" +return hs diff --git a/pkg/resource_customizations/external-secrets.io/ClusterExternalSecret/health_test.yaml 
b/pkg/resource_customizations/external-secrets.io/ClusterExternalSecret/health_test.yaml new file mode 100644 index 0000000..52e4c7c --- /dev/null +++ b/pkg/resource_customizations/external-secrets.io/ClusterExternalSecret/health_test.yaml @@ -0,0 +1,21 @@ +tests: + - healthStatus: + status: Progressing + message: Waiting for ClusterExternalSecret + inputPath: testdata/progressing.yaml + - healthStatus: + status: Degraded + message: 'one or more namespaces failed' + inputPath: testdata/notready.yaml + - healthStatus: + status: Degraded + message: 'one or more namespaces failed' + inputPath: testdata/partiallyready.yaml + - healthStatus: + status: Degraded + message: 'one or more namespaces failed' + inputPath: testdata/partiallyready-multiple-conditions.yaml + - healthStatus: + status: Healthy + message: '' + inputPath: testdata/healthy.yaml diff --git a/pkg/resource_customizations/external-secrets.io/ClusterExternalSecret/testdata/healthy.yaml b/pkg/resource_customizations/external-secrets.io/ClusterExternalSecret/testdata/healthy.yaml new file mode 100644 index 0000000..1a5f61b --- /dev/null +++ b/pkg/resource_customizations/external-secrets.io/ClusterExternalSecret/testdata/healthy.yaml @@ -0,0 +1,37 @@ +apiVersion: external-secrets.io/v1beta1 +kind: ClusterExternalSecret +metadata: + name: ces +spec: + externalSecretName: hello-world-es + externalSecretSpec: + data: + - remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /foo + property: key + secretKey: mykey + refreshInterval: 1h + secretStoreRef: + kind: ClusterSecretStore + name: secretmanager + target: + creationPolicy: Owner + deletionPolicy: Retain + name: mysecret + template: + data: + somekey: '{{ .somecreds }}' + engineVersion: v2 + type: Opaque + namespaceSelector: + matchLabels: + cool: label +status: + conditions: + - message: one or more namespaces failed + status: "True" + type: PartiallyReady + - status: "True" + type: Ready diff --git a/pkg/resource_customizations/external-secrets.io/ClusterExternalSecret/testdata/notready.yaml b/pkg/resource_customizations/external-secrets.io/ClusterExternalSecret/testdata/notready.yaml new file mode 100644 index 0000000..eeea306 --- /dev/null +++ b/pkg/resource_customizations/external-secrets.io/ClusterExternalSecret/testdata/notready.yaml @@ -0,0 +1,38 @@ +apiVersion: external-secrets.io/v1beta1 +kind: ClusterExternalSecret +metadata: + name: ces +spec: + externalSecretName: hello-world-es + externalSecretSpec: + data: + - remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /foo + property: key + secretKey: mykey + refreshInterval: 1h + secretStoreRef: + kind: ClusterSecretStore + name: secretmanager + target: + creationPolicy: Owner + deletionPolicy: Retain + name: mysecret + template: + data: + somekey: '{{ .somecreds }}' + engineVersion: v2 + type: Opaque + namespaceSelector: + matchLabels: + cool: label +status: + conditions: + - message: one or more namespaces failed + status: "True" + type: NotReady + failedNamespaces: + - namespace: default + reason: external secret already exists in namespace diff --git a/pkg/resource_customizations/external-secrets.io/ClusterExternalSecret/testdata/partiallyready-multiple-conditions.yaml b/pkg/resource_customizations/external-secrets.io/ClusterExternalSecret/testdata/partiallyready-multiple-conditions.yaml new file mode 100644 index 0000000..52f6141 --- /dev/null +++ b/pkg/resource_customizations/external-secrets.io/ClusterExternalSecret/testdata/partiallyready-multiple-conditions.yaml @@ -0,0 +1,43 
@@ +apiVersion: external-secrets.io/v1beta1 +kind: ClusterExternalSecret +metadata: + name: ces +spec: + externalSecretName: hello-world-es + externalSecretSpec: + data: + - remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /foo + property: key + secretKey: mykey + refreshInterval: 1h + secretStoreRef: + kind: ClusterSecretStore + name: secretmanager + target: + creationPolicy: Owner + deletionPolicy: Retain + name: mysecret + template: + data: + somekey: '{{ .somecreds }}' + engineVersion: v2 + type: Opaque + namespaceSelector: + matchLabels: + cool: label +status: + conditions: + - message: one or more namespaces failed + status: "True" + type: NotReady + - message: one or more namespaces failed + status: "True" + type: PartiallyReady + failedNamespaces: + - namespace: default + reason: external secret already exists in namespace + provisionedNamespaces: + - other-namespace diff --git a/pkg/resource_customizations/external-secrets.io/ClusterExternalSecret/testdata/partiallyready.yaml b/pkg/resource_customizations/external-secrets.io/ClusterExternalSecret/testdata/partiallyready.yaml new file mode 100644 index 0000000..f6a2915 --- /dev/null +++ b/pkg/resource_customizations/external-secrets.io/ClusterExternalSecret/testdata/partiallyready.yaml @@ -0,0 +1,40 @@ +apiVersion: external-secrets.io/v1beta1 +kind: ClusterExternalSecret +metadata: + name: ces +spec: + externalSecretName: hello-world-es + externalSecretSpec: + data: + - remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /foo + property: key + secretKey: mykey + refreshInterval: 1h + secretStoreRef: + kind: ClusterSecretStore + name: secretmanager + target: + creationPolicy: Owner + deletionPolicy: Retain + name: mysecret + template: + data: + somekey: '{{ .somecreds }}' + engineVersion: v2 + type: Opaque + namespaceSelector: + matchLabels: + cool: label +status: + conditions: + - message: one or more namespaces failed + status: "True" + type: PartiallyReady + failedNamespaces: + - namespace: default + reason: external secret already exists in namespace + provisionedNamespaces: + - other-namespace diff --git a/pkg/resource_customizations/external-secrets.io/ClusterExternalSecret/testdata/progressing.yaml b/pkg/resource_customizations/external-secrets.io/ClusterExternalSecret/testdata/progressing.yaml new file mode 100644 index 0000000..8e326e4 --- /dev/null +++ b/pkg/resource_customizations/external-secrets.io/ClusterExternalSecret/testdata/progressing.yaml @@ -0,0 +1,30 @@ +apiVersion: external-secrets.io/v1beta1 +kind: ClusterExternalSecret +metadata: + name: ces +spec: + externalSecretName: hello-world-es + externalSecretSpec: + data: + - remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /foo + property: key + secretKey: mykey + refreshInterval: 1h + secretStoreRef: + kind: ClusterSecretStore + name: secretmanager + target: + creationPolicy: Owner + deletionPolicy: Retain + name: mysecret + template: + data: + somekey: '{{ .somecreds }}' + engineVersion: v2 + type: Opaque + namespaceSelector: + matchLabels: + cool: label diff --git a/pkg/resource_customizations/external-secrets.io/ClusterSecretStore/health.lua b/pkg/resource_customizations/external-secrets.io/ClusterSecretStore/health.lua new file mode 100644 index 0000000..4c430f7 --- /dev/null +++ b/pkg/resource_customizations/external-secrets.io/ClusterSecretStore/health.lua @@ -0,0 +1,20 @@ +local hs = {} +if obj.status ~= nil then + if obj.status.conditions ~= nil then + for i, condition in 
ipairs(obj.status.conditions) do + if condition.type == "Ready" and condition.status == "False" then + hs.status = "Degraded" + hs.message = condition.message + return hs + end + if condition.type == "Ready" and condition.status == "True" then + hs.status = "Healthy" + hs.message = condition.message + return hs + end + end + end +end +hs.status = "Progressing" +hs.message = "Waiting for ClusterSecretStore" +return hs diff --git a/pkg/resource_customizations/external-secrets.io/ClusterSecretStore/health_test.yaml b/pkg/resource_customizations/external-secrets.io/ClusterSecretStore/health_test.yaml new file mode 100644 index 0000000..6e69215 --- /dev/null +++ b/pkg/resource_customizations/external-secrets.io/ClusterSecretStore/health_test.yaml @@ -0,0 +1,9 @@ +tests: + - healthStatus: + status: Degraded + message: 'unable to validate store' + inputPath: testdata/degraded.yaml + - healthStatus: + status: Healthy + message: 'store validated' + inputPath: testdata/healthy.yaml diff --git a/pkg/resource_customizations/external-secrets.io/ClusterSecretStore/testdata/degraded.yaml b/pkg/resource_customizations/external-secrets.io/ClusterSecretStore/testdata/degraded.yaml new file mode 100644 index 0000000..1f00cc4 --- /dev/null +++ b/pkg/resource_customizations/external-secrets.io/ClusterSecretStore/testdata/degraded.yaml @@ -0,0 +1,16 @@ +apiVersion: external-secrets.io/v1beta1 +kind: ClusterSecretStore +metadata: + name: secretmanager +spec: + provider: + aws: + region: us-east-1 + service: SecretsManager +status: + conditions: + - lastTransitionTime: "2023-03-21T22:58:01Z" + message: unable to validate store + reason: ValidationFailed + status: "False" + type: Ready diff --git a/pkg/resource_customizations/external-secrets.io/ClusterSecretStore/testdata/healthy.yaml b/pkg/resource_customizations/external-secrets.io/ClusterSecretStore/testdata/healthy.yaml new file mode 100644 index 0000000..8c99de1 --- /dev/null +++ b/pkg/resource_customizations/external-secrets.io/ClusterSecretStore/testdata/healthy.yaml @@ -0,0 +1,17 @@ +apiVersion: external-secrets.io/v1beta1 +kind: ClusterSecretStore +metadata: + name: secretmanager +spec: + provider: + aws: + region: us-east-1 + service: SecretsManager +status: + capabilities: ReadWrite + conditions: + - lastTransitionTime: "2023-03-22T04:51:03Z" + message: store validated + reason: Valid + status: "True" + type: Ready diff --git a/pkg/resource_customizations/external-secrets.io/ExternalSecret/actions/action_test.yaml b/pkg/resource_customizations/external-secrets.io/ExternalSecret/actions/action_test.yaml new file mode 100644 index 0000000..83f49fc --- /dev/null +++ b/pkg/resource_customizations/external-secrets.io/ExternalSecret/actions/action_test.yaml @@ -0,0 +1,4 @@ +actionTests: + - action: refresh + inputPath: testdata/external-secret.yaml + expectedOutputPath: testdata/external-secret-updated.yaml diff --git a/pkg/resource_customizations/external-secrets.io/ExternalSecret/actions/discovery.lua b/pkg/resource_customizations/external-secrets.io/ExternalSecret/actions/discovery.lua new file mode 100644 index 0000000..89d806c --- /dev/null +++ b/pkg/resource_customizations/external-secrets.io/ExternalSecret/actions/discovery.lua @@ -0,0 +1,3 @@ +local actions = {} +actions["refresh"] = {["disabled"] = false} +return actions diff --git a/pkg/resource_customizations/external-secrets.io/ExternalSecret/actions/refresh/action.lua b/pkg/resource_customizations/external-secrets.io/ExternalSecret/actions/refresh/action.lua new file mode 100644 index 
0000000..fa29c48 --- /dev/null +++ b/pkg/resource_customizations/external-secrets.io/ExternalSecret/actions/refresh/action.lua @@ -0,0 +1,6 @@ +local os = require("os") +if obj.metadata.annotations == nil then + obj.metadata.annotations = {} +end +obj.metadata.annotations["force-sync"] = os.date("!%Y-%m-%dT%XZ") +return obj diff --git a/pkg/resource_customizations/external-secrets.io/ExternalSecret/actions/testdata/external-secret-updated.yaml b/pkg/resource_customizations/external-secrets.io/ExternalSecret/actions/testdata/external-secret-updated.yaml new file mode 100644 index 0000000..4266e9a --- /dev/null +++ b/pkg/resource_customizations/external-secrets.io/ExternalSecret/actions/testdata/external-secret-updated.yaml @@ -0,0 +1,56 @@ +apiVersion: external-secrets.io/v1alpha1 +kind: ExternalSecret +metadata: + annotations: + force-sync: '0001-01-01T00:00:00Z' + creationTimestamp: '2021-11-16T21:59:33Z' + generation: 1 + name: test-healthy + namespace: argocd + resourceVersion: '136487331' + selfLink: /apis/external-secrets.io/v1alpha1/namespaces/argocd/externalsecrets/test-healthy + uid: 1e754a7e-0781-4d57-932d-4651d5b19586 +spec: + data: + - remoteRef: + key: secret/sa/example + property: api.address + secretKey: url + - remoteRef: + key: secret/sa/example + property: ca.crt + secretKey: ca + - remoteRef: + key: secret/sa/example + property: token + secretKey: token + refreshInterval: 1m + secretStoreRef: + kind: SecretStore + name: example + target: + creationPolicy: Owner + template: + data: + config: | + { + "bearerToken": "{{ .token | base64decode | toString }}", + "tlsClientConfig": { + "insecure": false, + "caData": "{{ .ca | toString }}" + } + } + name: cluster-test + server: '{{ .url | toString }}' + metadata: + labels: + argocd.argoproj.io/secret-type: cluster +status: + conditions: + - lastTransitionTime: '2021-11-16T21:59:34Z' + message: Secret was synced + reason: SecretSynced + status: 'True' + type: Ready + refreshTime: '2021-11-29T18:32:24Z' + syncedResourceVersion: 1-519a61da0dc68b2575b4f8efada70e42 diff --git a/pkg/resource_customizations/external-secrets.io/ExternalSecret/actions/testdata/external-secret.yaml b/pkg/resource_customizations/external-secrets.io/ExternalSecret/actions/testdata/external-secret.yaml new file mode 100644 index 0000000..da17edb --- /dev/null +++ b/pkg/resource_customizations/external-secrets.io/ExternalSecret/actions/testdata/external-secret.yaml @@ -0,0 +1,54 @@ +apiVersion: external-secrets.io/v1alpha1 +kind: ExternalSecret +metadata: + creationTimestamp: '2021-11-16T21:59:33Z' + generation: 1 + name: test-healthy + namespace: argocd + resourceVersion: '136487331' + selfLink: /apis/external-secrets.io/v1alpha1/namespaces/argocd/externalsecrets/test-healthy + uid: 1e754a7e-0781-4d57-932d-4651d5b19586 +spec: + data: + - remoteRef: + key: secret/sa/example + property: api.address + secretKey: url + - remoteRef: + key: secret/sa/example + property: ca.crt + secretKey: ca + - remoteRef: + key: secret/sa/example + property: token + secretKey: token + refreshInterval: 1m + secretStoreRef: + kind: SecretStore + name: example + target: + creationPolicy: Owner + template: + data: + config: | + { + "bearerToken": "{{ .token | base64decode | toString }}", + "tlsClientConfig": { + "insecure": false, + "caData": "{{ .ca | toString }}" + } + } + name: cluster-test + server: '{{ .url | toString }}' + metadata: + labels: + argocd.argoproj.io/secret-type: cluster +status: + conditions: + - lastTransitionTime: '2021-11-16T21:59:34Z' + message: Secret was 
synced + reason: SecretSynced + status: 'True' + type: Ready + refreshTime: '2021-11-29T18:32:24Z' + syncedResourceVersion: 1-519a61da0dc68b2575b4f8efada70e42 diff --git a/pkg/resource_customizations/external-secrets.io/ExternalSecret/health.lua b/pkg/resource_customizations/external-secrets.io/ExternalSecret/health.lua new file mode 100644 index 0000000..4def31d --- /dev/null +++ b/pkg/resource_customizations/external-secrets.io/ExternalSecret/health.lua @@ -0,0 +1,20 @@ +local hs = {} +if obj.status ~= nil then + if obj.status.conditions ~= nil then + for i, condition in ipairs(obj.status.conditions) do + if condition.type == "Ready" and condition.status == "False" then + hs.status = "Degraded" + hs.message = condition.message + return hs + end + if condition.type == "Ready" and condition.status == "True" then + hs.status = "Healthy" + hs.message = condition.message + return hs + end + end + end +end +hs.status = "Progressing" +hs.message = "Waiting for ExternalSecret" +return hs diff --git a/pkg/resource_customizations/external-secrets.io/ExternalSecret/health_test.yaml b/pkg/resource_customizations/external-secrets.io/ExternalSecret/health_test.yaml new file mode 100644 index 0000000..059dfe2 --- /dev/null +++ b/pkg/resource_customizations/external-secrets.io/ExternalSecret/health_test.yaml @@ -0,0 +1,13 @@ +tests: + - healthStatus: + status: Progressing + message: Waiting for ExternalSecret + inputPath: testdata/progressing.yaml + - healthStatus: + status: Degraded + message: 'could not get secret data from provider: key "secret/sa/example" from ExternalSecret "test-degraded"' + inputPath: testdata/degraded.yaml + - healthStatus: + status: Healthy + message: 'Secret was synced' + inputPath: testdata/healthy.yaml diff --git a/pkg/resource_customizations/external-secrets.io/ExternalSecret/testdata/degraded.yaml b/pkg/resource_customizations/external-secrets.io/ExternalSecret/testdata/degraded.yaml new file mode 100644 index 0000000..db98801 --- /dev/null +++ b/pkg/resource_customizations/external-secrets.io/ExternalSecret/testdata/degraded.yaml @@ -0,0 +1,52 @@ +apiVersion: external-secrets.io/v1alpha1 +kind: ExternalSecret +metadata: + creationTimestamp: '2021-11-16T21:59:33Z' + generation: 1 + name: test-degraded + namespace: argocd + resourceVersion: '136487331' + selfLink: /apis/external-secrets.io/v1alpha1/namespaces/argocd/externalsecrets/test-degraded + uid: 1e754a7e-0781-4d57-932d-4651d5b19586 +spec: + data: + - remoteRef: + key: secret/sa/example + property: api.address + secretKey: url + - remoteRef: + key: secret/sa/example + property: ca.crt + secretKey: ca + - remoteRef: + key: secret/sa/example + property: token + secretKey: token + refreshInterval: 1m + secretStoreRef: + kind: SecretStore + name: example + target: + creationPolicy: Owner + template: + data: + config: | + { + "bearerToken": "{{ .token | base64decode | toString }}", + "tlsClientConfig": { + "insecure": false, + "caData": "{{ .ca | toString }}" + } + } + name: cluster-test + server: '{{ .url | toString }}' + metadata: + labels: + argocd.argoproj.io/secret-type: cluster +status: + conditions: + - lastTransitionTime: '2021-11-16T21:59:34Z' + message: 'could not get secret data from provider: key "secret/sa/example" from ExternalSecret "test-degraded"' + reason: SecretSyncedError + status: 'False' + type: Ready diff --git a/pkg/resource_customizations/external-secrets.io/ExternalSecret/testdata/healthy.yaml b/pkg/resource_customizations/external-secrets.io/ExternalSecret/testdata/healthy.yaml new file mode 
100644 index 0000000..da17edb --- /dev/null +++ b/pkg/resource_customizations/external-secrets.io/ExternalSecret/testdata/healthy.yaml @@ -0,0 +1,54 @@ +apiVersion: external-secrets.io/v1alpha1 +kind: ExternalSecret +metadata: + creationTimestamp: '2021-11-16T21:59:33Z' + generation: 1 + name: test-healthy + namespace: argocd + resourceVersion: '136487331' + selfLink: /apis/external-secrets.io/v1alpha1/namespaces/argocd/externalsecrets/test-healthy + uid: 1e754a7e-0781-4d57-932d-4651d5b19586 +spec: + data: + - remoteRef: + key: secret/sa/example + property: api.address + secretKey: url + - remoteRef: + key: secret/sa/example + property: ca.crt + secretKey: ca + - remoteRef: + key: secret/sa/example + property: token + secretKey: token + refreshInterval: 1m + secretStoreRef: + kind: SecretStore + name: example + target: + creationPolicy: Owner + template: + data: + config: | + { + "bearerToken": "{{ .token | base64decode | toString }}", + "tlsClientConfig": { + "insecure": false, + "caData": "{{ .ca | toString }}" + } + } + name: cluster-test + server: '{{ .url | toString }}' + metadata: + labels: + argocd.argoproj.io/secret-type: cluster +status: + conditions: + - lastTransitionTime: '2021-11-16T21:59:34Z' + message: Secret was synced + reason: SecretSynced + status: 'True' + type: Ready + refreshTime: '2021-11-29T18:32:24Z' + syncedResourceVersion: 1-519a61da0dc68b2575b4f8efada70e42 diff --git a/pkg/resource_customizations/external-secrets.io/ExternalSecret/testdata/progressing.yaml b/pkg/resource_customizations/external-secrets.io/ExternalSecret/testdata/progressing.yaml new file mode 100644 index 0000000..5e55797 --- /dev/null +++ b/pkg/resource_customizations/external-secrets.io/ExternalSecret/testdata/progressing.yaml @@ -0,0 +1,45 @@ +apiVersion: external-secrets.io/v1alpha1 +kind: ExternalSecret +metadata: + creationTimestamp: '2021-11-16T21:59:33Z' + generation: 1 + name: test-progressing + namespace: argocd + resourceVersion: '136487331' + selfLink: /apis/external-secrets.io/v1alpha1/namespaces/argocd/externalsecrets/test-progressing + uid: 1e754a7e-0781-4d57-932d-4651d5b19586 +spec: + data: + - remoteRef: + key: secret/sa/example + property: api.address + secretKey: url + - remoteRef: + key: secret/sa/example + property: ca.crt + secretKey: ca + - remoteRef: + key: secret/sa/example + property: token + secretKey: token + refreshInterval: 1m + secretStoreRef: + kind: SecretStore + name: example + target: + creationPolicy: Owner + template: + data: + config: | + { + "bearerToken": "{{ .token | base64decode | toString }}", + "tlsClientConfig": { + "insecure": false, + "caData": "{{ .ca | toString }}" + } + } + name: cluster-test + server: '{{ .url | toString }}' + metadata: + labels: + argocd.argoproj.io/secret-type: cluster diff --git a/pkg/resource_customizations/external-secrets.io/PushSecret/actions/action_test.yaml b/pkg/resource_customizations/external-secrets.io/PushSecret/actions/action_test.yaml new file mode 100644 index 0000000..457e589 --- /dev/null +++ b/pkg/resource_customizations/external-secrets.io/PushSecret/actions/action_test.yaml @@ -0,0 +1,4 @@ +actionTests: + - action: push + inputPath: testdata/push-secret.yaml + expectedOutputPath: testdata/push-secret-updated.yaml diff --git a/pkg/resource_customizations/external-secrets.io/PushSecret/actions/discovery.lua b/pkg/resource_customizations/external-secrets.io/PushSecret/actions/discovery.lua new file mode 100644 index 0000000..6b095fb --- /dev/null +++ 
b/pkg/resource_customizations/external-secrets.io/PushSecret/actions/discovery.lua @@ -0,0 +1,3 @@ +actions = {} +actions["push"] = {["disabled"] = false} +return actions diff --git a/pkg/resource_customizations/external-secrets.io/PushSecret/actions/push/action.lua b/pkg/resource_customizations/external-secrets.io/PushSecret/actions/push/action.lua new file mode 100644 index 0000000..fa29c48 --- /dev/null +++ b/pkg/resource_customizations/external-secrets.io/PushSecret/actions/push/action.lua @@ -0,0 +1,6 @@ +local os = require("os") +if obj.metadata.annotations == nil then + obj.metadata.annotations = {} +end +obj.metadata.annotations["force-sync"] = os.date("!%Y-%m-%dT%XZ") +return obj diff --git a/pkg/resource_customizations/external-secrets.io/PushSecret/actions/testdata/push-secret-updated.yaml b/pkg/resource_customizations/external-secrets.io/PushSecret/actions/testdata/push-secret-updated.yaml new file mode 100644 index 0000000..952f7e9 --- /dev/null +++ b/pkg/resource_customizations/external-secrets.io/PushSecret/actions/testdata/push-secret-updated.yaml @@ -0,0 +1,41 @@ +apiVersion: external-secrets.io/v1alpha1 +kind: PushSecret +metadata: + annotations: + force-sync: '0001-01-01T00:00:00Z' + creationTimestamp: '2023-07-05T20:49:16Z' + generation: 1 + name: test-healthy + namespace: external-secret + resourceVersion: '777692391' + uid: 88cb613a-07b0-4fb2-8fdb-d5a5a9c2c917 +spec: + data: + - match: + remoteRef: + property: test + remoteKey: remote/path + secretKey: test + deletionPolicy: None + refreshInterval: 5m + secretStoreRefs: + - kind: ClusterSecretStore + name: my-store + selector: + secret: + name: existing-secret +status: + conditions: + - lastTransitionTime: '2023-07-05T20:49:16Z' + message: PushSecret synced successfully + reason: Synced + status: 'True' + type: Ready + syncedPushSecrets: + ClusterSecretStore/my-store: + remote/path/test: + match: + remoteRef: + property: test + remoteKey: remote/path + secretKey: test diff --git a/pkg/resource_customizations/external-secrets.io/PushSecret/actions/testdata/push-secret.yaml b/pkg/resource_customizations/external-secrets.io/PushSecret/actions/testdata/push-secret.yaml new file mode 100644 index 0000000..487233a --- /dev/null +++ b/pkg/resource_customizations/external-secrets.io/PushSecret/actions/testdata/push-secret.yaml @@ -0,0 +1,39 @@ +apiVersion: external-secrets.io/v1alpha1 +kind: PushSecret +metadata: + creationTimestamp: '2023-07-05T20:49:16Z' + generation: 1 + name: test-healthy + namespace: external-secret + resourceVersion: '777692391' + uid: 88cb613a-07b0-4fb2-8fdb-d5a5a9c2c917 +spec: + data: + - match: + remoteRef: + property: test + remoteKey: remote/path + secretKey: test + deletionPolicy: None + refreshInterval: 5m + secretStoreRefs: + - kind: ClusterSecretStore + name: my-store + selector: + secret: + name: existing-secret +status: + conditions: + - lastTransitionTime: '2023-07-05T20:49:16Z' + message: PushSecret synced successfully + reason: Synced + status: 'True' + type: Ready + syncedPushSecrets: + ClusterSecretStore/my-store: + remote/path/test: + match: + remoteRef: + property: test + remoteKey: remote/path + secretKey: test diff --git a/pkg/resource_customizations/external-secrets.io/PushSecret/health.lua b/pkg/resource_customizations/external-secrets.io/PushSecret/health.lua new file mode 100644 index 0000000..d86cb4c --- /dev/null +++ b/pkg/resource_customizations/external-secrets.io/PushSecret/health.lua @@ -0,0 +1,20 @@ +hs = {} +if obj.status ~= nil then + if obj.status.conditions ~= nil then 
+ for i, condition in ipairs(obj.status.conditions) do + if condition.type == "Ready" and condition.status == "False" then + hs.status = "Degraded" + hs.message = condition.message + return hs + end + if condition.type == "Ready" and condition.status == "True" then + hs.status = "Healthy" + hs.message = condition.message + return hs + end + end + end +end +hs.status = "Progressing" +hs.message = "Waiting for PushSecret" +return hs diff --git a/pkg/resource_customizations/external-secrets.io/PushSecret/health_test.yaml b/pkg/resource_customizations/external-secrets.io/PushSecret/health_test.yaml new file mode 100644 index 0000000..07d6ab3 --- /dev/null +++ b/pkg/resource_customizations/external-secrets.io/PushSecret/health_test.yaml @@ -0,0 +1,13 @@ +tests: + - healthStatus: + status: Progressing + message: Waiting for PushSecret + inputPath: testdata/progressing.yaml + - healthStatus: + status: Degraded + message: 'set secret failed: could not write remote ref test to target secretstore my-store: Error making API request.' + inputPath: testdata/degraded.yaml + - healthStatus: + status: Healthy + message: 'PushSecret synced successfully' + inputPath: testdata/healthy.yaml diff --git a/pkg/resource_customizations/external-secrets.io/PushSecret/testdata/degraded.yaml b/pkg/resource_customizations/external-secrets.io/PushSecret/testdata/degraded.yaml new file mode 100644 index 0000000..aab422e --- /dev/null +++ b/pkg/resource_customizations/external-secrets.io/PushSecret/testdata/degraded.yaml @@ -0,0 +1,33 @@ +apiVersion: external-secrets.io/v1alpha1 +kind: PushSecret +metadata: + creationTimestamp: '2023-07-05T20:49:16Z' + generation: 1 + name: test-degraded + namespace: external-secret + resourceVersion: '777692391' + uid: 88cb613a-07b0-4fb2-8fdb-d5a5a9c2c917 +spec: + data: + - match: + remoteRef: + property: test + remoteKey: remote/path + secretKey: test + deletionPolicy: None + refreshInterval: 5m + secretStoreRefs: + - kind: ClusterSecretStore + name: my-store + selector: + secret: + name: existing-secret +status: + conditions: + - lastTransitionTime: '2023-07-05T20:49:16Z' + message: 'set secret failed: could not write remote ref test to target secretstore my-store: Error making API request.' 
+ reason: Errored + status: 'False' + type: Ready + syncedPushSecrets: + ClusterSecretStore/my-store: {} diff --git a/pkg/resource_customizations/external-secrets.io/PushSecret/testdata/healthy.yaml b/pkg/resource_customizations/external-secrets.io/PushSecret/testdata/healthy.yaml new file mode 100644 index 0000000..487233a --- /dev/null +++ b/pkg/resource_customizations/external-secrets.io/PushSecret/testdata/healthy.yaml @@ -0,0 +1,39 @@ +apiVersion: external-secrets.io/v1alpha1 +kind: PushSecret +metadata: + creationTimestamp: '2023-07-05T20:49:16Z' + generation: 1 + name: test-healthy + namespace: external-secret + resourceVersion: '777692391' + uid: 88cb613a-07b0-4fb2-8fdb-d5a5a9c2c917 +spec: + data: + - match: + remoteRef: + property: test + remoteKey: remote/path + secretKey: test + deletionPolicy: None + refreshInterval: 5m + secretStoreRefs: + - kind: ClusterSecretStore + name: my-store + selector: + secret: + name: existing-secret +status: + conditions: + - lastTransitionTime: '2023-07-05T20:49:16Z' + message: PushSecret synced successfully + reason: Synced + status: 'True' + type: Ready + syncedPushSecrets: + ClusterSecretStore/my-store: + remote/path/test: + match: + remoteRef: + property: test + remoteKey: remote/path + secretKey: test diff --git a/pkg/resource_customizations/external-secrets.io/PushSecret/testdata/progressing.yaml b/pkg/resource_customizations/external-secrets.io/PushSecret/testdata/progressing.yaml new file mode 100644 index 0000000..e67d679 --- /dev/null +++ b/pkg/resource_customizations/external-secrets.io/PushSecret/testdata/progressing.yaml @@ -0,0 +1,24 @@ +apiVersion: external-secrets.io/v1alpha1 +kind: PushSecret +metadata: + creationTimestamp: '2023-07-05T20:49:16Z' + generation: 1 + name: test-progressing + namespace: external-secret + resourceVersion: '777692391' + uid: 88cb613a-07b0-4fb2-8fdb-d5a5a9c2c917 +spec: + data: + - match: + remoteRef: + property: test + remoteKey: remote/path + secretKey: test + deletionPolicy: None + refreshInterval: 5m + secretStoreRefs: + - kind: ClusterSecretStore + name: my-store + selector: + secret: + name: existing-secret diff --git a/pkg/resource_customizations/external-secrets.io/SecretStore/health.lua b/pkg/resource_customizations/external-secrets.io/SecretStore/health.lua new file mode 100644 index 0000000..656c8a3 --- /dev/null +++ b/pkg/resource_customizations/external-secrets.io/SecretStore/health.lua @@ -0,0 +1,20 @@ +local hs = {} +if obj.status ~= nil then + if obj.status.conditions ~= nil then + for i, condition in ipairs(obj.status.conditions) do + if condition.type == "Ready" and condition.status == "False" then + hs.status = "Degraded" + hs.message = condition.message + return hs + end + if condition.type == "Ready" and condition.status == "True" then + hs.status = "Healthy" + hs.message = condition.message + return hs + end + end + end +end +hs.status = "Progressing" +hs.message = "Waiting for SecretStore" +return hs diff --git a/pkg/resource_customizations/external-secrets.io/SecretStore/health_test.yaml b/pkg/resource_customizations/external-secrets.io/SecretStore/health_test.yaml new file mode 100644 index 0000000..6e69215 --- /dev/null +++ b/pkg/resource_customizations/external-secrets.io/SecretStore/health_test.yaml @@ -0,0 +1,9 @@ +tests: + - healthStatus: + status: Degraded + message: 'unable to validate store' + inputPath: testdata/degraded.yaml + - healthStatus: + status: Healthy + message: 'store validated' + inputPath: testdata/healthy.yaml diff --git 
a/pkg/resource_customizations/external-secrets.io/SecretStore/testdata/degraded.yaml b/pkg/resource_customizations/external-secrets.io/SecretStore/testdata/degraded.yaml new file mode 100644 index 0000000..6fe5150 --- /dev/null +++ b/pkg/resource_customizations/external-secrets.io/SecretStore/testdata/degraded.yaml @@ -0,0 +1,24 @@ +apiVersion: external-secrets.io/v1beta1 +kind: SecretStore +metadata: + name: vault +spec: + provider: + kubernetes: + auth: + serviceAccount: + name: vault + remoteNamespace: vault + server: + caProvider: + key: ca.crt + name: kube-root-ca.crt + type: ConfigMap + url: https://kubernetes.default +status: + conditions: + - lastTransitionTime: "2022-10-05T13:59:22Z" + message: unable to validate store + reason: ValidationFailed + status: "False" + type: Ready diff --git a/pkg/resource_customizations/external-secrets.io/SecretStore/testdata/healthy.yaml b/pkg/resource_customizations/external-secrets.io/SecretStore/testdata/healthy.yaml new file mode 100644 index 0000000..e93ca88 --- /dev/null +++ b/pkg/resource_customizations/external-secrets.io/SecretStore/testdata/healthy.yaml @@ -0,0 +1,29 @@ +apiVersion: external-secrets.io/v1beta1 +kind: SecretStore +metadata: + name: vault +spec: + provider: + vault: + auth: + kubernetes: + mountPath: foobar + role: vault + secretRef: + key: token + name: vault + caBundle: foobar + namespace: pki + path: secret + server: https://foo.bar + version: v2 + retrySettings: + maxRetries: 5 + retryInterval: 10s +status: + conditions: + - lastTransitionTime: "2022-10-06T15:48:42Z" + message: store validated + reason: Valid + status: "True" + type: Ready diff --git a/pkg/resource_customizations/flagger.app/Canary/health.lua b/pkg/resource_customizations/flagger.app/Canary/health.lua new file mode 100644 index 0000000..dd32bbc --- /dev/null +++ b/pkg/resource_customizations/flagger.app/Canary/health.lua @@ -0,0 +1,31 @@ +local sep = " --- " +local hs = {} +if obj.status ~= nil then + local message = "" + if tonumber(obj.status.canaryWeight) > 0 then + message = "Canary Weight: " .. obj.status.canaryWeight .. " %" + end + for i, condition in ipairs(obj.status.conditions) do + if message ~= "" then + message = message .. sep + end + message = message .. condition.message + end + if obj.status.phase == "Failed" then + hs.status = "Degraded" + elseif ( obj.status.phase == "Progressing" or + obj.status.phase == "Finalising" or + obj.status.phase == "Promoting" ) then + hs.status = "Progressing" + elseif ( obj.status.phase == "Succeeded" or + obj.status.phase == "Initialized" ) then + hs.status = "Healthy" + else + hs.status = "Unknown" + end + hs.message = obj.status.phase .. sep .. message + return hs +end +hs.status = "Unknown" +hs.message = "No status" +return hs \ No newline at end of file diff --git a/pkg/resource_customizations/flagger.app/Canary/health_test.yaml b/pkg/resource_customizations/flagger.app/Canary/health_test.yaml new file mode 100644 index 0000000..7851abb --- /dev/null +++ b/pkg/resource_customizations/flagger.app/Canary/health_test.yaml @@ -0,0 +1,25 @@ +tests: +- healthStatus: + status: Progressing + message: "Progressing --- Canary Weight: 10 % --- New revision detected, progressing canary analysis." + inputPath: testdata/progressing.yaml +- healthStatus: + status: Progressing + message: "Finalising --- Canary analysis completed, routing all traffic to primary." 
+ inputPath: testdata/finalising.yaml +- healthStatus: + status: Progressing + message: "Promoting --- Canary analysis completed, starting primary rolling update." + inputPath: testdata/promoting.yaml +- healthStatus: + status: Degraded + message: "Failed --- Canary analysis failed, Deployment scaled to zero." + inputPath: testdata/degraded.yaml +- healthStatus: + status: Healthy + message: "Succeeded --- Canary analysis completed successfully, promotion finished." + inputPath: testdata/succeeded.yaml +- healthStatus: + status: Healthy + message: "Initialized --- Installation successful." + inputPath: testdata/initialized.yaml \ No newline at end of file diff --git a/pkg/resource_customizations/flagger.app/Canary/testdata/degraded.yaml b/pkg/resource_customizations/flagger.app/Canary/testdata/degraded.yaml new file mode 100644 index 0000000..3d374a2 --- /dev/null +++ b/pkg/resource_customizations/flagger.app/Canary/testdata/degraded.yaml @@ -0,0 +1,29 @@ +apiVersion: flagger.app/v1beta1 +kind: Canary +metadata: + generation: 1 + labels: + app.kubernetes.io/instance: podinfo + name: podinfo + namespace: default + resourceVersion: "2268395" + selfLink: /apis/flagger.app/v1beta1/namespaces/default/canaries/podinfo + uid: 82df0136-0248-4a95-9c60-3184792614ea +spec: {} +status: + canaryWeight: 0 + conditions: + - lastTransitionTime: "2020-07-03T19:13:47Z" + lastUpdateTime: "2020-07-03T19:13:47Z" + message: Canary analysis failed, Deployment scaled to zero. + reason: Failed + status: "False" + type: Promoted + failedChecks: 0 + iterations: 0 + lastAppliedSpec: 765c47b8f7 + lastPromotedSpec: fc74df5fc + lastTransitionTime: "2020-07-03T19:13:47Z" + phase: Failed + trackedConfigs: {} + diff --git a/pkg/resource_customizations/flagger.app/Canary/testdata/finalising.yaml b/pkg/resource_customizations/flagger.app/Canary/testdata/finalising.yaml new file mode 100644 index 0000000..f495888 --- /dev/null +++ b/pkg/resource_customizations/flagger.app/Canary/testdata/finalising.yaml @@ -0,0 +1,28 @@ +apiVersion: flagger.app/v1beta1 +kind: Canary +metadata: + generation: 1 + labels: + app.kubernetes.io/instance: podinfo + name: podinfo + namespace: default + resourceVersion: "2268395" + selfLink: /apis/flagger.app/v1beta1/namespaces/default/canaries/podinfo + uid: 82df0136-0248-4a95-9c60-3184792614ea +spec: {} +status: + canaryWeight: 0 + conditions: + - lastTransitionTime: "2020-07-03T18:53:02Z" + lastUpdateTime: "2020-07-03T18:55:12Z" + message: Canary analysis completed, routing all traffic to primary. 
+ reason: Finalising + status: Unknown + type: Promoted + failedChecks: 0 + iterations: 0 + lastAppliedSpec: fc74df5fc + lastPromotedSpec: 744b467645 + lastTransitionTime: "2020-07-03T18:55:12Z" + phase: Finalising + trackedConfigs: {} \ No newline at end of file diff --git a/pkg/resource_customizations/flagger.app/Canary/testdata/initialized.yaml b/pkg/resource_customizations/flagger.app/Canary/testdata/initialized.yaml new file mode 100644 index 0000000..effd8f8 --- /dev/null +++ b/pkg/resource_customizations/flagger.app/Canary/testdata/initialized.yaml @@ -0,0 +1,28 @@ +apiVersion: flagger.app/v1beta1 +kind: Canary +metadata: + generation: 1 + labels: + app.kubernetes.io/instance: podinfo + name: podinfo + namespace: default + resourceVersion: "2268395" + selfLink: /apis/flagger.app/v1beta1/namespaces/default/canaries/podinfo + uid: 82df0136-0248-4a95-9c60-3184792614ea +spec: {} +status: + canaryWeight: 0 + conditions: + - lastTransitionTime: "2020-07-03T13:36:22Z" + lastUpdateTime: "2020-07-03T13:36:22Z" + message: Installation successful. + reason: Initialized + status: "True" + type: Promoted + failedChecks: 0 + iterations: 0 + lastAppliedSpec: 658bbf784f + lastPromotedSpec: 658bbf784f + lastTransitionTime: "2020-07-03T13:36:22Z" + phase: Initialized + trackedConfigs: {} diff --git a/pkg/resource_customizations/flagger.app/Canary/testdata/progressing.yaml b/pkg/resource_customizations/flagger.app/Canary/testdata/progressing.yaml new file mode 100644 index 0000000..771d4b3 --- /dev/null +++ b/pkg/resource_customizations/flagger.app/Canary/testdata/progressing.yaml @@ -0,0 +1,28 @@ +apiVersion: flagger.app/v1beta1 +kind: Canary +metadata: + generation: 1 + labels: + app.kubernetes.io/instance: podinfo + name: podinfo + namespace: default + resourceVersion: "2268395" + selfLink: /apis/flagger.app/v1beta1/namespaces/default/canaries/podinfo + uid: 82df0136-0248-4a95-9c60-3184792614ea +spec: {} +status: + canaryWeight: 10 + conditions: + - lastTransitionTime: "2020-07-03T18:46:52Z" + lastUpdateTime: "2020-07-03T18:46:52Z" + message: New revision detected, progressing canary analysis. + reason: Progressing + status: Unknown + type: Promoted + failedChecks: 0 + iterations: 0 + lastAppliedSpec: 5c75b466fb + lastPromotedSpec: 658bbf784f + lastTransitionTime: "2020-07-03T18:47:02Z" + phase: Progressing + trackedConfigs: {} diff --git a/pkg/resource_customizations/flagger.app/Canary/testdata/promoting.yaml b/pkg/resource_customizations/flagger.app/Canary/testdata/promoting.yaml new file mode 100644 index 0000000..6486b1c --- /dev/null +++ b/pkg/resource_customizations/flagger.app/Canary/testdata/promoting.yaml @@ -0,0 +1,28 @@ +apiVersion: flagger.app/v1beta1 +kind: Canary +metadata: + generation: 1 + labels: + app.kubernetes.io/instance: podinfo + name: podinfo + namespace: default + resourceVersion: "2268395" + selfLink: /apis/flagger.app/v1beta1/namespaces/default/canaries/podinfo + uid: 82df0136-0248-4a95-9c60-3184792614ea +spec: {} +status: + canaryWeight: 0 + conditions: + - lastTransitionTime: "2020-07-03T18:53:02Z" + lastUpdateTime: "2020-07-03T18:55:02Z" + message: Canary analysis completed, starting primary rolling update. 
+ reason: Promoting + status: Unknown + type: Promoted + failedChecks: 0 + iterations: 0 + lastAppliedSpec: fc74df5fc + lastPromotedSpec: 744b467645 + lastTransitionTime: "2020-07-03T18:55:02Z" + phase: Promoting + trackedConfigs: {} \ No newline at end of file diff --git a/pkg/resource_customizations/flagger.app/Canary/testdata/succeeded.yaml b/pkg/resource_customizations/flagger.app/Canary/testdata/succeeded.yaml new file mode 100644 index 0000000..5ccc01f --- /dev/null +++ b/pkg/resource_customizations/flagger.app/Canary/testdata/succeeded.yaml @@ -0,0 +1,28 @@ +apiVersion: flagger.app/v1beta1 +kind: Canary +metadata: + generation: 1 + labels: + app.kubernetes.io/instance: podinfo + name: podinfo + namespace: default + resourceVersion: "2268395" + selfLink: /apis/flagger.app/v1beta1/namespaces/default/canaries/podinfo + uid: 82df0136-0248-4a95-9c60-3184792614ea +spec: {} +status: + canaryWeight: 0 + conditions: + - lastTransitionTime: "2020-07-03T13:36:22Z" + lastUpdateTime: "2020-07-03T13:36:22Z" + message: Canary analysis completed successfully, promotion finished. + reason: Succeeded + status: "True" + type: Promoted + failedChecks: 0 + iterations: 0 + lastAppliedSpec: 658bbf784f + lastPromotedSpec: 658bbf784f + lastTransitionTime: "2020-07-03T13:36:22Z" + phase: Succeeded + trackedConfigs: {} diff --git a/pkg/resource_customizations/flink.apache.org/FlinkDeployment/health.lua b/pkg/resource_customizations/flink.apache.org/FlinkDeployment/health.lua new file mode 100644 index 0000000..677f1a7 --- /dev/null +++ b/pkg/resource_customizations/flink.apache.org/FlinkDeployment/health.lua @@ -0,0 +1,24 @@ +local health_status = {} + +if obj.status ~= nil and obj.status.reconciliationStatus ~= nil then + if obj.status.reconciliationStatus.success or obj.status.reconciliationStatus.state == "DEPLOYED" then + health_status.status = "Healthy" + return health_status + end + + if obj.status.jobManagerDeploymentStatus == "DEPLOYED_NOT_READY" or obj.status.jobManagerDeploymentStatus == "DEPLOYING" then + health_status.status = "Progressing" + health_status.message = "Waiting for deploying" + return health_status + end + + if obj.status.jobManagerDeploymentStatus == "ERROR" then + health_status.status = "Degraded" + health_status.message = obj.status.reconciliationStatus.error + return health_status + end +end + +health_status.status = "Progressing" +health_status.message = "Waiting for Flink operator" +return health_status diff --git a/pkg/resource_customizations/flink.apache.org/FlinkDeployment/health_test.yaml b/pkg/resource_customizations/flink.apache.org/FlinkDeployment/health_test.yaml new file mode 100644 index 0000000..25a4f12 --- /dev/null +++ b/pkg/resource_customizations/flink.apache.org/FlinkDeployment/health_test.yaml @@ -0,0 +1,25 @@ +tests: +- healthStatus: + status: Healthy + inputPath: testdata/healthy_running_v0.1.x.yaml +- healthStatus: + status: Healthy + inputPath: testdata/healthy_running_v1.x.yaml +- healthStatus: + status: Healthy + inputPath: testdata/healthy_suspended_v0.1.x.yaml +- healthStatus: + status: Healthy + inputPath: testdata/healthy_suspended_v1.x.yaml +- healthStatus: + status: Progressing + message: Waiting for deploying + inputPath: testdata/progressing_deployedNotReady.yaml +- healthStatus: + status: Progressing + message: Waiting for deploying + inputPath: testdata/progressing_deploying.yaml +- healthStatus: + status: Degraded + message: "org.apache.flink.configuration.IllegalConfigurationException: TaskManager memory configuration failed: Cannot read 
memory size from config option ''taskmanager.memory.network.max''." + inputPath: testdata/degraded_error.yaml diff --git a/pkg/resource_customizations/flink.apache.org/FlinkDeployment/testdata/degraded_error.yaml b/pkg/resource_customizations/flink.apache.org/FlinkDeployment/testdata/degraded_error.yaml new file mode 100644 index 0000000..dc9ce97 --- /dev/null +++ b/pkg/resource_customizations/flink.apache.org/FlinkDeployment/testdata/degraded_error.yaml @@ -0,0 +1,8 @@ +apiVersion: flink.apache.org/v1alpha1 +kind: FlinkDeployment +spec: {} +status: + jobManagerDeploymentStatus: ERROR + reconciliationStatus: + error: "org.apache.flink.configuration.IllegalConfigurationException: TaskManager memory configuration failed: Cannot read memory size from config option ''taskmanager.memory.network.max''." + success: false diff --git a/pkg/resource_customizations/flink.apache.org/FlinkDeployment/testdata/healthy_running_v0.1.x.yaml b/pkg/resource_customizations/flink.apache.org/FlinkDeployment/testdata/healthy_running_v0.1.x.yaml new file mode 100644 index 0000000..879dc34 --- /dev/null +++ b/pkg/resource_customizations/flink.apache.org/FlinkDeployment/testdata/healthy_running_v0.1.x.yaml @@ -0,0 +1,11 @@ +apiVersion: flink.apache.org/v1alpha1 +kind: FlinkDeployment +spec: + job: + state: running +status: + jobManagerDeploymentStatus: READY + jobStatus: + state: RUNNING + reconciliationStatus: + success: true diff --git a/pkg/resource_customizations/flink.apache.org/FlinkDeployment/testdata/healthy_running_v1.x.yaml b/pkg/resource_customizations/flink.apache.org/FlinkDeployment/testdata/healthy_running_v1.x.yaml new file mode 100644 index 0000000..60e2c2b --- /dev/null +++ b/pkg/resource_customizations/flink.apache.org/FlinkDeployment/testdata/healthy_running_v1.x.yaml @@ -0,0 +1,11 @@ +apiVersion: flink.apache.org/v1alpha1 +kind: FlinkDeployment +spec: + job: + state: running +status: + jobManagerDeploymentStatus: READY + jobStatus: + state: RUNNING + reconciliationStatus: + state: DEPLOYED diff --git a/pkg/resource_customizations/flink.apache.org/FlinkDeployment/testdata/healthy_suspended_v0.1.x.yaml b/pkg/resource_customizations/flink.apache.org/FlinkDeployment/testdata/healthy_suspended_v0.1.x.yaml new file mode 100644 index 0000000..b49b313 --- /dev/null +++ b/pkg/resource_customizations/flink.apache.org/FlinkDeployment/testdata/healthy_suspended_v0.1.x.yaml @@ -0,0 +1,11 @@ +apiVersion: flink.apache.org/v1alpha1 +kind: FlinkDeployment +spec: + job: + state: suspended +status: + jobManagerDeploymentStatus: MISSING + jobStatus: + state: SUSPENDED + reconciliationStatus: + success: true diff --git a/pkg/resource_customizations/flink.apache.org/FlinkDeployment/testdata/healthy_suspended_v1.x.yaml b/pkg/resource_customizations/flink.apache.org/FlinkDeployment/testdata/healthy_suspended_v1.x.yaml new file mode 100644 index 0000000..023c289 --- /dev/null +++ b/pkg/resource_customizations/flink.apache.org/FlinkDeployment/testdata/healthy_suspended_v1.x.yaml @@ -0,0 +1,11 @@ +apiVersion: flink.apache.org/v1alpha1 +kind: FlinkDeployment +spec: + job: + state: suspended +status: + jobManagerDeploymentStatus: MISSING + jobStatus: + state: SUSPENDED + reconciliationStatus: + state: DEPLOYED diff --git a/pkg/resource_customizations/flink.apache.org/FlinkDeployment/testdata/progressing_deployedNotReady.yaml b/pkg/resource_customizations/flink.apache.org/FlinkDeployment/testdata/progressing_deployedNotReady.yaml new file mode 100644 index 0000000..33bee63 --- /dev/null +++ 
b/pkg/resource_customizations/flink.apache.org/FlinkDeployment/testdata/progressing_deployedNotReady.yaml @@ -0,0 +1,6 @@ +apiVersion: flink.apache.org/v1alpha1 +kind: FlinkDeployment +spec: {} +status: + jobManagerDeploymentStatus: DEPLOYED_NOT_READY + reconciliationStatus: {} diff --git a/pkg/resource_customizations/flink.apache.org/FlinkDeployment/testdata/progressing_deploying.yaml b/pkg/resource_customizations/flink.apache.org/FlinkDeployment/testdata/progressing_deploying.yaml new file mode 100644 index 0000000..7792c69 --- /dev/null +++ b/pkg/resource_customizations/flink.apache.org/FlinkDeployment/testdata/progressing_deploying.yaml @@ -0,0 +1,6 @@ +apiVersion: flink.apache.org/v1alpha1 +kind: FlinkDeployment +spec: {} +status: + jobManagerDeploymentStatus: DEPLOYING + reconciliationStatus: {} diff --git a/pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMPartialPolicy/health.lua b/pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMPartialPolicy/health.lua new file mode 100644 index 0000000..585b5e2 --- /dev/null +++ b/pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMPartialPolicy/health.lua @@ -0,0 +1,39 @@ +local hs = { + status = "Progressing", + message = "Update in progress" +} +if obj.status ~= nil then + if obj.status.conditions ~= nil then + for i, condition in ipairs(obj.status.conditions) do + + -- Up To Date + if condition.reason == "UpToDate" and condition.status == "True" then + hs.status = "Healthy" + hs.message = condition.message + return hs + end + + -- Update Failed + if condition.reason == "UpdateFailed" then + hs.status = "Degraded" + hs.message = condition.message + return hs + end + + -- Dependency Not Found + if condition.reason == "DependencyNotFound" then + hs.status = "Degraded" + hs.message = condition.message + return hs + end + + -- Dependency Not Ready + if condition.reason == "DependencyNotReady" then + hs.status = "Suspended" + hs.message = condition.message + return hs + end + end + end +end +return hs \ No newline at end of file diff --git a/pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMPartialPolicy/health_test.yaml b/pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMPartialPolicy/health_test.yaml new file mode 100644 index 0000000..8354cfb --- /dev/null +++ b/pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMPartialPolicy/health_test.yaml @@ -0,0 +1,21 @@ +tests: +- healthStatus: + status: Degraded + message: "Dependency not found" + inputPath: testdata/dependency_not_found.yaml +- healthStatus: + status: Suspended + message: "Dependency not ready" + inputPath: testdata/dependency_not_ready.yaml +- healthStatus: + status: Healthy + message: "The resource is up to date" + inputPath: testdata/up_to_date.yaml +- healthStatus: + status: Degraded + message: "Update failed" + inputPath: testdata/update_failed.yaml +- healthStatus: + status: Progressing + message: "Update in progress" + inputPath: testdata/update_in_progress.yaml diff --git a/pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMPartialPolicy/testdata/dependency_not_found.yaml b/pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMPartialPolicy/testdata/dependency_not_found.yaml new file mode 100644 index 0000000..556376c --- /dev/null +++ b/pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMPartialPolicy/testdata/dependency_not_found.yaml @@ -0,0 +1,9 @@ +apiVersion: iam.cnrm.cloud.google.com/v1beta1 +kind: IAMPartialPolicy +status: + conditions: + - lastTransitionTime: '2022-07-01T12:56:21Z' + message: Dependency not found + 
reason: DependencyNotFound + status: 'False' + type: Ready diff --git a/pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMPartialPolicy/testdata/dependency_not_ready.yaml b/pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMPartialPolicy/testdata/dependency_not_ready.yaml new file mode 100644 index 0000000..6667133 --- /dev/null +++ b/pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMPartialPolicy/testdata/dependency_not_ready.yaml @@ -0,0 +1,9 @@ +apiVersion: iam.cnrm.cloud.google.com/v1beta1 +kind: IAMPartialPolicy +status: + conditions: + - lastTransitionTime: '2022-07-01T12:56:21Z' + message: Dependency not ready + reason: DependencyNotReady + status: 'False' + type: Ready diff --git a/pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMPartialPolicy/testdata/up_to_date.yaml b/pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMPartialPolicy/testdata/up_to_date.yaml new file mode 100644 index 0000000..93b5886 --- /dev/null +++ b/pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMPartialPolicy/testdata/up_to_date.yaml @@ -0,0 +1,9 @@ +apiVersion: iam.cnrm.cloud.google.com/v1beta1 +kind: IAMPartialPolicy +status: + conditions: + - lastTransitionTime: '2022-05-09T08:49:18Z' + message: The resource is up to date + reason: UpToDate + status: 'True' + type: Ready diff --git a/pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMPartialPolicy/testdata/update_failed.yaml b/pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMPartialPolicy/testdata/update_failed.yaml new file mode 100644 index 0000000..d1de92b --- /dev/null +++ b/pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMPartialPolicy/testdata/update_failed.yaml @@ -0,0 +1,9 @@ +apiVersion: iam.cnrm.cloud.google.com/v1beta1 +kind: IAMPartialPolicy +status: + conditions: + - lastTransitionTime: '2022-07-01T12:56:21Z' + message: Update failed + reason: UpdateFailed + status: 'False' + type: Ready diff --git a/pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMPartialPolicy/testdata/update_in_progress.yaml b/pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMPartialPolicy/testdata/update_in_progress.yaml new file mode 100644 index 0000000..fd8d422 --- /dev/null +++ b/pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMPartialPolicy/testdata/update_in_progress.yaml @@ -0,0 +1,9 @@ +apiVersion: iam.cnrm.cloud.google.com/v1beta1 +kind: IAMPartialPolicy +status: + conditions: + - lastTransitionTime: '2022-07-01T12:56:21Z' + message: Update in progress + reason: Updating + status: 'False' + type: Ready diff --git a/pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMPolicy/health.lua b/pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMPolicy/health.lua new file mode 100644 index 0000000..585b5e2 --- /dev/null +++ b/pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMPolicy/health.lua @@ -0,0 +1,39 @@ +local hs = { + status = "Progressing", + message = "Update in progress" +} +if obj.status ~= nil then + if obj.status.conditions ~= nil then + for i, condition in ipairs(obj.status.conditions) do + + -- Up To Date + if condition.reason == "UpToDate" and condition.status == "True" then + hs.status = "Healthy" + hs.message = condition.message + return hs + end + + -- Update Failed + if condition.reason == "UpdateFailed" then + hs.status = "Degraded" + hs.message = condition.message + return hs + end + + -- Dependency Not Found + if condition.reason == "DependencyNotFound" then + hs.status = "Degraded" + hs.message = condition.message + return hs + end + + -- 
Dependency Not Ready + if condition.reason == "DependencyNotReady" then + hs.status = "Suspended" + hs.message = condition.message + return hs + end + end + end +end +return hs \ No newline at end of file diff --git a/pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMPolicy/health_test.yaml b/pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMPolicy/health_test.yaml new file mode 100644 index 0000000..8354cfb --- /dev/null +++ b/pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMPolicy/health_test.yaml @@ -0,0 +1,21 @@ +tests: +- healthStatus: + status: Degraded + message: "Dependency not found" + inputPath: testdata/dependency_not_found.yaml +- healthStatus: + status: Suspended + message: "Dependency not ready" + inputPath: testdata/dependency_not_ready.yaml +- healthStatus: + status: Healthy + message: "The resource is up to date" + inputPath: testdata/up_to_date.yaml +- healthStatus: + status: Degraded + message: "Update failed" + inputPath: testdata/update_failed.yaml +- healthStatus: + status: Progressing + message: "Update in progress" + inputPath: testdata/update_in_progress.yaml diff --git a/pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMPolicy/testdata/dependency_not_found.yaml b/pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMPolicy/testdata/dependency_not_found.yaml new file mode 100644 index 0000000..439546a --- /dev/null +++ b/pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMPolicy/testdata/dependency_not_found.yaml @@ -0,0 +1,9 @@ +apiVersion: iam.cnrm.cloud.google.com/v1beta1 +kind: IAMPolicy +status: + conditions: + - lastTransitionTime: '2022-07-01T12:56:21Z' + message: Dependency not found + reason: DependencyNotFound + status: 'False' + type: Ready diff --git a/pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMPolicy/testdata/dependency_not_ready.yaml b/pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMPolicy/testdata/dependency_not_ready.yaml new file mode 100644 index 0000000..03d48f0 --- /dev/null +++ b/pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMPolicy/testdata/dependency_not_ready.yaml @@ -0,0 +1,9 @@ +apiVersion: iam.cnrm.cloud.google.com/v1beta1 +kind: IAMPolicy +status: + conditions: + - lastTransitionTime: '2022-07-01T12:56:21Z' + message: Dependency not ready + reason: DependencyNotReady + status: 'False' + type: Ready diff --git a/pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMPolicy/testdata/up_to_date.yaml b/pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMPolicy/testdata/up_to_date.yaml new file mode 100644 index 0000000..1b1b946 --- /dev/null +++ b/pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMPolicy/testdata/up_to_date.yaml @@ -0,0 +1,9 @@ +apiVersion: iam.cnrm.cloud.google.com/v1beta1 +kind: IAMPolicy +status: + conditions: + - lastTransitionTime: '2022-05-09T08:49:18Z' + message: The resource is up to date + reason: UpToDate + status: 'True' + type: Ready diff --git a/pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMPolicy/testdata/update_failed.yaml b/pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMPolicy/testdata/update_failed.yaml new file mode 100644 index 0000000..786f2e5 --- /dev/null +++ b/pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMPolicy/testdata/update_failed.yaml @@ -0,0 +1,9 @@ +apiVersion: iam.cnrm.cloud.google.com/v1beta1 +kind: IAMPolicy +status: + conditions: + - lastTransitionTime: '2022-07-01T12:56:21Z' + message: Update failed + reason: UpdateFailed + status: 'False' + type: Ready diff --git 
a/pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMPolicy/testdata/update_in_progress.yaml b/pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMPolicy/testdata/update_in_progress.yaml new file mode 100644 index 0000000..d218393 --- /dev/null +++ b/pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMPolicy/testdata/update_in_progress.yaml @@ -0,0 +1,9 @@ +apiVersion: iam.cnrm.cloud.google.com/v1beta1 +kind: IAMPolicy +status: + conditions: + - lastTransitionTime: '2022-07-01T12:56:21Z' + message: Update in progress + reason: Updating + status: 'False' + type: Ready diff --git a/pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMPolicyMember/health.lua b/pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMPolicyMember/health.lua new file mode 100644 index 0000000..585b5e2 --- /dev/null +++ b/pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMPolicyMember/health.lua @@ -0,0 +1,39 @@ +local hs = { + status = "Progressing", + message = "Update in progress" +} +if obj.status ~= nil then + if obj.status.conditions ~= nil then + for i, condition in ipairs(obj.status.conditions) do + + -- Up To Date + if condition.reason == "UpToDate" and condition.status == "True" then + hs.status = "Healthy" + hs.message = condition.message + return hs + end + + -- Update Failed + if condition.reason == "UpdateFailed" then + hs.status = "Degraded" + hs.message = condition.message + return hs + end + + -- Dependency Not Found + if condition.reason == "DependencyNotFound" then + hs.status = "Degraded" + hs.message = condition.message + return hs + end + + -- Dependency Not Ready + if condition.reason == "DependencyNotReady" then + hs.status = "Suspended" + hs.message = condition.message + return hs + end + end + end +end +return hs \ No newline at end of file diff --git a/pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMPolicyMember/health_test.yaml b/pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMPolicyMember/health_test.yaml new file mode 100644 index 0000000..8354cfb --- /dev/null +++ b/pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMPolicyMember/health_test.yaml @@ -0,0 +1,21 @@ +tests: +- healthStatus: + status: Degraded + message: "Dependency not found" + inputPath: testdata/dependency_not_found.yaml +- healthStatus: + status: Suspended + message: "Dependency not ready" + inputPath: testdata/dependency_not_ready.yaml +- healthStatus: + status: Healthy + message: "The resource is up to date" + inputPath: testdata/up_to_date.yaml +- healthStatus: + status: Degraded + message: "Update failed" + inputPath: testdata/update_failed.yaml +- healthStatus: + status: Progressing + message: "Update in progress" + inputPath: testdata/update_in_progress.yaml diff --git a/pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMPolicyMember/testdata/dependency_not_found.yaml b/pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMPolicyMember/testdata/dependency_not_found.yaml new file mode 100644 index 0000000..f822d7e --- /dev/null +++ b/pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMPolicyMember/testdata/dependency_not_found.yaml @@ -0,0 +1,9 @@ +apiVersion: iam.cnrm.cloud.google.com/v1beta1 +kind: IAMPolicyMember +status: + conditions: + - lastTransitionTime: '2022-07-01T12:56:21Z' + message: Dependency not found + reason: DependencyNotFound + status: 'False' + type: Ready diff --git a/pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMPolicyMember/testdata/dependency_not_ready.yaml 
b/pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMPolicyMember/testdata/dependency_not_ready.yaml new file mode 100644 index 0000000..e5e3e6a --- /dev/null +++ b/pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMPolicyMember/testdata/dependency_not_ready.yaml @@ -0,0 +1,9 @@ +apiVersion: iam.cnrm.cloud.google.com/v1beta1 +kind: IAMPolicyMember +status: + conditions: + - lastTransitionTime: '2022-07-01T12:56:21Z' + message: Dependency not ready + reason: DependencyNotReady + status: 'False' + type: Ready diff --git a/pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMPolicyMember/testdata/up_to_date.yaml b/pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMPolicyMember/testdata/up_to_date.yaml new file mode 100644 index 0000000..35575a1 --- /dev/null +++ b/pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMPolicyMember/testdata/up_to_date.yaml @@ -0,0 +1,9 @@ +apiVersion: iam.cnrm.cloud.google.com/v1beta1 +kind: IAMPolicyMember +status: + conditions: + - lastTransitionTime: '2022-05-09T08:49:18Z' + message: The resource is up to date + reason: UpToDate + status: 'True' + type: Ready diff --git a/pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMPolicyMember/testdata/update_failed.yaml b/pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMPolicyMember/testdata/update_failed.yaml new file mode 100644 index 0000000..8c89620 --- /dev/null +++ b/pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMPolicyMember/testdata/update_failed.yaml @@ -0,0 +1,9 @@ +apiVersion: iam.cnrm.cloud.google.com/v1beta1 +kind: IAMPolicyMember +status: + conditions: + - lastTransitionTime: '2022-07-01T12:56:21Z' + message: Update failed + reason: UpdateFailed + status: 'False' + type: Ready diff --git a/pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMPolicyMember/testdata/update_in_progress.yaml b/pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMPolicyMember/testdata/update_in_progress.yaml new file mode 100644 index 0000000..8d7208e --- /dev/null +++ b/pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMPolicyMember/testdata/update_in_progress.yaml @@ -0,0 +1,9 @@ +apiVersion: iam.cnrm.cloud.google.com/v1beta1 +kind: IAMPolicyMember +status: + conditions: + - lastTransitionTime: '2022-07-01T12:56:21Z' + message: Update in progress + reason: Updating + status: 'False' + type: Ready diff --git a/pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMServiceAccount/health.lua b/pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMServiceAccount/health.lua new file mode 100644 index 0000000..585b5e2 --- /dev/null +++ b/pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMServiceAccount/health.lua @@ -0,0 +1,39 @@ +local hs = { + status = "Progressing", + message = "Update in progress" +} +if obj.status ~= nil then + if obj.status.conditions ~= nil then + for i, condition in ipairs(obj.status.conditions) do + + -- Up To Date + if condition.reason == "UpToDate" and condition.status == "True" then + hs.status = "Healthy" + hs.message = condition.message + return hs + end + + -- Update Failed + if condition.reason == "UpdateFailed" then + hs.status = "Degraded" + hs.message = condition.message + return hs + end + + -- Dependency Not Found + if condition.reason == "DependencyNotFound" then + hs.status = "Degraded" + hs.message = condition.message + return hs + end + + -- Dependency Not Ready + if condition.reason == "DependencyNotReady" then + hs.status = "Suspended" + hs.message = condition.message + return hs + end + end + end +end +return hs 
\ No newline at end of file diff --git a/pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMServiceAccount/health_test.yaml b/pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMServiceAccount/health_test.yaml new file mode 100644 index 0000000..8354cfb --- /dev/null +++ b/pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMServiceAccount/health_test.yaml @@ -0,0 +1,21 @@ +tests: +- healthStatus: + status: Degraded + message: "Dependency not found" + inputPath: testdata/dependency_not_found.yaml +- healthStatus: + status: Suspended + message: "Dependency not ready" + inputPath: testdata/dependency_not_ready.yaml +- healthStatus: + status: Healthy + message: "The resource is up to date" + inputPath: testdata/up_to_date.yaml +- healthStatus: + status: Degraded + message: "Update failed" + inputPath: testdata/update_failed.yaml +- healthStatus: + status: Progressing + message: "Update in progress" + inputPath: testdata/update_in_progress.yaml diff --git a/pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMServiceAccount/testdata/dependency_not_found.yaml b/pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMServiceAccount/testdata/dependency_not_found.yaml new file mode 100644 index 0000000..f05d74c --- /dev/null +++ b/pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMServiceAccount/testdata/dependency_not_found.yaml @@ -0,0 +1,9 @@ +apiVersion: iam.cnrm.cloud.google.com/v1beta1 +kind: IAMServiceAccount +status: + conditions: + - lastTransitionTime: '2022-07-01T12:56:21Z' + message: Dependency not found + reason: DependencyNotFound + status: 'False' + type: Ready diff --git a/pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMServiceAccount/testdata/dependency_not_ready.yaml b/pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMServiceAccount/testdata/dependency_not_ready.yaml new file mode 100644 index 0000000..0e7962f --- /dev/null +++ b/pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMServiceAccount/testdata/dependency_not_ready.yaml @@ -0,0 +1,9 @@ +apiVersion: iam.cnrm.cloud.google.com/v1beta1 +kind: IAMServiceAccount +status: + conditions: + - lastTransitionTime: '2022-07-01T12:56:21Z' + message: Dependency not ready + reason: DependencyNotReady + status: 'False' + type: Ready diff --git a/pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMServiceAccount/testdata/up_to_date.yaml b/pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMServiceAccount/testdata/up_to_date.yaml new file mode 100644 index 0000000..6124232 --- /dev/null +++ b/pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMServiceAccount/testdata/up_to_date.yaml @@ -0,0 +1,9 @@ +apiVersion: iam.cnrm.cloud.google.com/v1beta1 +kind: IAMServiceAccount +status: + conditions: + - lastTransitionTime: '2022-05-09T08:49:18Z' + message: The resource is up to date + reason: UpToDate + status: 'True' + type: Ready diff --git a/pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMServiceAccount/testdata/update_failed.yaml b/pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMServiceAccount/testdata/update_failed.yaml new file mode 100644 index 0000000..f6c6e05 --- /dev/null +++ b/pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMServiceAccount/testdata/update_failed.yaml @@ -0,0 +1,9 @@ +apiVersion: iam.cnrm.cloud.google.com/v1beta1 +kind: IAMServiceAccount +status: + conditions: + - lastTransitionTime: '2022-07-01T12:56:21Z' + message: Update failed + reason: UpdateFailed + status: 'False' + type: Ready diff --git 
a/pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMServiceAccount/testdata/update_in_progress.yaml b/pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMServiceAccount/testdata/update_in_progress.yaml new file mode 100644 index 0000000..f313ba6 --- /dev/null +++ b/pkg/resource_customizations/iam.cnrm.cloud.google.com/IAMServiceAccount/testdata/update_in_progress.yaml @@ -0,0 +1,9 @@ +apiVersion: iam.cnrm.cloud.google.com/v1beta1 +kind: IAMServiceAccount +status: + conditions: + - lastTransitionTime: '2022-07-01T12:56:21Z' + message: Update in progress + reason: Updating + status: 'False' + type: Ready diff --git a/pkg/resource_customizations/iammanager.keikoproj.io/Iamrole/health.lua b/pkg/resource_customizations/iammanager.keikoproj.io/Iamrole/health.lua new file mode 100644 index 0000000..0b89687 --- /dev/null +++ b/pkg/resource_customizations/iammanager.keikoproj.io/Iamrole/health.lua @@ -0,0 +1,33 @@ +local hs = {} +if obj.status ~= nil then + -- Each message may or may not use these. + local roleName = obj.status.roleName or "" + local roleARN = obj.status.roleARN or "" + local roleID = obj.status.roleID or "" + + if obj.status.state == "Ready" then + hs.status = "Healthy" + hs.message = "Role '" .. roleName .. "' exists with ARN '" .. roleARN .. "' and ID '" .. roleID .. "'." + return hs + end + + local message = "" + -- Current non-ready statuses: https://github.com/keikoproj/iam-manager/blob/3aeb2f8ec3005e1c53a057b3b0f79e14a0e5b9cb/api/v1alpha1/iamrole_types.go#L150-L156 + if obj.status.state == "Error" or obj.status.state == "RolesMaxLimitReached" or obj.status.state == "PolicyNotAllowed" or obj.status.state == "RoleNameNotAvailable" then + hs.status = "Degraded" + message = "Failed to reconcile the Iamrole " + if obj.status.retryCount ~= nil and obj.status.retryCount > 0 then + message = message .. "(retry " .. tostring(obj.status.retryCount) .. ") " + end + message = message .. "for role '" .. roleName .. "' with ARN '" .. roleARN .. "' and ID '" .. roleID .. "'." + if obj.status.errorDescription ~= nil then + message = message .. " Reconciliation error was: " .. obj.status.errorDescription + end + hs.message = message + return hs + end +end + +hs.status = "Progressing" +hs.message = "Waiting for Iamrole to be reconciled" +return hs diff --git a/pkg/resource_customizations/iammanager.keikoproj.io/Iamrole/health_test.yaml b/pkg/resource_customizations/iammanager.keikoproj.io/Iamrole/health_test.yaml new file mode 100644 index 0000000..660276f --- /dev/null +++ b/pkg/resource_customizations/iammanager.keikoproj.io/Iamrole/health_test.yaml @@ -0,0 +1,20 @@ +tests: +- healthStatus: + status: Degraded + message: |- + Failed to reconcile the Iamrole (retry 1) for role 'k8s-test' with ARN 'arn:aws:iam::111111111111:role/k8s-test' and ID 'ABCDEFGHIJKLMNOPQRSTU'. Reconciliation error was: NoSuchEntity: The role with name k8s-test cannot be found. + status code: 404, request id: f80c99fc-c78d-4b1c-806d-3a162fbbc900 + inputPath: testdata/degraded_error.yaml +- healthStatus: + status: Degraded + message: |- + Failed to reconcile the Iamrole for role 'k8s-test' with ARN '' and ID ''. Reconciliation error was: maximum number of allowed roles reached. You must delete any existing role before proceeding further + inputPath: testdata/degraded_rolesMaxLimitReached.yaml +- healthStatus: + status: Healthy + message: Role 'k8s-test' exists with ARN 'arn:aws:iam::111111111111:role/k8s-test' and ID 'ABCDEFGHIJKLMNOPQRSTU'. 
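+  # health.lua builds this message from status.roleName, status.roleARN and status.roleID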
+ inputPath: testdata/healthy.yaml +- healthStatus: + status: Progressing + message: 'Waiting for Iamrole to be reconciled' + inputPath: testdata/progressing_noStatus.yaml diff --git a/pkg/resource_customizations/iammanager.keikoproj.io/Iamrole/testdata/degraded_error.yaml b/pkg/resource_customizations/iammanager.keikoproj.io/Iamrole/testdata/degraded_error.yaml new file mode 100644 index 0000000..3bf3e7c --- /dev/null +++ b/pkg/resource_customizations/iammanager.keikoproj.io/Iamrole/testdata/degraded_error.yaml @@ -0,0 +1,29 @@ +apiVersion: iammanager.keikoproj.io/v1alpha1 +kind: Iamrole +metadata: + finalizers: + - iamrole.finalizers.iammanager.keikoproj.io + name: iamrole + namespace: test +spec: + PolicyDocument: + Statement: + - Action: + - ec2:* + Effect: Deny + Resource: + - '*' + - Action: + - iam:* + Effect: Deny + Resource: + - '*' +status: + errorDescription: "NoSuchEntity: The role with name k8s-test cannot + be found.\n\tstatus code: 404, request id: f80c99fc-c78d-4b1c-806d-3a162fbbc900" + lastUpdatedTimestamp: "2023-10-10T19:31:06Z" + retryCount: 1 + roleARN: arn:aws:iam::111111111111:role/k8s-test + roleID: ABCDEFGHIJKLMNOPQRSTU + roleName: k8s-test + state: Error diff --git a/pkg/resource_customizations/iammanager.keikoproj.io/Iamrole/testdata/degraded_rolesMaxLimitReached.yaml b/pkg/resource_customizations/iammanager.keikoproj.io/Iamrole/testdata/degraded_rolesMaxLimitReached.yaml new file mode 100644 index 0000000..906c720 --- /dev/null +++ b/pkg/resource_customizations/iammanager.keikoproj.io/Iamrole/testdata/degraded_rolesMaxLimitReached.yaml @@ -0,0 +1,26 @@ +apiVersion: iammanager.keikoproj.io/v1alpha1 +kind: Iamrole +metadata: + finalizers: + - iamrole.finalizers.iammanager.keikoproj.io + name: iamrole + namespace: test +spec: + PolicyDocument: + Statement: + - Action: + - ec2:* + Effect: Deny + Resource: + - '*' + - Action: + - iam:* + Effect: Deny + Resource: + - '*' +status: + errorDescription: maximum number of allowed roles reached. 
You must delete any existing role before proceeding further + lastUpdatedTimestamp: "2023-10-10T19:25:26Z" + retryCount: 0 + roleName: k8s-test + state: RolesMaxLimitReached diff --git a/pkg/resource_customizations/iammanager.keikoproj.io/Iamrole/testdata/healthy.yaml b/pkg/resource_customizations/iammanager.keikoproj.io/Iamrole/testdata/healthy.yaml new file mode 100644 index 0000000..273cf3a --- /dev/null +++ b/pkg/resource_customizations/iammanager.keikoproj.io/Iamrole/testdata/healthy.yaml @@ -0,0 +1,27 @@ +apiVersion: iammanager.keikoproj.io/v1alpha1 +kind: Iamrole +metadata: + finalizers: + - iamrole.finalizers.iammanager.keikoproj.io + name: iamrole + namespace: default +spec: + PolicyDocument: + Statement: + - Action: + - 'ec2:*' + Effect: Deny + Resource: + - '*' + - Action: + - 'iam:*' + Effect: Deny + Resource: + - '*' +status: + lastUpdatedTimestamp: '2023-10-10T20:36:23Z' + retryCount: 0 + roleARN: 'arn:aws:iam::111111111111:role/k8s-test' + roleID: ABCDEFGHIJKLMNOPQRSTU + roleName: k8s-test + state: Ready diff --git a/pkg/resource_customizations/iammanager.keikoproj.io/Iamrole/testdata/progressing_noStatus.yaml b/pkg/resource_customizations/iammanager.keikoproj.io/Iamrole/testdata/progressing_noStatus.yaml new file mode 100644 index 0000000..29c18a4 --- /dev/null +++ b/pkg/resource_customizations/iammanager.keikoproj.io/Iamrole/testdata/progressing_noStatus.yaml @@ -0,0 +1,20 @@ +apiVersion: iammanager.keikoproj.io/v1alpha1 +kind: Iamrole +metadata: + finalizers: + - iamrole.finalizers.iammanager.keikoproj.io + name: iamrole + namespace: default +spec: + PolicyDocument: + Statement: + - Action: + - 'ec2:*' + Effect: Deny + Resource: + - '*' + - Action: + - 'iam:*' + Effect: Deny + Resource: + - '*' diff --git a/pkg/resource_customizations/install.istio.io/IstioOperator/health.lua b/pkg/resource_customizations/install.istio.io/IstioOperator/health.lua new file mode 100644 index 0000000..874ff3c --- /dev/null +++ b/pkg/resource_customizations/install.istio.io/IstioOperator/health.lua @@ -0,0 +1,33 @@ +local health_status = {} +if obj.status ~= nil then + if obj.status.status ~= nil then + if obj.status.status == 0 or obj.status.status == "NONE" then + health_status.status = "Unknown" + health_status.message = "Component is not present." + return health_status + end + if obj.status.status == 1 or obj.status.status == "UPDATING" then + health_status.status = "Progressing" + health_status.message = "Component is being updated to a different version." + return health_status + end + if obj.status.status == 2 or obj.status.status == "RECONCILING" then + health_status.status = "Progressing" + health_status.message = "Controller has started but not yet completed reconciliation loop for the component." + return health_status + end + if obj.status.status == 3 or obj.status.status == "HEALTHY" then + health_status.status = "Healthy" + health_status.message = "Component is healthy." + return health_status + end + if obj.status.status == 4 or obj.status.status == "ERROR" then + health_status.status = "Degraded" + health_status.message = "Component is in an error state." 
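+      -- each component state is matched both by its numeric code (0-4) and by its string name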
+ return health_status + end + end +end +health_status.status = "Progressing" +health_status.message = "Waiting for Istio Control Plane" +return health_status \ No newline at end of file diff --git a/pkg/resource_customizations/install.istio.io/IstioOperator/health_test.yaml b/pkg/resource_customizations/install.istio.io/IstioOperator/health_test.yaml new file mode 100644 index 0000000..689b66b --- /dev/null +++ b/pkg/resource_customizations/install.istio.io/IstioOperator/health_test.yaml @@ -0,0 +1,17 @@ +tests: +- healthStatus: + status: Progressing + message: "Component is being updated to a different version." + inputPath: testdata/progressing_updating.yaml +- healthStatus: + status: Progressing + message: "Controller has started but not yet completed reconciliation loop for the component." + inputPath: testdata/progressing_reconciling.yaml +- healthStatus: + status: Degraded + message: "Component is in an error state." + inputPath: testdata/degraded.yaml +- healthStatus: + status: Healthy + message: "Component is healthy." + inputPath: testdata/healthy.yaml \ No newline at end of file diff --git a/pkg/resource_customizations/install.istio.io/IstioOperator/testdata/degraded.yaml b/pkg/resource_customizations/install.istio.io/IstioOperator/testdata/degraded.yaml new file mode 100644 index 0000000..f2a5d5e --- /dev/null +++ b/pkg/resource_customizations/install.istio.io/IstioOperator/testdata/degraded.yaml @@ -0,0 +1,25 @@ +apiVersion: install.istio.io/v1alpha1 +kind: IstioOperator +metadata: + finalizers: + - istio-finalizer.install.istio.io + generation: 1 + labels: + argocd.argoproj.io/instance: istio-default + name: istio-control-plane + namespace: istio-system + resourceVersion: "270068" + selfLink: /apis/install.istio.io/v1alpha1/namespaces/istio-system/istiooperators/istio-control-plane + uid: d4ff8619-f3b0-4fb3-8bdb-a44ff44a401a +spec: {} +status: + componentStatus: + AddonComponents: + status: HEALTHY + Base: + status: ERROR + IngressGateways: + status: HEALTHY + Pilot: + status: HEALTHY + status: ERROR \ No newline at end of file diff --git a/pkg/resource_customizations/install.istio.io/IstioOperator/testdata/healthy.yaml b/pkg/resource_customizations/install.istio.io/IstioOperator/testdata/healthy.yaml new file mode 100644 index 0000000..3f47d94 --- /dev/null +++ b/pkg/resource_customizations/install.istio.io/IstioOperator/testdata/healthy.yaml @@ -0,0 +1,26 @@ +apiVersion: install.istio.io/v1alpha1 +kind: IstioOperator +metadata: + creationTimestamp: "2020-06-04T17:46:57Z" + finalizers: + - istio-finalizer.install.istio.io + generation: 1 + labels: + argocd.argoproj.io/instance: istio-default + name: istio-control-plane + namespace: istio-system + resourceVersion: "270068" + selfLink: /apis/install.istio.io/v1alpha1/namespaces/istio-system/istiooperators/istio-control-plane + uid: d4ff8619-f3b0-4fb3-8bdb-a44ff44a401a +spec: {} +status: + componentStatus: + AddonComponents: + status: HEALTHY + Base: + status: HEALTHY + IngressGateways: + status: HEALTHY + Pilot: + status: HEALTHY + status: HEALTHY \ No newline at end of file diff --git a/pkg/resource_customizations/install.istio.io/IstioOperator/testdata/progressing_reconciling.yaml b/pkg/resource_customizations/install.istio.io/IstioOperator/testdata/progressing_reconciling.yaml new file mode 100644 index 0000000..d7c69fb --- /dev/null +++ b/pkg/resource_customizations/install.istio.io/IstioOperator/testdata/progressing_reconciling.yaml @@ -0,0 +1,26 @@ +apiVersion: install.istio.io/v1alpha1 +kind: IstioOperator +metadata: + 
creationTimestamp: "2020-06-04T17:46:57Z" + finalizers: + - istio-finalizer.install.istio.io + generation: 1 + labels: + argocd.argoproj.io/instance: istio-default + name: istio-control-plane + namespace: istio-system + resourceVersion: "270068" + selfLink: /apis/install.istio.io/v1alpha1/namespaces/istio-system/istiooperators/istio-control-plane + uid: d4ff8619-f3b0-4fb3-8bdb-a44ff44a401a +spec: {} +status: + componentStatus: + AddonComponents: + status: HEALTHY + Base: + status: RECONCILING + IngressGateways: + status: HEALTHY + Pilot: + status: HEALTHY + status: RECONCILING \ No newline at end of file diff --git a/pkg/resource_customizations/install.istio.io/IstioOperator/testdata/progressing_updating.yaml b/pkg/resource_customizations/install.istio.io/IstioOperator/testdata/progressing_updating.yaml new file mode 100644 index 0000000..499c0c0 --- /dev/null +++ b/pkg/resource_customizations/install.istio.io/IstioOperator/testdata/progressing_updating.yaml @@ -0,0 +1,25 @@ +apiVersion: install.istio.io/v1alpha1 +kind: IstioOperator +metadata: + finalizers: + - istio-finalizer.install.istio.io + generation: 1 + labels: + argocd.argoproj.io/instance: istio-default + name: istio-control-plane + namespace: istio-system + resourceVersion: "270068" + selfLink: /apis/install.istio.io/v1alpha1/namespaces/istio-system/istiooperators/istio-control-plane + uid: d4ff8619-f3b0-4fb3-8bdb-a44ff44a401a +spec: {} +status: + componentStatus: + AddonComponents: + status: HEALTHY + Base: + status: HEALTHY + IngressGateways: + status: UPDATING + Pilot: + status: HEALTHY + status: UPDATING \ No newline at end of file diff --git a/pkg/resource_customizations/jaegertracing.io/Jaeger/health.lua b/pkg/resource_customizations/jaegertracing.io/Jaeger/health.lua new file mode 100644 index 0000000..b7514d5 --- /dev/null +++ b/pkg/resource_customizations/jaegertracing.io/Jaeger/health.lua @@ -0,0 +1,16 @@ +local health_status = {} +if obj.status ~= nil then + if obj.status.phase == "Running" then + health_status.status = "Healthy" + health_status.message = "Jaeger is Running" + return health_status + end + if obj.status.phase == "Failed" then + health_status.status = "Degraded" + health_status.message = "Jaeger Failed For Some Reason" + return health_status + end +end +health_status.status = "Progressing" +health_status.message = "Waiting for Jaeger" +return health_status \ No newline at end of file diff --git a/pkg/resource_customizations/jaegertracing.io/Jaeger/health_test.yaml b/pkg/resource_customizations/jaegertracing.io/Jaeger/health_test.yaml new file mode 100644 index 0000000..60cfa92 --- /dev/null +++ b/pkg/resource_customizations/jaegertracing.io/Jaeger/health_test.yaml @@ -0,0 +1,13 @@ +tests: +- healthStatus: + status: Progressing + message: "Waiting for Jaeger" + inputPath: testdata/progressing.yaml +- healthStatus: + status: Degraded + message: "Jaeger Failed For Some Reason" + inputPath: testdata/degraded.yaml +- healthStatus: + status: Healthy + message: "Jaeger is Running" + inputPath: testdata/healthy.yaml diff --git a/pkg/resource_customizations/jaegertracing.io/Jaeger/testdata/degraded.yaml b/pkg/resource_customizations/jaegertracing.io/Jaeger/testdata/degraded.yaml new file mode 100644 index 0000000..f84ccc3 --- /dev/null +++ b/pkg/resource_customizations/jaegertracing.io/Jaeger/testdata/degraded.yaml @@ -0,0 +1,15 @@ +apiVersion: jaegertracing.io/v1 +kind: Jaeger +metadata: + generation: 3 + labels: + argocd.argoproj.io/instance: jaeger-operator-default + name: jaeger + namespace: jaeger + 
resourceVersion: "226432" + selfLink: /apis/jaegertracing.io/v1/namespaces/2269-jaeger/jaegers/jaeger + uid: 48d186c8-47d7-494b-8b6a-435747e406a4 +spec: {} +status: + phase: Failed + version: 1.17.0 \ No newline at end of file diff --git a/pkg/resource_customizations/jaegertracing.io/Jaeger/testdata/healthy.yaml b/pkg/resource_customizations/jaegertracing.io/Jaeger/testdata/healthy.yaml new file mode 100644 index 0000000..0f3fd4a --- /dev/null +++ b/pkg/resource_customizations/jaegertracing.io/Jaeger/testdata/healthy.yaml @@ -0,0 +1,15 @@ +apiVersion: jaegertracing.io/v1 +kind: Jaeger +metadata: + generation: 3 + labels: + argocd.argoproj.io/instance: jaeger-operator-default + name: jaeger + namespace: jaeger + resourceVersion: "226432" + selfLink: /apis/jaegertracing.io/v1/namespaces/2269-jaeger/jaegers/jaeger + uid: 48d186c8-47d7-494b-8b6a-435747e406a4 +spec: {} +status: + phase: Running + version: 1.17.0 \ No newline at end of file diff --git a/pkg/resource_customizations/jaegertracing.io/Jaeger/testdata/progressing.yaml b/pkg/resource_customizations/jaegertracing.io/Jaeger/testdata/progressing.yaml new file mode 100644 index 0000000..7318564 --- /dev/null +++ b/pkg/resource_customizations/jaegertracing.io/Jaeger/testdata/progressing.yaml @@ -0,0 +1,13 @@ +apiVersion: jaegertracing.io/v1 +kind: Jaeger +metadata: + generation: 3 + labels: + argocd.argoproj.io/instance: jaeger-operator-default + name: jaeger + namespace: jaeger + resourceVersion: "226432" + selfLink: /apis/jaegertracing.io/v1/namespaces/2269-jaeger/jaegers/jaeger + uid: 48d186c8-47d7-494b-8b6a-435747e406a4 +spec: {} +status: {} \ No newline at end of file diff --git a/pkg/resource_customizations/kafka.banzaicloud.io/KafkaCluster/health.lua b/pkg/resource_customizations/kafka.banzaicloud.io/KafkaCluster/health.lua new file mode 100644 index 0000000..7422fd4 --- /dev/null +++ b/pkg/resource_customizations/kafka.banzaicloud.io/KafkaCluster/health.lua @@ -0,0 +1,41 @@ +local health_status = {} +if obj.status ~= nil then + if obj.status.brokersState ~= nil then + local numberBrokers = 0 + local healthyBrokers = 0 + for _, broker in pairs(obj.status.brokersState) do + numberBrokers = numberBrokers + 1 + if broker.configurationState == "ConfigInSync" then + if broker.gracefulActionState.cruiseControlState == "GracefulUpscaleSucceeded" or broker.gracefulActionState.cruiseControlState == "GracefulDownscaleSucceeded" then + healthyBrokers = healthyBrokers + 1 + end + end + end + if numberBrokers == healthyBrokers then + if obj.status.cruiseControlTopicStatus == "CruiseControlTopicReady" and obj.status.state == "ClusterRunning" then + health_status.message = "Kafka Brokers, CruiseControl and cluster are in Healthy State." + health_status.status = "Healthy" + return health_status + end + if obj.status.cruiseControlTopicStatus == "CruiseControlTopicNotReady" or obj.status.cruiseControlTopicStatus == nil then + if obj.status.state == "ClusterReconciling" then + health_status.message = "Kafka Cluster is Reconciling." + health_status.status = "Progressing" + return health_status + end + if obj.status.state == "ClusterRollingUpgrading" then + health_status.message = "Kafka Cluster is Rolling Upgrading." 
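+          -- ClusterReconciling and ClusterRollingUpgrading are transitional states, so both map to Progressing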
+ health_status.status = "Progressing" + return health_status + end + end + else + health_status.message = "Broker Config is out of Sync or CruiseControlState is not Ready" + health_status.status = "Degraded" + return health_status + end + end +end +health_status.status = "Progressing" +health_status.message = "Waiting for KafkaCluster" +return health_status \ No newline at end of file diff --git a/pkg/resource_customizations/kafka.banzaicloud.io/KafkaCluster/health_test.yaml b/pkg/resource_customizations/kafka.banzaicloud.io/KafkaCluster/health_test.yaml new file mode 100644 index 0000000..776cc02 --- /dev/null +++ b/pkg/resource_customizations/kafka.banzaicloud.io/KafkaCluster/health_test.yaml @@ -0,0 +1,17 @@ +tests: +- healthStatus: + status: Progressing + message: "Kafka Cluster is Reconciling." + inputPath: testdata/reconciling.yaml +- healthStatus: + status: Progressing + message: "Waiting for KafkaCluster" + inputPath: testdata/updating.yaml +- healthStatus: + status: Degraded + message: "Broker Config is out of Sync or CruiseControlState is not Ready" + inputPath: testdata/degraded.yaml +- healthStatus: + status: Healthy + message: "Kafka Brokers, CruiseControl and cluster are in Healthy State." + inputPath: testdata/healthy.yaml \ No newline at end of file diff --git a/pkg/resource_customizations/kafka.banzaicloud.io/KafkaCluster/testdata/degraded.yaml b/pkg/resource_customizations/kafka.banzaicloud.io/KafkaCluster/testdata/degraded.yaml new file mode 100644 index 0000000..cbd14ce --- /dev/null +++ b/pkg/resource_customizations/kafka.banzaicloud.io/KafkaCluster/testdata/degraded.yaml @@ -0,0 +1,48 @@ +apiVersion: kafka.banzaicloud.io/v1beta1 +kind: KafkaCluster +metadata: + finalizers: + - finalizer.kafkaclusters.kafka.banzaicloud.io + - topics.kafkaclusters.kafka.banzaicloud.io + - users.kafkaclusters.kafka.banzaicloud.io + generation: 4 + labels: + argocd.argoproj.io/instance: kafka-cluster + controller-tools.k8s.io: "1.0" + name: kafkacluster + namespace: kafka + name: kafkacluster + namespace: kafka + resourceVersion: "31935335" + selfLink: /apis/kafka.banzaicloud.io/v1beta1/namespaces/2269-kafka/kafkaclusters/kafkacluster + uid: c6affef0-651d-44c7-8bff-638961517c8d +spec: {} +status: + alertCount: 0 + brokersState: + "0": + configurationState: ConfigInSync + gracefulActionState: + cruiseControlState: GracefulUpscaleSucceeded + errorMessage: CruiseControl not yet ready + rackAwarenessState: | + broker.rack=us-east-1,us-east-1c + "1": + configurationState: ConfigInSync + gracefulActionState: + cruiseControlState: GracefulUpscaleSucceeded + errorMessage: CruiseControl not yet ready + rackAwarenessState: | + broker.rack=us-east-1,us-east-1b + "2": + configurationState: ConfigOutOfSync + gracefulActionState: + cruiseControlState: GracefulUpscaleSucceeded + errorMessage: CruiseControl not yet ready + rackAwarenessState: | + broker.rack=us-east-1,us-east-1a + cruiseControlTopicStatus: CruiseControlTopicReady + rollingUpgradeStatus: + errorCount: 0 + lastSuccess: "" + state: ClusterRunning \ No newline at end of file diff --git a/pkg/resource_customizations/kafka.banzaicloud.io/KafkaCluster/testdata/healthy.yaml b/pkg/resource_customizations/kafka.banzaicloud.io/KafkaCluster/testdata/healthy.yaml new file mode 100644 index 0000000..44666fd --- /dev/null +++ b/pkg/resource_customizations/kafka.banzaicloud.io/KafkaCluster/testdata/healthy.yaml @@ -0,0 +1,48 @@ +apiVersion: kafka.banzaicloud.io/v1beta1 +kind: KafkaCluster +metadata: + finalizers: + - 
finalizer.kafkaclusters.kafka.banzaicloud.io + - topics.kafkaclusters.kafka.banzaicloud.io + - users.kafkaclusters.kafka.banzaicloud.io + generation: 4 + labels: + argocd.argoproj.io/instance: kafka-cluster + controller-tools.k8s.io: "1.0" + name: kafkacluster + namespace: kafka + name: kafkacluster + namespace: kafka + resourceVersion: "31935335" + selfLink: /apis/kafka.banzaicloud.io/v1beta1/namespaces/2269-kafka/kafkaclusters/kafkacluster + uid: c6affef0-651d-44c7-8bff-638961517c8d +spec: {} +status: + alertCount: 0 + brokersState: + "101": + configurationState: ConfigInSync + gracefulActionState: + cruiseControlState: GracefulUpscaleSucceeded + errorMessage: CruiseControl not yet ready + rackAwarenessState: | + broker.rack=us-east-1,us-east-1c + "102": + configurationState: ConfigInSync + gracefulActionState: + cruiseControlState: GracefulUpscaleSucceeded + errorMessage: CruiseControl not yet ready + rackAwarenessState: | + broker.rack=us-east-1,us-east-1b + "103": + configurationState: ConfigInSync + gracefulActionState: + cruiseControlState: GracefulUpscaleSucceeded + errorMessage: CruiseControl not yet ready + rackAwarenessState: | + broker.rack=us-east-1,us-east-1a + cruiseControlTopicStatus: CruiseControlTopicReady + rollingUpgradeStatus: + errorCount: 0 + lastSuccess: "" + state: ClusterRunning \ No newline at end of file diff --git a/pkg/resource_customizations/kafka.banzaicloud.io/KafkaCluster/testdata/reconciling.yaml b/pkg/resource_customizations/kafka.banzaicloud.io/KafkaCluster/testdata/reconciling.yaml new file mode 100644 index 0000000..8283358 --- /dev/null +++ b/pkg/resource_customizations/kafka.banzaicloud.io/KafkaCluster/testdata/reconciling.yaml @@ -0,0 +1,48 @@ +apiVersion: kafka.banzaicloud.io/v1beta1 +kind: KafkaCluster +metadata: + finalizers: + - finalizer.kafkaclusters.kafka.banzaicloud.io + - topics.kafkaclusters.kafka.banzaicloud.io + - users.kafkaclusters.kafka.banzaicloud.io + generation: 4 + labels: + argocd.argoproj.io/instance: kafka-cluster + controller-tools.k8s.io: "1.0" + name: kafkacluster + namespace: kafka + name: kafkacluster + namespace: kafka + resourceVersion: "31935335" + selfLink: /apis/kafka.banzaicloud.io/v1beta1/namespaces/2269-kafka/kafkaclusters/kafkacluster + uid: c6affef0-651d-44c7-8bff-638961517c8d +spec: {} +status: + alertCount: 0 + brokersState: + "0": + configurationState: ConfigInSync + gracefulActionState: + cruiseControlState: GracefulUpscaleSucceeded + errorMessage: CruiseControl not yet ready + rackAwarenessState: | + broker.rack=us-east-1,us-east-1c + "1": + configurationState: ConfigInSync + gracefulActionState: + cruiseControlState: GracefulUpscaleSucceeded + errorMessage: CruiseControl not yet ready + rackAwarenessState: | + broker.rack=us-east-1,us-east-1b + "2": + configurationState: ConfigInSync + gracefulActionState: + cruiseControlState: GracefulUpscaleSucceeded + errorMessage: CruiseControl not yet ready + rackAwarenessState: | + broker.rack=us-east-1,us-east-1a + cruiseControlTopicStatus: CruiseControlTopicNotReady + rollingUpgradeStatus: + errorCount: 0 + lastSuccess: "" + state: ClusterReconciling \ No newline at end of file diff --git a/pkg/resource_customizations/kafka.banzaicloud.io/KafkaCluster/testdata/updating.yaml b/pkg/resource_customizations/kafka.banzaicloud.io/KafkaCluster/testdata/updating.yaml new file mode 100644 index 0000000..21459d2 --- /dev/null +++ b/pkg/resource_customizations/kafka.banzaicloud.io/KafkaCluster/testdata/updating.yaml @@ -0,0 +1,48 @@ +apiVersion: kafka.banzaicloud.io/v1beta1 
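+# cruiseControlTopicStatus below matches no branch in health.lua, so evaluation falls through to the default Progressing result ("Waiting for KafkaCluster")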
+kind: KafkaCluster +metadata: + finalizers: + - finalizer.kafkaclusters.kafka.banzaicloud.io + - topics.kafkaclusters.kafka.banzaicloud.io + - users.kafkaclusters.kafka.banzaicloud.io + generation: 4 + labels: + argocd.argoproj.io/instance: kafka-cluster + controller-tools.k8s.io: "1.0" + name: kafkacluster + namespace: kafka + name: kafkacluster + namespace: kafka + resourceVersion: "31935335" + selfLink: /apis/kafka.banzaicloud.io/v1beta1/namespaces/2269-kafka/kafkaclusters/kafkacluster + uid: c6affef0-651d-44c7-8bff-638961517c8d +spec: {} +status: + alertCount: 0 + brokersState: + "0": + configurationState: ConfigInSync + gracefulActionState: + cruiseControlState: GracefulUpscaleSucceeded + errorMessage: CruiseControl not yet ready + rackAwarenessState: | + broker.rack=us-east-1,us-east-1c + "1": + configurationState: ConfigInSync + gracefulActionState: + cruiseControlState: GracefulUpscaleSucceeded + errorMessage: CruiseControl not yet ready + rackAwarenessState: | + broker.rack=us-east-1,us-east-1b + "2": + configurationState: ConfigInSync + gracefulActionState: + cruiseControlState: GracefulUpscaleSucceeded + errorMessage: CruiseControl not yet ready + rackAwarenessState: | + broker.rack=us-east-1,us-east-1a + cruiseControlTopicStatus: ClusterRollingUpgrading + rollingUpgradeStatus: + errorCount: 0 + lastSuccess: "" + state: ClusterRunning \ No newline at end of file diff --git a/pkg/resource_customizations/kafka.strimzi.io/Kafka/health.lua b/pkg/resource_customizations/kafka.strimzi.io/Kafka/health.lua new file mode 100644 index 0000000..346a181 --- /dev/null +++ b/pkg/resource_customizations/kafka.strimzi.io/Kafka/health.lua @@ -0,0 +1,21 @@ +local hs = {} +if obj.status ~= nil then + if obj.status.conditions ~= nil then + for i, condition in ipairs(obj.status.conditions) do + if condition.type == "NotReady" and condition.status == "True" and condition.reason ~= "Creating" then + hs.status = "Degraded" + hs.message = condition.message + return hs + end + if condition.type == "Ready" and condition.status == "True" then + hs.status = "Healthy" + hs.message = "" + return hs + end + end + end +end + +hs.status = "Progressing" +hs.message = "Waiting for Kafka Cluster" +return hs diff --git a/pkg/resource_customizations/kafka.strimzi.io/Kafka/health_test.yaml b/pkg/resource_customizations/kafka.strimzi.io/Kafka/health_test.yaml new file mode 100644 index 0000000..0038036 --- /dev/null +++ b/pkg/resource_customizations/kafka.strimzi.io/Kafka/health_test.yaml @@ -0,0 +1,12 @@ +tests: +- healthStatus: + status: Progressing + message: "Waiting for Kafka Cluster" + inputPath: testdata/progressing_noStatus.yaml +- healthStatus: + status: Degraded + message: "Exceeded timeout of 300000ms while waiting for StatefulSet resource my-cluster-zookeeper in namespace default to be ready" + inputPath: testdata/degraded.yaml +- healthStatus: + status: Healthy + inputPath: testdata/healthy.yaml diff --git a/pkg/resource_customizations/kafka.strimzi.io/Kafka/testdata/degraded.yaml b/pkg/resource_customizations/kafka.strimzi.io/Kafka/testdata/degraded.yaml new file mode 100644 index 0000000..f4d585c --- /dev/null +++ b/pkg/resource_customizations/kafka.strimzi.io/Kafka/testdata/degraded.yaml @@ -0,0 +1,47 @@ +apiVersion: kafka.strimzi.io/v1beta1 +kind: Kafka +metadata: + creationTimestamp: "2021-02-22T15:59:14Z" + generation: 1 + name: my-cluster + namespace: default + resourceVersion: "134745509" + selfLink: /apis/kafka.strimzi.io/v1beta1/namespaces/default/kafkas/my-cluster + uid: 
69de4987-371b-4300-a3cc-545bfab1dc87 +spec: + entityOperator: + topicOperator: {} + userOperator: {} + kafka: + config: + inter.broker.protocol.version: "2.7" + log.message.format.version: "2.7" + offsets.topics.replication.factor: 1 + transaction.state.log.min.isr: 1 + transaction.state.log.replication.factor: 1 + listeners: + - name: plain + port: 9092 + tls: false + type: internal + - name: tls + port: 9093 + tls: false + type: internal + replicas: 1 + storage: + type: ephemeral + zookeeper: + replicas: 1 + storage: + type: ephemeral +status: + clusterId: 463lY141TCqd6DDvPj5EDw + conditions: + - lastTransitionTime: 2021-02-22T16:05:43+0000 + message: Exceeded timeout of 300000ms while waiting for StatefulSet resource my-cluster-zookeeper + in namespace default to be ready + reason: TimeoutException + status: "True" + type: NotReady + observedGeneration: 1 diff --git a/pkg/resource_customizations/kafka.strimzi.io/Kafka/testdata/healthy.yaml b/pkg/resource_customizations/kafka.strimzi.io/Kafka/testdata/healthy.yaml new file mode 100644 index 0000000..dc05490 --- /dev/null +++ b/pkg/resource_customizations/kafka.strimzi.io/Kafka/testdata/healthy.yaml @@ -0,0 +1,61 @@ +apiVersion: kafka.strimzi.io/v1beta1 +kind: Kafka +metadata: + creationTimestamp: "2021-02-22T15:59:14Z" + generation: 1 + name: my-cluster + namespace: default + resourceVersion: "134745509" + selfLink: /apis/kafka.strimzi.io/v1beta1/namespaces/default/kafkas/my-cluster + uid: 69de4987-371b-4300-a3cc-545bfab1dc87 +spec: + entityOperator: + topicOperator: {} + userOperator: {} + kafka: + config: + inter.broker.protocol.version: "2.7" + log.message.format.version: "2.7" + offsets.topics.replication.factor: 1 + transaction.state.log.min.isr: 1 + transaction.state.log.replication.factor: 1 + listeners: + - name: plain + port: 9092 + tls: false + type: internal + - name: tls + port: 9093 + tls: false + type: internal + replicas: 1 + storage: + type: ephemeral + zookeeper: + replicas: 1 + storage: + type: ephemeral +status: + clusterId: 463lY141TCqd6DDvPj5EDw + conditions: + - lastTransitionTime: 2021-02-22T16:05:43+0000 + message: A Kafka cluster with a single replica and ephemeral storage will lose + topic messages after any restart or rolling update + reason: KafkaStorage + status: "True" + type: Warning + - lastTransitionTime: 2021-02-22T16:05:47+0000 + status: "True" + type: Ready + listeners: + - addresses: + - host: my-cluster-kafka-bootstrap.default.svc + port: 9092 + bootstrapServers: my-cluster-kafka-bootstrap.default.svc:9092 + type: plain + - addresses: + - host: my-cluster-kafka-bootstrap.default.svc + port: 9093 + bootstrapServers: my-cluster-kafka-bootstrap.default.svc:9093 + type: tls + observedGeneration: 1 diff --git a/pkg/resource_customizations/kafka.strimzi.io/Kafka/testdata/progressing_noStatus.yaml b/pkg/resource_customizations/kafka.strimzi.io/Kafka/testdata/progressing_noStatus.yaml new file mode 100644 index 0000000..0dba63e --- /dev/null +++ b/pkg/resource_customizations/kafka.strimzi.io/Kafka/testdata/progressing_noStatus.yaml @@ -0,0 +1,37 @@ +apiVersion: kafka.strimzi.io/v1beta1 +kind: Kafka +metadata: + creationTimestamp: "2021-02-22T15:59:14Z" + generation: 1 + name: my-cluster + namespace: default + resourceVersion: "134745509" + selfLink: /apis/kafka.strimzi.io/v1beta1/namespaces/default/kafkas/my-cluster + uid: 69de4987-371b-4300-a3cc-545bfab1dc87 +spec: + entityOperator: + topicOperator: {} + userOperator: {} + kafka: + config: + inter.broker.protocol.version: "2.7" + log.message.format.version: 
"2.7" + offsets.topics.replication.factor: 1 + transaction.state.log.min.isr: 1 + transaction.state.log.replication.factor: 1 + listeners: + - name: plain + port: 9092 + tls: false + type: internal + - name: tls + port: 9093 + tls: false + type: internal + replicas: 1 + storage: + type: ephemeral + zookeeper: + replicas: 1 + storage: + type: ephemeral diff --git a/pkg/resource_customizations/kafka.strimzi.io/KafkaConnect/health.lua b/pkg/resource_customizations/kafka.strimzi.io/KafkaConnect/health.lua new file mode 100644 index 0000000..8cd15dd --- /dev/null +++ b/pkg/resource_customizations/kafka.strimzi.io/KafkaConnect/health.lua @@ -0,0 +1,21 @@ +local hs = {} +if obj.status ~= nil then + if obj.status.conditions ~= nil then + for i, condition in ipairs(obj.status.conditions) do + if condition.type == "NotReady" and condition.status == "True" then + hs.status = "Degraded" + hs.message = condition.message + return hs + end + if condition.type == "Ready" and condition.status == "True" then + hs.status = "Healthy" + hs.message = "" + return hs + end + end + end +end + +hs.status = "Progressing" +hs.message = "Waiting for Kafka Connect" +return hs diff --git a/pkg/resource_customizations/kafka.strimzi.io/KafkaConnect/health_test.yaml b/pkg/resource_customizations/kafka.strimzi.io/KafkaConnect/health_test.yaml new file mode 100644 index 0000000..96b56b2 --- /dev/null +++ b/pkg/resource_customizations/kafka.strimzi.io/KafkaConnect/health_test.yaml @@ -0,0 +1,12 @@ +tests: +- healthStatus: + status: Progressing + message: "Waiting for Kafka Connect" + inputPath: testdata/progressing_noStatus.yaml +- healthStatus: + status: Degraded + message: "Error" + inputPath: testdata/degraded.yaml +- healthStatus: + status: Healthy + inputPath: testdata/healthy.yaml diff --git a/pkg/resource_customizations/kafka.strimzi.io/KafkaConnect/testdata/degraded.yaml b/pkg/resource_customizations/kafka.strimzi.io/KafkaConnect/testdata/degraded.yaml new file mode 100644 index 0000000..b7c9d15 --- /dev/null +++ b/pkg/resource_customizations/kafka.strimzi.io/KafkaConnect/testdata/degraded.yaml @@ -0,0 +1,104 @@ +apiVersion: kafka.strimzi.io/v1beta1 +kind: KafkaConnect +metadata: + creationTimestamp: "2020-02-13T14:03:15Z" + deletionGracePeriodSeconds: 0 + deletionTimestamp: "2020-05-28T10:29:44Z" + finalizers: + - foregroundDeletion + generation: 25 + labels: + app.kubernetes.io/instance: kafka-connect + name: kafka + namespace: strimzi + resourceVersion: "43088521" + selfLink: /apis/kafka.strimzi.io/v1beta1/namespaces/strimzi/kafkaconnects/kafka + uid: 941ae21d-4e69-11ea-a53d-06e66a171f98 +spec: + bootstrapServers: PLAINTEXT://b-1.kafka.eu-west-1.amazonaws.com:9092, + PLAINTEXT://b-2.kafka.eu-west-1.amazonaws.com:9092, PLAINTEXT://b-3.kafka.eu-west-1.amazonaws.com:9092 + config: + config.providers: file + config.providers.file.class: org.apache.kafka.common.config.provider.FileConfigProvider + config.storage.topic: connect-configs + connector.client.config.override.policy: All + group.id: connect-cluster + internal.key.converter: org.apache.kafka.connect.json.JsonConverter + internal.key.converter.schemas.enable: "false" + internal.value.converter: org.apache.kafka.connect.json.JsonConverter + internal.value.converter.schemas.enable: "false" + key.converter: org.apache.kafka.connect.storage.StringConverter + key.converter.schemas.enable: "false" + offset.storage.topic: connect-offsets + schema.registry.url: http://kafka-schema-registry:8081 + status.storage.topic: connect-statuses + 
task.shutdown.graceful.timeout.ms: 30000 + value.converter: io.confluent.connect.avro.AvroConverter + value.converter.schema.registry.url: http://kafka-schema-registry:8081 + value.converter.schemas.enable: "true" + externalConfiguration: + volumes: + - name: kafka-connect-credentials + secret: + secretName: kafka-connect-credentials + image: strimzi/kafka-connect:strimzi_0.17.0_kafka_2.3.1-2 + jvmOptions: + -Xms: 8000m + -Xmx: 8000m + gcLoggingEnabled: false + logging: + type: inline + replicas: 5 + resources: + limits: + cpu: "1" + memory: 7Gi + requests: + cpu: "1" + memory: 5Gi + template: + connectContainer: + env: + - name: JMX_PORT + value: "9999" + pod: + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchExpressions: + - key: strimzi.io/name + operator: In + values: + - kafka-connect + topologyKey: kubernetes.io/hostname + weight: 100 + - podAffinityTerm: + labelSelector: + matchExpressions: + - key: strimzi.io/name + operator: In + values: + - kafka-connect + topologyKey: topology.kubernetes.io/zone + weight: 100 + metadata: + annotations: + prometheus.io/path: / + prometheus.io/port: "9404" + prometheus.io/scrape: true + priorityClassName: kafka-connect + tolerations: + - effect: NoSchedule + key: dynamic-node + operator: Equal + value: "true" +status: + conditions: + - lastTransitionTime: "2020-05-28T13:43:05.813Z" + status: "True" + type: NotReady + message: "Error" + observedGeneration: 25 + url: http://kafka-connect-api.strimzi.svc:8083 diff --git a/pkg/resource_customizations/kafka.strimzi.io/KafkaConnect/testdata/healthy.yaml b/pkg/resource_customizations/kafka.strimzi.io/KafkaConnect/testdata/healthy.yaml new file mode 100644 index 0000000..1d03d3b --- /dev/null +++ b/pkg/resource_customizations/kafka.strimzi.io/KafkaConnect/testdata/healthy.yaml @@ -0,0 +1,103 @@ +apiVersion: kafka.strimzi.io/v1beta1 +kind: KafkaConnect +metadata: + creationTimestamp: "2020-02-13T14:03:15Z" + deletionGracePeriodSeconds: 0 + deletionTimestamp: "2020-05-28T10:29:44Z" + finalizers: + - foregroundDeletion + generation: 25 + labels: + app.kubernetes.io/instance: kafka-connect + name: kafka + namespace: strimzi + resourceVersion: "43088521" + selfLink: /apis/kafka.strimzi.io/v1beta1/namespaces/strimzi/kafkaconnects/kafka + uid: 941ae21d-4e69-11ea-a53d-06e66a171f98 +spec: + bootstrapServers: PLAINTEXT://b-1.kafka.eu-west-1.amazonaws.com:9092, + PLAINTEXT://b-2.kafka.eu-west-1.amazonaws.com:9092, PLAINTEXT://b-3.kafka.eu-west-1.amazonaws.com:9092 + config: + config.providers: file + config.providers.file.class: org.apache.kafka.common.config.provider.FileConfigProvider + config.storage.topic: connect-configs + connector.client.config.override.policy: All + group.id: connect-cluster + internal.key.converter: org.apache.kafka.connect.json.JsonConverter + internal.key.converter.schemas.enable: "false" + internal.value.converter: org.apache.kafka.connect.json.JsonConverter + internal.value.converter.schemas.enable: "false" + key.converter: org.apache.kafka.connect.storage.StringConverter + key.converter.schemas.enable: "false" + offset.storage.topic: connect-offsets + schema.registry.url: http://kafka-schema-registry:8081 + status.storage.topic: connect-statuses + task.shutdown.graceful.timeout.ms: 30000 + value.converter: io.confluent.connect.avro.AvroConverter + value.converter.schema.registry.url: http://kafka-schema-registry:8081 + value.converter.schemas.enable: "true" + externalConfiguration: + volumes: + - name: 
kafka-connect-credentials + secret: + secretName: kafka-connect-credentials + image: strimzi/kafka-connect:strimzi_0.17.0_kafka_2.3.1-2 + jvmOptions: + -Xms: 8000m + -Xmx: 8000m + gcLoggingEnabled: false + logging: + type: inline + replicas: 5 + resources: + limits: + cpu: "1" + memory: 7Gi + requests: + cpu: "1" + memory: 5Gi + template: + connectContainer: + env: + - name: JMX_PORT + value: "9999" + pod: + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchExpressions: + - key: strimzi.io/name + operator: In + values: + - kafka-connect + topologyKey: kubernetes.io/hostname + weight: 100 + - podAffinityTerm: + labelSelector: + matchExpressions: + - key: strimzi.io/name + operator: In + values: + - kafka-connect + topologyKey: topology.kubernetes.io/zone + weight: 100 + metadata: + annotations: + prometheus.io/path: / + prometheus.io/port: "9404" + prometheus.io/scrape: true + priorityClassName: kafka-connect + tolerations: + - effect: NoSchedule + key: dynamic-node + operator: Equal + value: "true" +status: + conditions: + - lastTransitionTime: "2020-05-28T13:43:05.813Z" + status: "True" + type: Ready + observedGeneration: 25 + url: http://kafka-connect-api.strimzi.svc:8083 diff --git a/pkg/resource_customizations/kafka.strimzi.io/KafkaConnect/testdata/progressing_noStatus.yaml b/pkg/resource_customizations/kafka.strimzi.io/KafkaConnect/testdata/progressing_noStatus.yaml new file mode 100644 index 0000000..7f21852 --- /dev/null +++ b/pkg/resource_customizations/kafka.strimzi.io/KafkaConnect/testdata/progressing_noStatus.yaml @@ -0,0 +1,96 @@ +apiVersion: kafka.strimzi.io/v1beta1 +kind: KafkaConnect +metadata: + creationTimestamp: "2020-02-13T14:03:15Z" + deletionGracePeriodSeconds: 0 + deletionTimestamp: "2020-05-28T10:29:44Z" + finalizers: + - foregroundDeletion + generation: 25 + labels: + app.kubernetes.io/instance: kafka-connect + name: kafka + namespace: strimzi + resourceVersion: "43088521" + selfLink: /apis/kafka.strimzi.io/v1beta1/namespaces/strimzi/kafkaconnects/kafka + uid: 941ae21d-4e69-11ea-a53d-06e66a171f98 +spec: + bootstrapServers: PLAINTEXT://b-1.kafka.eu-west-1.amazonaws.com:9092, + PLAINTEXT://b-2.kafka.eu-west-1.amazonaws.com:9092, PLAINTEXT://b-3.kafka.eu-west-1.amazonaws.com:9092 + config: + config.providers: file + config.providers.file.class: org.apache.kafka.common.config.provider.FileConfigProvider + config.storage.topic: connect-configs + connector.client.config.override.policy: All + group.id: connect-cluster + internal.key.converter: org.apache.kafka.connect.json.JsonConverter + internal.key.converter.schemas.enable: "false" + internal.value.converter: org.apache.kafka.connect.json.JsonConverter + internal.value.converter.schemas.enable: "false" + key.converter: org.apache.kafka.connect.storage.StringConverter + key.converter.schemas.enable: "false" + offset.storage.topic: connect-offsets + schema.registry.url: http://kafka-schema-registry:8081 + status.storage.topic: connect-statuses + task.shutdown.graceful.timeout.ms: 30000 + value.converter: io.confluent.connect.avro.AvroConverter + value.converter.schema.registry.url: http://kafka-schema-registry:8081 + value.converter.schemas.enable: "true" + externalConfiguration: + volumes: + - name: kafka-connect-credentials + secret: + secretName: kafka-connect-credentials + image: strimzi/kafka-connect:strimzi_0.17.0_kafka_2.3.1-2 + jvmOptions: + -Xms: 8000m + -Xmx: 8000m + gcLoggingEnabled: false + logging: + type: inline + replicas: 5 + 
resources: + limits: + cpu: "1" + memory: 7Gi + requests: + cpu: "1" + memory: 5Gi + template: + connectContainer: + env: + - name: JMX_PORT + value: "9999" + pod: + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchExpressions: + - key: strimzi.io/name + operator: In + values: + - kafka-connect + topologyKey: kubernetes.io/hostname + weight: 100 + - podAffinityTerm: + labelSelector: + matchExpressions: + - key: strimzi.io/name + operator: In + values: + - kafka-connect + topologyKey: topology.kubernetes.io/zone + weight: 100 + metadata: + annotations: + prometheus.io/path: / + prometheus.io/port: "9404" + prometheus.io/scrape: true + priorityClassName: kafka-connect + tolerations: + - effect: NoSchedule + key: dynamic-node + operator: Equal + value: "true" diff --git a/pkg/resource_customizations/kafka.strimzi.io/KafkaTopic/health.lua b/pkg/resource_customizations/kafka.strimzi.io/KafkaTopic/health.lua new file mode 100644 index 0000000..2d3ada3 --- /dev/null +++ b/pkg/resource_customizations/kafka.strimzi.io/KafkaTopic/health.lua @@ -0,0 +1,21 @@ +local hs = {} +if obj.status ~= nil then + if obj.status.conditions ~= nil then + for i, condition in ipairs(obj.status.conditions) do + if condition.type == "NotReady" and condition.status == "True" then + hs.status = "Degraded" + hs.message = condition.message + return hs + end + if condition.type == "Ready" and condition.status == "True" then + hs.status = "Healthy" + hs.message = "" + return hs + end + end + end +end + +hs.status = "Progressing" +hs.message = "Waiting for Kafka Topic" +return hs diff --git a/pkg/resource_customizations/kafka.strimzi.io/KafkaTopic/health_test.yaml b/pkg/resource_customizations/kafka.strimzi.io/KafkaTopic/health_test.yaml new file mode 100644 index 0000000..9da08aa --- /dev/null +++ b/pkg/resource_customizations/kafka.strimzi.io/KafkaTopic/health_test.yaml @@ -0,0 +1,12 @@ +tests: +- healthStatus: + status: Progressing + message: "Waiting for Kafka Topic" + inputPath: testdata/progressing_noStatus.yaml +- healthStatus: + status: Degraded + message: "Invalid value abcd for configuration retention.ms: Not a number of type LONG" + inputPath: testdata/degraded.yaml +- healthStatus: + status: Healthy + inputPath: testdata/healthy.yaml diff --git a/pkg/resource_customizations/kafka.strimzi.io/KafkaTopic/testdata/degraded.yaml b/pkg/resource_customizations/kafka.strimzi.io/KafkaTopic/testdata/degraded.yaml new file mode 100644 index 0000000..6d22021 --- /dev/null +++ b/pkg/resource_customizations/kafka.strimzi.io/KafkaTopic/testdata/degraded.yaml @@ -0,0 +1,27 @@ +apiVersion: kafka.strimzi.io/v1beta1 +kind: KafkaTopic +metadata: + creationTimestamp: "2021-02-22T16:10:54Z" + generation: 1 + labels: + strimzi.io/cluster: my-cluster + name: my-topic + namespace: default + resourceVersion: "134751516" + selfLink: /apis/kafka.strimzi.io/v1beta1/namespaces/default/kafkatopics/my-topic + uid: 13b7644a-ec46-4f24-a198-18962fda4b85 +spec: + config: + retention.ms: 7200000 + segment.bytes: 1073741824 + partitions: 1 + replicas: 1 +status: + conditions: + - lastTransitionTime: "2021-02-22T16:10:55.003372Z" + message: 'Invalid value abcd for configuration retention.ms: Not a number of type + LONG' + reason: InvalidConfigurationException + status: "True" + type: NotReady + observedGeneration: 1 diff --git a/pkg/resource_customizations/kafka.strimzi.io/KafkaTopic/testdata/healthy.yaml 
b/pkg/resource_customizations/kafka.strimzi.io/KafkaTopic/testdata/healthy.yaml new file mode 100644 index 0000000..0a57875 --- /dev/null +++ b/pkg/resource_customizations/kafka.strimzi.io/KafkaTopic/testdata/healthy.yaml @@ -0,0 +1,24 @@ +apiVersion: kafka.strimzi.io/v1beta1 +kind: KafkaTopic +metadata: + creationTimestamp: "2021-02-22T16:10:54Z" + generation: 1 + labels: + strimzi.io/cluster: my-cluster + name: my-topic + namespace: default + resourceVersion: "134751516" + selfLink: /apis/kafka.strimzi.io/v1beta1/namespaces/default/kafkatopics/my-topic + uid: 13b7644a-ec46-4f24-a198-18962fda4b85 +spec: + config: + retention.ms: 7200000 + segment.bytes: 1073741824 + partitions: 1 + replicas: 1 +status: + conditions: + - lastTransitionTime: "2021-02-22T16:10:55.003372Z" + status: "True" + type: Ready + observedGeneration: 1 diff --git a/pkg/resource_customizations/kafka.strimzi.io/KafkaTopic/testdata/progressing_noStatus.yaml b/pkg/resource_customizations/kafka.strimzi.io/KafkaTopic/testdata/progressing_noStatus.yaml new file mode 100644 index 0000000..5c39e2a --- /dev/null +++ b/pkg/resource_customizations/kafka.strimzi.io/KafkaTopic/testdata/progressing_noStatus.yaml @@ -0,0 +1,18 @@ +apiVersion: kafka.strimzi.io/v1beta1 +kind: KafkaTopic +metadata: + creationTimestamp: "2021-02-22T16:10:54Z" + generation: 1 + labels: + strimzi.io/cluster: my-cluster + name: my-topic + namespace: default + resourceVersion: "134751516" + selfLink: /apis/kafka.strimzi.io/v1beta1/namespaces/default/kafkatopics/my-topic + uid: 13b7644a-ec46-4f24-a198-18962fda4b85 +spec: + config: + retention.ms: 7200000 + segment.bytes: 1073741824 + partitions: 1 + replicas: 1 diff --git a/pkg/resource_customizations/kafka.strimzi.io/KafkaUser/health.lua b/pkg/resource_customizations/kafka.strimzi.io/KafkaUser/health.lua new file mode 100644 index 0000000..44172d1 --- /dev/null +++ b/pkg/resource_customizations/kafka.strimzi.io/KafkaUser/health.lua @@ -0,0 +1,21 @@ +local hs = {} +if obj.status ~= nil then + if obj.status.conditions ~= nil then + for i, condition in ipairs(obj.status.conditions) do + if condition.type == "NotReady" and condition.status == "True" then + hs.status = "Degraded" + hs.message = condition.message + return hs + end + if condition.type == "Ready" and condition.status == "True" then + hs.status = "Healthy" + hs.message = "" + return hs + end + end + end +end + +hs.status = "Progressing" +hs.message = "Waiting for Kafka User" +return hs diff --git a/pkg/resource_customizations/kafka.strimzi.io/KafkaUser/health_test.yaml b/pkg/resource_customizations/kafka.strimzi.io/KafkaUser/health_test.yaml new file mode 100644 index 0000000..8178431 --- /dev/null +++ b/pkg/resource_customizations/kafka.strimzi.io/KafkaUser/health_test.yaml @@ -0,0 +1,12 @@ +tests: +- healthStatus: + status: Progressing + message: "Waiting for Kafka User" + inputPath: testdata/progressing_noStatus.yaml +- healthStatus: + status: Degraded + message: "Authorization needs to be enabled in the Kafka custom resource" + inputPath: testdata/degraded.yaml +- healthStatus: + status: Healthy + inputPath: testdata/healthy.yaml diff --git a/pkg/resource_customizations/kafka.strimzi.io/KafkaUser/testdata/degraded.yaml b/pkg/resource_customizations/kafka.strimzi.io/KafkaUser/testdata/degraded.yaml new file mode 100644 index 0000000..0129b34 --- /dev/null +++ b/pkg/resource_customizations/kafka.strimzi.io/KafkaUser/testdata/degraded.yaml @@ -0,0 +1,58 @@ +apiVersion: kafka.strimzi.io/v1beta1 +kind: KafkaUser +metadata: + creationTimestamp: 
"2021-02-22T15:59:18Z" + generation: 1 + labels: + strimzi.io/cluster: my-cluster + name: my-user + namespace: default + resourceVersion: "134744742" + selfLink: /apis/kafka.strimzi.io/v1beta1/namespaces/default/kafkausers/my-user + uid: 4610a4d8-bd73-4ab9-bed8-971ee5dabf7d +spec: + authentication: + type: tls + authorization: + acls: + - host: '*' + operation: Read + resource: + name: my-topic + patternType: literal + type: topic + - host: '*' + operation: Describe + resource: + name: my-topic + patternType: literal + type: topic + - host: '*' + operation: Read + resource: + name: my-group + patternType: literal + type: group + - host: '*' + operation: Write + resource: + name: my-topic + patternType: literal + type: topic + - host: '*' + operation: Create + resource: + name: my-topic + patternType: literal + type: topic + type: simple +status: + conditions: + - lastTransitionTime: "2021-02-22T16:05:09.386834Z" + message: Authorization needs to be enabled in the Kafka custom resource + reason: InvalidResourceException + status: "True" + type: NotReady + observedGeneration: 1 + secret: my-user + username: CN=my-user diff --git a/pkg/resource_customizations/kafka.strimzi.io/KafkaUser/testdata/healthy.yaml b/pkg/resource_customizations/kafka.strimzi.io/KafkaUser/testdata/healthy.yaml new file mode 100644 index 0000000..147acde --- /dev/null +++ b/pkg/resource_customizations/kafka.strimzi.io/KafkaUser/testdata/healthy.yaml @@ -0,0 +1,56 @@ +apiVersion: kafka.strimzi.io/v1beta1 +kind: KafkaUser +metadata: + creationTimestamp: "2021-02-22T15:59:18Z" + generation: 1 + labels: + strimzi.io/cluster: my-cluster + name: my-user + namespace: default + resourceVersion: "134744742" + selfLink: /apis/kafka.strimzi.io/v1beta1/namespaces/default/kafkausers/my-user + uid: 4610a4d8-bd73-4ab9-bed8-971ee5dabf7d +spec: + authentication: + type: tls + authorization: + acls: + - host: '*' + operation: Read + resource: + name: my-topic + patternType: literal + type: topic + - host: '*' + operation: Describe + resource: + name: my-topic + patternType: literal + type: topic + - host: '*' + operation: Read + resource: + name: my-group + patternType: literal + type: group + - host: '*' + operation: Write + resource: + name: my-topic + patternType: literal + type: topic + - host: '*' + operation: Create + resource: + name: my-topic + patternType: literal + type: topic + type: simple +status: + conditions: + - lastTransitionTime: "2021-02-22T16:05:09.386834Z" + status: "True" + type: Ready + observedGeneration: 1 + secret: my-user + username: CN=my-user diff --git a/pkg/resource_customizations/kafka.strimzi.io/KafkaUser/testdata/progressing_noStatus.yaml b/pkg/resource_customizations/kafka.strimzi.io/KafkaUser/testdata/progressing_noStatus.yaml new file mode 100644 index 0000000..d58bc69 --- /dev/null +++ b/pkg/resource_customizations/kafka.strimzi.io/KafkaUser/testdata/progressing_noStatus.yaml @@ -0,0 +1,48 @@ +apiVersion: kafka.strimzi.io/v1beta1 +kind: KafkaUser +metadata: + creationTimestamp: "2021-02-22T15:59:18Z" + generation: 1 + labels: + strimzi.io/cluster: my-cluster + name: my-user + namespace: default + resourceVersion: "134744742" + selfLink: /apis/kafka.strimzi.io/v1beta1/namespaces/default/kafkausers/my-user + uid: 4610a4d8-bd73-4ab9-bed8-971ee5dabf7d +spec: + authentication: + type: tls + authorization: + acls: + - host: '*' + operation: Read + resource: + name: my-topic + patternType: literal + type: topic + - host: '*' + operation: Describe + resource: + name: my-topic + patternType: literal + type: 
topic + - host: '*' + operation: Read + resource: + name: my-group + patternType: literal + type: group + - host: '*' + operation: Write + resource: + name: my-topic + patternType: literal + type: topic + - host: '*' + operation: Create + resource: + name: my-topic + patternType: literal + type: topic + type: simple diff --git a/pkg/resource_customizations/kiali.io/Kiali/health.lua b/pkg/resource_customizations/kiali.io/Kiali/health.lua new file mode 100644 index 0000000..ea066cf --- /dev/null +++ b/pkg/resource_customizations/kiali.io/Kiali/health.lua @@ -0,0 +1,23 @@ +local health_status = {} +if obj.status ~= nil then + if obj.status.conditions ~= nil then + for i, condition in ipairs(obj.status.conditions) do + health_status.message = condition.message + if condition.type == "Successful" and condition.status == "True" then + health_status.status = "Healthy" + return health_status + end + if condition.type == "Failure" and condition.status == "True" then + health_status.status = "Degraded" + return health_status + end + if condition.type == "Running" and condition.reason == "Running" then + health_status.status = "Progressing" + return health_status + end + end + end +end +health_status.status = "Progressing" +health_status.message = "Waiting for Kiali" +return health_status diff --git a/pkg/resource_customizations/kiali.io/Kiali/health_test.yaml b/pkg/resource_customizations/kiali.io/Kiali/health_test.yaml new file mode 100644 index 0000000..9c4b5b4 --- /dev/null +++ b/pkg/resource_customizations/kiali.io/Kiali/health_test.yaml @@ -0,0 +1,13 @@ +tests: +- healthStatus: + status: Progressing + message: "Reconciling" + inputPath: testdata/progressing.yaml +- healthStatus: + status: Degraded + message: "Error Reconciling" + inputPath: testdata/degraded.yaml +- healthStatus: + status: Healthy + message: "Last reconciliation succeeded" + inputPath: testdata/healthy.yaml diff --git a/pkg/resource_customizations/kiali.io/Kiali/testdata/degraded.yaml b/pkg/resource_customizations/kiali.io/Kiali/testdata/degraded.yaml new file mode 100644 index 0000000..8c5f242 --- /dev/null +++ b/pkg/resource_customizations/kiali.io/Kiali/testdata/degraded.yaml @@ -0,0 +1,37 @@ +apiVersion: kiali.io/v1alpha1 +kind: Kiali +metadata: + finalizers: + - finalizer.kiali + generation: 1 + labels: + argocd.argoproj.io/instance: kiali-default + name: kiali + namespace: kiali + resourceVersion: "344752" + selfLink: /apis/kiali.io/v1alpha1/namespaces/2269-kiali/kialis/kiali + uid: a59d8dac-900b-4e05-9cb7-cf96a0bf4b6b +spec: {} +status: + conditions: + - lastTransitionTime: '2022-10-19T09:44:32Z' + message: '' + reason: '' + status: 'False' + type: Failure + - ansibleResult: + changed: 18 + completion: '2022-10-19T09:44:32.289505' + failures: 0 + ok: 101 + skipped: 101 + lastTransitionTime: '2022-10-19T09:43:39Z' + message: Awaiting next reconciliation + reason: Successful + status: 'True' + type: Running + - lastTransitionTime: '2022-10-19T09:44:32Z' + message: Error Reconciling + reason: Failure + status: 'True' + type: Failure diff --git a/pkg/resource_customizations/kiali.io/Kiali/testdata/healthy.yaml b/pkg/resource_customizations/kiali.io/Kiali/testdata/healthy.yaml new file mode 100644 index 0000000..807843d --- /dev/null +++ b/pkg/resource_customizations/kiali.io/Kiali/testdata/healthy.yaml @@ -0,0 +1,37 @@ +apiVersion: kiali.io/v1alpha1 +kind: Kiali +metadata: + finalizers: + - finalizer.kiali + generation: 1 + labels: + argocd.argoproj.io/instance: kiali-default + name: kiali + namespace: kiali + 
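Two details separate the Kiali script above from the Strimzi loop: it copies every condition's message into the result before matching, and its Running branch fires only when the condition's reason is also "Running". That is why the degraded fixture, whose Running condition carries reason Successful, is skipped and evaluation falls through to the final Failure condition, yielding Degraded with "Error Reconciling". A one-line check of that gate (the cond table is illustrative):

-- Running/True alone is not enough; the reason must also be "Running".
local cond = { type = "Running", status = "True", reason = "Successful" }
print(cond.type == "Running" and cond.reason == "Running")  --> false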
resourceVersion: "344752" + selfLink: /apis/kiali.io/v1alpha1/namespaces/2269-kiali/kialis/kiali + uid: a59d8dac-900b-4e05-9cb7-cf96a0bf4b6b +spec: {} +status: + conditions: + - lastTransitionTime: '2022-10-19T09:44:32Z' + message: '' + reason: '' + status: 'False' + type: Failure + - ansibleResult: + changed: 18 + completion: '2022-10-19T09:44:32.289505' + failures: 0 + ok: 101 + skipped: 101 + lastTransitionTime: '2022-10-19T09:43:39Z' + message: Awaiting next reconciliation + reason: Successful + status: 'True' + type: Running + - lastTransitionTime: '2022-10-19T09:44:32Z' + message: Last reconciliation succeeded + reason: Successful + status: 'True' + type: Successful diff --git a/pkg/resource_customizations/kiali.io/Kiali/testdata/progressing.yaml b/pkg/resource_customizations/kiali.io/Kiali/testdata/progressing.yaml new file mode 100644 index 0000000..d50ab4f --- /dev/null +++ b/pkg/resource_customizations/kiali.io/Kiali/testdata/progressing.yaml @@ -0,0 +1,27 @@ +apiVersion: kiali.io/v1alpha1 +kind: Kiali +metadata: + finalizers: + - finalizer.kiali + generation: 1 + labels: + argocd.argoproj.io/instance: kiali-default + name: kiali + namespace: kiali + resourceVersion: "344752" + selfLink: /apis/kiali.io/v1alpha1/namespaces/2269-kiali/kialis/kiali + uid: a59d8dac-900b-4e05-9cb7-cf96a0bf4b6b +spec: {} +status: + conditions: + - ansibleResult: + changed: 1 + completion: 2020-06-08T13:41:20.133525 + failures: 0 + ok: 56 + skipped: 82 + lastTransitionTime: "2020-06-04T17:47:31Z" + message: Reconciling + reason: Running + status: "True" + type: Running \ No newline at end of file diff --git a/pkg/resource_customizations/kubernetes-client.io/ExternalSecret/health.lua b/pkg/resource_customizations/kubernetes-client.io/ExternalSecret/health.lua new file mode 100644 index 0000000..8e41091 --- /dev/null +++ b/pkg/resource_customizations/kubernetes-client.io/ExternalSecret/health.lua @@ -0,0 +1,17 @@ +local health_status = {} +if obj.status ~= nil then + if obj.status.status == "SUCCESS" then + health_status.status = "Healthy" + health_status.message = "Fetched ExternalSecret." + elseif obj.status.status:find('^ERROR') ~= nil then + health_status.status = "Degraded" + health_status.message = obj.status.status:gsub("ERROR, ", "") + else + health_status.status = "Progressing" + health_status.message = "Waiting for ExternalSecret." + end + return health_status +end +health_status.status = "Progressing" +health_status.message = "Waiting for ExternalSecret." +return health_status \ No newline at end of file diff --git a/pkg/resource_customizations/kubernetes-client.io/ExternalSecret/health_test.yaml b/pkg/resource_customizations/kubernetes-client.io/ExternalSecret/health_test.yaml new file mode 100644 index 0000000..2c83bde --- /dev/null +++ b/pkg/resource_customizations/kubernetes-client.io/ExternalSecret/health_test.yaml @@ -0,0 +1,13 @@ +tests: +- healthStatus: + status: Progressing + message: "Waiting for ExternalSecret." + inputPath: testdata/progressing.yaml +- healthStatus: + status: Degraded + message: "Secrets Manager can't find the specified secret." + inputPath: testdata/degraded.yaml +- healthStatus: + status: Healthy + message: "Fetched ExternalSecret." 
+ inputPath: testdata/healthy.yaml diff --git a/pkg/resource_customizations/kubernetes-client.io/ExternalSecret/testdata/degraded.yaml b/pkg/resource_customizations/kubernetes-client.io/ExternalSecret/testdata/degraded.yaml new file mode 100644 index 0000000..dc8de97 --- /dev/null +++ b/pkg/resource_customizations/kubernetes-client.io/ExternalSecret/testdata/degraded.yaml @@ -0,0 +1,14 @@ +apiVersion: kubernetes-client.io/v1 +kind: ExternalSecret +metadata: + name: external-secret + namespace: external-secret +spec: + backendType: secretsManager + data: + - key: /external/secret/name + name: EXTERNAL_SECRET_NAME +status: + lastSync: "2021-02-06T21:44:40.631Z" + observedGeneration: 1 + status: ERROR, Secrets Manager can't find the specified secret. diff --git a/pkg/resource_customizations/kubernetes-client.io/ExternalSecret/testdata/healthy.yaml b/pkg/resource_customizations/kubernetes-client.io/ExternalSecret/testdata/healthy.yaml new file mode 100644 index 0000000..7b03309 --- /dev/null +++ b/pkg/resource_customizations/kubernetes-client.io/ExternalSecret/testdata/healthy.yaml @@ -0,0 +1,14 @@ +apiVersion: kubernetes-client.io/v1 +kind: ExternalSecret +metadata: + name: external-secret + namespace: external-secret +spec: + backendType: secretsManager + data: + - key: /external/secret/name + name: EXTERNAL_SECRET_NAME +status: + lastSync: "2021-02-06T21:44:40.631Z" + observedGeneration: 1 + status: SUCCESS diff --git a/pkg/resource_customizations/kubernetes-client.io/ExternalSecret/testdata/progressing.yaml b/pkg/resource_customizations/kubernetes-client.io/ExternalSecret/testdata/progressing.yaml new file mode 100644 index 0000000..180104b --- /dev/null +++ b/pkg/resource_customizations/kubernetes-client.io/ExternalSecret/testdata/progressing.yaml @@ -0,0 +1,10 @@ +apiVersion: kubernetes-client.io/v1 +kind: ExternalSecret +metadata: + name: external-secret + namespace: external-secret +spec: + backendType: secretsManager + data: + - key: /external/secret/name + name: EXTERNAL_SECRET_NAME diff --git a/pkg/resource_customizations/kubevirt.io/VirtualMachine/health.lua b/pkg/resource_customizations/kubevirt.io/VirtualMachine/health.lua new file mode 100644 index 0000000..6c0d633 --- /dev/null +++ b/pkg/resource_customizations/kubevirt.io/VirtualMachine/health.lua @@ -0,0 +1,29 @@ +local hs = { status="Progressing", message="No status available"} +if obj.status ~= nil then + if obj.status.conditions ~= nil then + for i, condition in ipairs(obj.status.conditions) do + if condition.type == "Paused" and condition.status == "True" then + hs.status = "Suspended" + hs.message = "Paused" + return hs + end + if condition.type == "Ready" then + if condition.status == "True" then + hs.status="Healthy" + hs.message="Running" + else + if obj.status.created then + hs.message = "Starting" + else + hs.status = "Suspended" + hs.message = "Stopped" + end + end + end + end + end + if obj.status.printableStatus ~= nil then + hs.message = obj.status.printableStatus + end +end +return hs diff --git a/pkg/resource_customizations/kubevirt.io/VirtualMachine/health_test.yaml b/pkg/resource_customizations/kubevirt.io/VirtualMachine/health_test.yaml new file mode 100644 index 0000000..abe0040 --- /dev/null +++ b/pkg/resource_customizations/kubevirt.io/VirtualMachine/health_test.yaml @@ -0,0 +1,21 @@ +tests: +- healthStatus: + status: Progressing + message: "No status available" + inputPath: testdata/progressing_nostatus.yaml +- healthStatus: + status: Healthy + message: "Running" + inputPath: testdata/healthy_ready.yaml 
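The VirtualMachine script above layers three signals: a Paused=True condition returns Suspended immediately, the Ready condition distinguishes Running from Starting (when status.created is set) or Stopped, and status.printableStatus, when present, overwrites the message last. A minimal sketch of that precedence, assuming a vmHealth wrapper and a stub shaped like the suspended_stopped fixture referenced just below (both names illustrative):

local function vmHealth(obj)
  local hs = { status = "Progressing", message = "No status available" }
  if obj.status ~= nil then
    if obj.status.conditions ~= nil then
      for _, condition in ipairs(obj.status.conditions) do
        if condition.type == "Paused" and condition.status == "True" then
          -- Paused wins outright, before printableStatus is consulted.
          return { status = "Suspended", message = "Paused" }
        end
        if condition.type == "Ready" then
          if condition.status == "True" then
            hs.status = "Healthy"
            hs.message = "Running"
          elseif obj.status.created then
            hs.message = "Starting"  -- status stays Progressing
          else
            hs.status = "Suspended"
            hs.message = "Stopped"
          end
        end
      end
    end
    if obj.status.printableStatus ~= nil then
      hs.message = obj.status.printableStatus
    end
  end
  return hs
end

-- Stub shaped like testdata/suspended_stopped.yaml: Ready=False, no created flag.
local stub = { status = { printableStatus = "Stopped",
                          conditions = { { type = "Ready", status = "False" } } } }
local hs = vmHealth(stub)
print(hs.status, hs.message)  --> Suspended   Stopped

For the healthy_ready fixture the loop sets Healthy/Running, and printableStatus ("Running") leaves the message effectively unchanged.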
+- healthStatus: + status: Suspended + message: "Stopped" + inputPath: testdata/suspended_stopped.yaml +- healthStatus: + status: Progressing + message: "Starting" + inputPath: testdata/progressing_starting.yaml +- healthStatus: + status: Suspended + message: "Paused" + inputPath: testdata/suspended_paused.yaml diff --git a/pkg/resource_customizations/kubevirt.io/VirtualMachine/testdata/healthy_ready.yaml b/pkg/resource_customizations/kubevirt.io/VirtualMachine/testdata/healthy_ready.yaml new file mode 100644 index 0000000..5834e15 --- /dev/null +++ b/pkg/resource_customizations/kubevirt.io/VirtualMachine/testdata/healthy_ready.yaml @@ -0,0 +1,63 @@ +apiVersion: kubevirt.io/v1 +kind: VirtualMachine +metadata: + annotations: + kubevirt.io/latest-observed-api-version: v1 + kubevirt.io/storage-observed-api-version: v1alpha3 + name: testvm + namespace: default + resourceVersion: "10971" + selfLink: /apis/kubevirt.io/v1/namespaces/default/virtualmachines/testvm + uid: 8cfec9a9-d736-4c10-8d91-bd3cafb69cb3 +spec: + running: true + template: + metadata: + creationTimestamp: null + labels: + kubevirt.io/domain: testvm + kubevirt.io/size: small + spec: + domain: + devices: + disks: + - disk: + bus: virtio + name: containerdisk + - disk: + bus: virtio + name: cloudinitdisk + interfaces: + - masquerade: {} + name: default + machine: + type: q35 + resources: + requests: + memory: 64M + networks: + - name: default + pod: {} + volumes: + - containerDisk: + image: quay.io/kubevirt/cirros-container-disk-demo + name: containerdisk + - cloudInitNoCloud: + userDataBase64: SGkuXG4= + name: cloudinitdisk +status: + conditions: + - lastProbeTime: null + lastTransitionTime: "2021-09-10T16:17:44Z" + status: "True" + type: Ready + created: true + printableStatus: Running + ready: true + volumeSnapshotStatuses: + - enabled: false + name: containerdisk + reason: Snapshot is not supported for this volumeSource type [containerdisk] + - enabled: false + name: cloudinitdisk + reason: Snapshot is not supported for this volumeSource type [cloudinitdisk] diff --git a/pkg/resource_customizations/kubevirt.io/VirtualMachine/testdata/progressing_nostatus.yaml b/pkg/resource_customizations/kubevirt.io/VirtualMachine/testdata/progressing_nostatus.yaml new file mode 100644 index 0000000..0e93bdc --- /dev/null +++ b/pkg/resource_customizations/kubevirt.io/VirtualMachine/testdata/progressing_nostatus.yaml @@ -0,0 +1,54 @@ +apiVersion: kubevirt.io/v1 +kind: VirtualMachine +metadata: + annotations: + creationTimestamp: "2021-07-16T00:41:10Z" + labels: + app.kubernetes.io/instance: testv + name: testvm + namespace: testvm +spec: + running: true + template: + metadata: + creationTimestamp: null + labels: + kubevirt.io/domain: testvm + kubevirt.io/size: small + spec: + domain: + devices: + disks: + - disk: + bus: virtio + name: containerdisk + - disk: + bus: virtio + name: cloudinitdisk + interfaces: + - masquerade: {} + name: default + machine: + type: pc-q35-rhel8.4.0 + resources: + requests: + memory: 64M + networks: + - name: default + pod: {} + volumes: + - containerDisk: + image: quay.io/kubevirt/cirros-container-disk-demo + name: containerdisk + - cloudInitNoCloud: + userDataBase64: SGkuXG4= + name: cloudinitdisk +status: + volumeSnapshotStatuses: + - enabled: false + name: containerdisk + reason: Volume type has no StorageClass defined + - enabled: false + name: cloudinitdisk + reason: Volume type has no StorageClass defined + diff --git a/pkg/resource_customizations/kubevirt.io/VirtualMachine/testdata/progressing_starting.yaml 
b/pkg/resource_customizations/kubevirt.io/VirtualMachine/testdata/progressing_starting.yaml new file mode 100644 index 0000000..d52e44d --- /dev/null +++ b/pkg/resource_customizations/kubevirt.io/VirtualMachine/testdata/progressing_starting.yaml @@ -0,0 +1,61 @@ +apiVersion: kubevirt.io/v1 +kind: VirtualMachine +metadata: + annotations: + kubevirt.io/latest-observed-api-version: v1 + kubevirt.io/storage-observed-api-version: v1alpha3 + name: testvm + namespace: default +spec: + running: false + template: + metadata: + creationTimestamp: null + labels: + kubevirt.io/domain: testvm + kubevirt.io/size: small + spec: + domain: + devices: + disks: + - disk: + bus: virtio + name: containerdisk + - disk: + bus: virtio + name: cloudinitdisk + interfaces: + - masquerade: {} + name: default + machine: + type: q35 + resources: + requests: + memory: 64M + networks: + - name: default + pod: {} + volumes: + - containerDisk: + image: quay.io/kubevirt/cirros-container-disk-demo + name: containerdisk + - cloudInitNoCloud: + userDataBase64: SGkuXG4= + name: cloudinitdisk +status: + conditions: + - lastProbeTime: "2021-09-10T16:17:33Z" + lastTransitionTime: "2021-09-10T16:17:33Z" + message: Guest VM is not reported as running + reason: GuestNotRunning + status: "False" + type: Ready + created: true + printableStatus: Starting + volumeSnapshotStatuses: + - enabled: false + name: containerdisk + reason: Snapshot is not supported for this volumeSource type [containerdisk] + - enabled: false + name: cloudinitdisk + reason: Snapshot is not supported for this volumeSource type [cloudinitdisk] diff --git a/pkg/resource_customizations/kubevirt.io/VirtualMachine/testdata/suspended_paused.yaml b/pkg/resource_customizations/kubevirt.io/VirtualMachine/testdata/suspended_paused.yaml new file mode 100644 index 0000000..0ad92d6 --- /dev/null +++ b/pkg/resource_customizations/kubevirt.io/VirtualMachine/testdata/suspended_paused.yaml @@ -0,0 +1,67 @@ +apiVersion: kubevirt.io/v1 +kind: VirtualMachine +metadata: + annotations: + kubevirt.io/latest-observed-api-version: v1 + kubevirt.io/storage-observed-api-version: v1alpha3 + creationTimestamp: "2021-09-14T22:15:10Z" + name: testvm + namespace: default +spec: + running: true + template: + metadata: + creationTimestamp: null + labels: + kubevirt.io/domain: testvm + kubevirt.io/size: small + spec: + domain: + devices: + disks: + - disk: + bus: virtio + name: containerdisk + - disk: + bus: virtio + name: cloudinitdisk + interfaces: + - masquerade: {} + name: default + machine: + type: q35 + resources: + requests: + memory: 64M + networks: + - name: default + pod: {} + volumes: + - containerDisk: + image: quay.io/kubevirt/cirros-container-disk-demo + name: containerdisk + - cloudInitNoCloud: + userDataBase64: SGkuXG4= + name: cloudinitdisk +status: + conditions: + - lastProbeTime: null + lastTransitionTime: "2021-09-24T18:45:01Z" + status: "True" + type: Ready + - lastProbeTime: "2021-09-24T18:48:57Z" + lastTransitionTime: "2021-09-24T18:48:57Z" + message: VMI was paused by user + reason: PausedByUser + status: "True" + type: Paused + created: true + printableStatus: Paused + ready: true + volumeSnapshotStatuses: + - enabled: false + name: containerdisk + reason: Snapshot is not supported for this volumeSource type [containerdisk] + - enabled: false + name: cloudinitdisk + reason: Snapshot is not supported for this volumeSource type [cloudinitdisk] diff --git a/pkg/resource_customizations/kubevirt.io/VirtualMachine/testdata/suspended_stopped.yaml 
b/pkg/resource_customizations/kubevirt.io/VirtualMachine/testdata/suspended_stopped.yaml new file mode 100644 index 0000000..ee62dbd --- /dev/null +++ b/pkg/resource_customizations/kubevirt.io/VirtualMachine/testdata/suspended_stopped.yaml @@ -0,0 +1,60 @@ +apiVersion: kubevirt.io/v1 +kind: VirtualMachine +metadata: + annotations: + kubevirt.io/latest-observed-api-version: v1 + kubevirt.io/storage-observed-api-version: v1alpha3 + name: testvm + namespace: default +spec: + running: false + template: + metadata: + creationTimestamp: null + labels: + kubevirt.io/domain: testvm + kubevirt.io/size: small + spec: + domain: + devices: + disks: + - disk: + bus: virtio + name: containerdisk + - disk: + bus: virtio + name: cloudinitdisk + interfaces: + - masquerade: {} + name: default + machine: + type: q35 + resources: + requests: + memory: 64M + networks: + - name: default + pod: {} + volumes: + - containerDisk: + image: quay.io/kubevirt/cirros-container-disk-demo + name: containerdisk + - cloudInitNoCloud: + userDataBase64: SGkuXG4= + name: cloudinitdisk +status: + conditions: + - lastProbeTime: "2021-09-10T16:11:53Z" + lastTransitionTime: "2021-09-10T16:11:53Z" + message: VMI does not exist + reason: VMINotExists + status: "False" + type: Ready + printableStatus: Stopped + volumeSnapshotStatuses: + - enabled: false + name: containerdisk + reason: Snapshot is not supported for this volumeSource type [containerdisk] + - enabled: false + name: cloudinitdisk + reason: Snapshot is not supported for this volumeSource type [cloudinitdisk] diff --git a/pkg/resource_customizations/kubevirt.io/VirtualMachineInstance/health.lua b/pkg/resource_customizations/kubevirt.io/VirtualMachineInstance/health.lua new file mode 100644 index 0000000..2735207 --- /dev/null +++ b/pkg/resource_customizations/kubevirt.io/VirtualMachineInstance/health.lua @@ -0,0 +1,36 @@ +local hs = { status="Progressing", message="No status available"} +if obj.status ~= nil then + if obj.status.phase ~= nil then + hs.message = obj.status.phase + if hs.message == "Failed" then + hs.status = "Degraded" + return hs + elseif hs.message == "Pending" or hs.message == "Scheduling" or hs.message == "Scheduled" then + return hs + elseif hs.message == "Succeeded" then + hs.status = "Suspended" + return hs + elseif hs.message == "Unknown" then + hs.status = "Unknown" + return hs + end + end + if obj.status.conditions ~= nil then + for i, condition in ipairs(obj.status.conditions) do + if condition.type == "Ready" then + if condition.status == "True" then + hs.status = "Healthy" + hs.message = "Running" + else + hs.status = "Degraded" + hs.message = condition.message + end + elseif condition.type == "Paused" and condition.status == "True" then + hs.status = "Suspended" + hs.message = condition.message + return hs + end + end + end +end +return hs diff --git a/pkg/resource_customizations/kubevirt.io/VirtualMachineInstance/health_test.yaml b/pkg/resource_customizations/kubevirt.io/VirtualMachineInstance/health_test.yaml new file mode 100644 index 0000000..bc6a9c8 --- /dev/null +++ b/pkg/resource_customizations/kubevirt.io/VirtualMachineInstance/health_test.yaml @@ -0,0 +1,21 @@ +tests: +- healthStatus: + status: Progressing + message: "No status available" + inputPath: testdata/progressing_nostatus.yaml +- healthStatus: + status: Progressing + message: "Scheduled" + inputPath: testdata/progressing_scheduling.yaml +- healthStatus: + status: Healthy + message: "Running" + inputPath: testdata/healthy_ready.yaml +- healthStatus: + status: "Suspended" + 
message: "Succeeded" + inputPath: testdata/suspended_succeeded.yaml +- healthStatus: + status: "Suspended" + message: "VMI was paused by user" + inputPath: testdata/suspended_paused.yaml diff --git a/pkg/resource_customizations/kubevirt.io/VirtualMachineInstance/testdata/healthy_ready.yaml b/pkg/resource_customizations/kubevirt.io/VirtualMachineInstance/testdata/healthy_ready.yaml new file mode 100644 index 0000000..5f321b1 --- /dev/null +++ b/pkg/resource_customizations/kubevirt.io/VirtualMachineInstance/testdata/healthy_ready.yaml @@ -0,0 +1,104 @@ +apiVersion: kubevirt.io/v1 +kind: VirtualMachineInstance +metadata: + annotations: + kubevirt.io/latest-observed-api-version: v1 + kubevirt.io/storage-observed-api-version: v1alpha3 + creationTimestamp: "2021-09-14T23:39:01Z" + finalizers: + - kubevirt.io/virtualMachineControllerFinalize + - foregroundDeleteVirtualMachine + generation: 8 + labels: + kubevirt.io/domain: testvm + kubevirt.io/nodeName: minikube + kubevirt.io/size: small + name: testvm + namespace: default + ownerReferences: + - apiVersion: kubevirt.io/v1 + blockOwnerDeletion: true + controller: true + kind: VirtualMachine + name: testvm + uid: 40e44180-3dfb-4813-ace6-9f2d2c76c326 + resourceVersion: "602262" + uid: 18648b62-36b6-4b86-bd16-a288aa132203 +spec: + domain: + cpu: + cores: 1 + sockets: 1 + threads: 1 + devices: + disks: + - disk: + bus: virtio + name: containerdisk + - disk: + bus: virtio + name: cloudinitdisk + interfaces: + - masquerade: {} + name: default + features: + acpi: + enabled: true + firmware: + uuid: 5a9fc181-957e-5c32-9e5a-2de5e9673531 + machine: + type: q35 + resources: + requests: + cpu: 100m + memory: 64M + networks: + - name: default + pod: {} + volumes: + - containerDisk: + image: quay.io/kubevirt/cirros-container-disk-demo + imagePullPolicy: Always + name: containerdisk + - cloudInitNoCloud: + userDataBase64: SGkuXG4= + name: cloudinitdisk +status: + activePods: + 6f699622-20ba-4be2-b84c-aea8141b65b2: minikube + conditions: + - lastProbeTime: null + lastTransitionTime: "2021-09-14T23:39:06Z" + status: "True" + type: Ready + - lastProbeTime: null + lastTransitionTime: null + status: "True" + type: LiveMigratable + guestOSInfo: {} + interfaces: + - ipAddress: 172.17.0.11 + ipAddresses: + - 172.17.0.11 + mac: 52:54:00:b1:18:f3 + name: default + launcherContainerImageVersion: quay.io/kubevirt/virt-launcher:v0.45.0 + migrationMethod: BlockMigration + nodeName: minikube + phase: Running + phaseTransitionTimestamps: + - phase: Pending + phaseTransitionTimestamp: "2021-09-14T23:39:01Z" + - phase: Scheduling + phaseTransitionTimestamp: "2021-09-14T23:39:01Z" + - phase: Scheduled + phaseTransitionTimestamp: "2021-09-14T23:39:06Z" + - phase: Running + phaseTransitionTimestamp: "2021-09-14T23:39:08Z" + qosClass: Burstable + virtualMachineRevisionName: revision-start-vm-40e44180-3dfb-4813-ace6-9f2d2c76c326-6 + volumeStatus: + - name: cloudinitdisk + target: vdb + - name: containerdisk + target: vda diff --git a/pkg/resource_customizations/kubevirt.io/VirtualMachineInstance/testdata/progressing_nostatus.yaml b/pkg/resource_customizations/kubevirt.io/VirtualMachineInstance/testdata/progressing_nostatus.yaml new file mode 100644 index 0000000..87f91d1 --- /dev/null +++ b/pkg/resource_customizations/kubevirt.io/VirtualMachineInstance/testdata/progressing_nostatus.yaml @@ -0,0 +1,45 @@ +apiVersion: kubevirt.io/v1 +kind: VirtualMachineInstance +metadata: + labels: + kubevirt.io/domain: fedora-gitops1 + name: fedora-testvm + namespace: default +spec: + domain: + cpu: + 
cores: 1 + sockets: 1 + threads: 1 + devices: + disks: + - disk: + bus: virtio + name: fedora + interfaces: + - macAddress: 02:d6:2b:00:00:02 + masquerade: {} + name: default + networkInterfaceMultiqueue: true + rng: {} + features: + acpi: + enabled: true + firmware: + uuid: 15c031fd-7655-53c8-96d1-25810660149a + machine: + type: pc-q35-rhel8.4.0 + resources: + requests: + cpu: 100m + memory: 2Gi + evictionStrategy: LiveMigrate + networks: + - name: default + pod: {} + terminationGracePeriodSeconds: 180 + volumes: + - dataVolume: + name: fedora + name: fedora +status: {} diff --git a/pkg/resource_customizations/kubevirt.io/VirtualMachineInstance/testdata/progressing_pending.yaml b/pkg/resource_customizations/kubevirt.io/VirtualMachineInstance/testdata/progressing_pending.yaml new file mode 100644 index 0000000..8839268 --- /dev/null +++ b/pkg/resource_customizations/kubevirt.io/VirtualMachineInstance/testdata/progressing_pending.yaml @@ -0,0 +1,74 @@ +apiVersion: v1 +items: +- apiVersion: kubevirt.io/v1 + kind: VirtualMachineInstance + metadata: + creationTimestamp: "2021-09-14T23:39:01Z" + finalizers: + - kubevirt.io/virtualMachineControllerFinalize + - foregroundDeleteVirtualMachine + generation: 1 + labels: + kubevirt.io/domain: testvm + kubevirt.io/size: small + name: testvm + namespace: default + ownerReferences: + - apiVersion: kubevirt.io/v1 + blockOwnerDeletion: true + controller: true + kind: VirtualMachine + name: testvm + uid: 40e44180-3dfb-4813-ace6-9f2d2c76c326 + resourceVersion: "602212" + uid: 18648b62-36b6-4b86-bd16-a288aa132203 + spec: + domain: + cpu: + cores: 1 + sockets: 1 + threads: 1 + devices: + disks: + - disk: + bus: virtio + name: containerdisk + - disk: + bus: virtio + name: cloudinitdisk + interfaces: + - masquerade: {} + name: default + features: + acpi: + enabled: true + firmware: + uuid: 5a9fc181-957e-5c32-9e5a-2de5e9673531 + machine: + type: q35 + resources: + requests: + cpu: 100m + memory: 64M + networks: + - name: default + pod: {} + volumes: + - containerDisk: + image: quay.io/kubevirt/cirros-container-disk-demo + imagePullPolicy: Always + name: containerdisk + - cloudInitNoCloud: + userDataBase64: SGkuXG4= + name: cloudinitdisk + status: + guestOSInfo: {} + phase: Pending + phaseTransitionTimestamps: + - phase: Pending + phaseTransitionTimestamp: "2021-09-14T23:39:01Z" + virtualMachineRevisionName: revision-start-vm-40e44180-3dfb-4813-ace6-9f2d2c76c326-6 +kind: List +metadata: + resourceVersion: "" + selfLink: "" diff --git a/pkg/resource_customizations/kubevirt.io/VirtualMachineInstance/testdata/progressing_scheduling.yaml b/pkg/resource_customizations/kubevirt.io/VirtualMachineInstance/testdata/progressing_scheduling.yaml new file mode 100644 index 0000000..4fb6df1 --- /dev/null +++ b/pkg/resource_customizations/kubevirt.io/VirtualMachineInstance/testdata/progressing_scheduling.yaml @@ -0,0 +1,92 @@ +apiVersion: kubevirt.io/v1 +kind: VirtualMachineInstance +metadata: + annotations: + kubevirt.io/latest-observed-api-version: v1 + kubevirt.io/storage-observed-api-version: v1alpha3 + creationTimestamp: "2021-09-14T23:39:01Z" + finalizers: + - kubevirt.io/virtualMachineControllerFinalize + - foregroundDeleteVirtualMachine + generation: 5 + labels: + kubevirt.io/domain: testvm + kubevirt.io/nodeName: minikube + kubevirt.io/size: small + name: testvm + namespace: default + ownerReferences: + - apiVersion: kubevirt.io/v1 + blockOwnerDeletion: true + controller: true + kind: VirtualMachine + name: testvm + uid: 40e44180-3dfb-4813-ace6-9f2d2c76c326 + 
resourceVersion: "602255" + uid: 18648b62-36b6-4b86-bd16-a288aa132203 +spec: + domain: + cpu: + cores: 1 + sockets: 1 + threads: 1 + devices: + disks: + - disk: + bus: virtio + name: containerdisk + - disk: + bus: virtio + name: cloudinitdisk + interfaces: + - masquerade: {} + name: default + features: + acpi: + enabled: true + firmware: + uuid: 5a9fc181-957e-5c32-9e5a-2de5e9673531 + machine: + type: q35 + resources: + requests: + cpu: 100m + memory: 64M + networks: + - name: default + pod: {} + volumes: + - containerDisk: + image: quay.io/kubevirt/cirros-container-disk-demo + imagePullPolicy: Always + name: containerdisk + - cloudInitNoCloud: + userDataBase64: SGkuXG4= + name: cloudinitdisk +status: + activePods: + 6f699622-20ba-4be2-b84c-aea8141b65b2: minikube + conditions: + - lastProbeTime: "2021-09-14T23:39:01Z" + lastTransitionTime: "2021-09-14T23:39:01Z" + message: Guest VM is not reported as running + reason: GuestNotRunning + status: "False" + type: Ready + guestOSInfo: {} + nodeName: minikube + phase: Scheduled + phaseTransitionTimestamps: + - phase: Pending + phaseTransitionTimestamp: "2021-09-14T23:39:01Z" + - phase: Scheduling + phaseTransitionTimestamp: "2021-09-14T23:39:01Z" + - phase: Scheduled + phaseTransitionTimestamp: "2021-09-14T23:39:06Z" + qosClass: Burstable + virtualMachineRevisionName: revision-start-vm-40e44180-3dfb-4813-ace6-9f2d2c76c326-6 + volumeStatus: + - name: cloudinitdisk + target: "" + - name: containerdisk + target: "" diff --git a/pkg/resource_customizations/kubevirt.io/VirtualMachineInstance/testdata/suspended_paused.yaml b/pkg/resource_customizations/kubevirt.io/VirtualMachineInstance/testdata/suspended_paused.yaml new file mode 100644 index 0000000..2263935 --- /dev/null +++ b/pkg/resource_customizations/kubevirt.io/VirtualMachineInstance/testdata/suspended_paused.yaml @@ -0,0 +1,110 @@ +apiVersion: kubevirt.io/v1 +kind: VirtualMachineInstance +metadata: + annotations: + kubevirt.io/latest-observed-api-version: v1 + kubevirt.io/storage-observed-api-version: v1alpha3 + creationTimestamp: "2021-09-24T18:44:53Z" + finalizers: + - kubevirt.io/virtualMachineControllerFinalize + - foregroundDeleteVirtualMachine + generation: 9 + labels: + kubevirt.io/domain: testvm + kubevirt.io/nodeName: minikube + kubevirt.io/size: small + name: testvm + namespace: default + ownerReferences: + - apiVersion: kubevirt.io/v1 + blockOwnerDeletion: true + controller: true + kind: VirtualMachine + name: testvm + uid: 40e44180-3dfb-4813-ace6-9f2d2c76c326 + resourceVersion: "867783" + uid: 280e9c9c-1c43-4cfa-8098-c70f74dae01d +spec: + domain: + cpu: + cores: 1 + sockets: 1 + threads: 1 + devices: + disks: + - disk: + bus: virtio + name: containerdisk + - disk: + bus: virtio + name: cloudinitdisk + interfaces: + - masquerade: {} + name: default + features: + acpi: + enabled: true + firmware: + uuid: 5a9fc181-957e-5c32-9e5a-2de5e9673531 + machine: + type: q35 + resources: + requests: + cpu: 100m + memory: 64M + networks: + - name: default + pod: {} + volumes: + - containerDisk: + image: quay.io/kubevirt/cirros-container-disk-demo + imagePullPolicy: Always + name: containerdisk + - cloudInitNoCloud: + userDataBase64: SGkuXG4= + name: cloudinitdisk +status: + activePods: + 8689d878-8688-45eb-9676-da41bac5b935: minikube + conditions: + - lastProbeTime: null + lastTransitionTime: "2021-09-24T18:45:01Z" + status: "True" + type: Ready + - lastProbeTime: null + lastTransitionTime: null + status: "True" + type: LiveMigratable + - lastProbeTime: "2021-09-24T18:48:57Z" + 
lastTransitionTime: "2021-09-24T18:48:57Z" + message: VMI was paused by user + reason: PausedByUser + status: "True" + type: Paused + guestOSInfo: {} + interfaces: + - ipAddress: 172.17.0.11 + ipAddresses: + - 172.17.0.11 + mac: 52:54:00:c9:c9:b1 + name: default + launcherContainerImageVersion: quay.io/kubevirt/virt-launcher:v0.45.0 + migrationMethod: BlockMigration + nodeName: minikube + phase: Running + phaseTransitionTimestamps: + - phase: Pending + phaseTransitionTimestamp: "2021-09-24T18:44:53Z" + - phase: Scheduling + phaseTransitionTimestamp: "2021-09-24T18:44:53Z" + - phase: Scheduled + phaseTransitionTimestamp: "2021-09-24T18:45:01Z" + - phase: Running + phaseTransitionTimestamp: "2021-09-24T18:45:02Z" + qosClass: Burstable + virtualMachineRevisionName: revision-start-vm-40e44180-3dfb-4813-ace6-9f2d2c76c326-6 + volumeStatus: + - name: cloudinitdisk + target: vdb + - name: containerdisk + target: vda diff --git a/pkg/resource_customizations/kubevirt.io/VirtualMachineInstance/testdata/suspended_succeeded.yaml b/pkg/resource_customizations/kubevirt.io/VirtualMachineInstance/testdata/suspended_succeeded.yaml new file mode 100644 index 0000000..273efb2 --- /dev/null +++ b/pkg/resource_customizations/kubevirt.io/VirtualMachineInstance/testdata/suspended_succeeded.yaml @@ -0,0 +1,100 @@ +apiVersion: kubevirt.io/v1 +kind: VirtualMachineInstance +metadata: + annotations: + kubevirt.io/latest-observed-api-version: v1 + kubevirt.io/storage-observed-api-version: v1alpha3 + creationTimestamp: "2021-09-14T22:27:08Z" + deletionGracePeriodSeconds: 0 + deletionTimestamp: "2021-09-14T23:07:57Z" + finalizers: + - foregroundDeleteVirtualMachine + generation: 11 + labels: + kubevirt.io/domain: testvm + kubevirt.io/nodeName: minikube + kubevirt.io/size: small + name: testvm + namespace: default +spec: + domain: + cpu: + cores: 1 + sockets: 1 + threads: 1 + devices: + disks: + - disk: + bus: virtio + name: containerdisk + - disk: + bus: virtio + name: cloudinitdisk + interfaces: + - masquerade: {} + name: default + features: + acpi: + enabled: true + firmware: + uuid: 5a9fc181-957e-5c32-9e5a-2de5e9673531 + machine: + type: q35 + resources: + requests: + cpu: 100m + memory: 64M + networks: + - name: default + pod: {} + volumes: + - containerDisk: + image: quay.io/kubevirt/cirros-container-disk-demo + imagePullPolicy: Always + name: containerdisk + - cloudInitNoCloud: + userDataBase64: SGkuXG4= + name: cloudinitdisk +status: + activePods: + 8190c35b-95ef-42a8-ab18-77dcfb729e79: minikube + conditions: + - lastProbeTime: "2021-09-14T23:07:57Z" + lastTransitionTime: "2021-09-14T23:07:57Z" + message: virt-launcher pod is terminating + reason: PodTerminating + status: "False" + type: Ready + - lastProbeTime: null + lastTransitionTime: null + status: "True" + type: LiveMigratable + guestOSInfo: {} + interfaces: + - ipAddress: 172.17.0.11 + ipAddresses: + - 172.17.0.11 + mac: 52:54:00:ea:73:fd + name: default + launcherContainerImageVersion: quay.io/kubevirt/virt-launcher:v0.45.0 + migrationMethod: BlockMigration + nodeName: minikube + phase: Succeeded + phaseTransitionTimestamps: + - phase: Pending + phaseTransitionTimestamp: "2021-09-14T22:27:08Z" + - phase: Scheduling + phaseTransitionTimestamp: "2021-09-14T22:27:08Z" + - phase: Scheduled + phaseTransitionTimestamp: "2021-09-14T22:27:15Z" + - phase: Running + phaseTransitionTimestamp: "2021-09-14T22:27:16Z" + - phase: Succeeded + phaseTransitionTimestamp: "2021-09-14T23:08:00Z" + qosClass: Burstable + virtualMachineRevisionName: 
revision-start-vm-40e44180-3dfb-4813-ace6-9f2d2c76c326-4 + volumeStatus: + - name: cloudinitdisk + target: vdb + - name: containerdisk + target: vda diff --git a/pkg/resource_customizations/lifecycle.keptn.sh/KeptnAppVersion/health.lua b/pkg/resource_customizations/lifecycle.keptn.sh/KeptnAppVersion/health.lua new file mode 100644 index 0000000..d99bf03 --- /dev/null +++ b/pkg/resource_customizations/lifecycle.keptn.sh/KeptnAppVersion/health.lua @@ -0,0 +1,14 @@ +local hs = {} +if obj.status.status == "Succeeded" then + hs.status = "Healthy" + hs.message = "KeptnAppVersion is healthy" + return hs +end +if obj.status.status == "Failed" then + hs.status = "Degraded" + hs.message = "KeptnAppVersion is degraded" + return hs +end +hs.status = "Progressing" +hs.message = "KeptnAppVersion is progressing" +return hs \ No newline at end of file diff --git a/pkg/resource_customizations/lifecycle.keptn.sh/KeptnAppVersion/health_test.yaml b/pkg/resource_customizations/lifecycle.keptn.sh/KeptnAppVersion/health_test.yaml new file mode 100644 index 0000000..67f34f1 --- /dev/null +++ b/pkg/resource_customizations/lifecycle.keptn.sh/KeptnAppVersion/health_test.yaml @@ -0,0 +1,13 @@ +tests: + - healthStatus: + status: Progressing + message: "KeptnAppVersion is progressing" + inputPath: testdata/progressing.yaml + - healthStatus: + status: Degraded + message: "KeptnAppVersion is degraded" + inputPath: testdata/degraded.yaml + - healthStatus: + status: Healthy + message: "KeptnAppVersion is healthy" + inputPath: testdata/healthy.yaml \ No newline at end of file diff --git a/pkg/resource_customizations/lifecycle.keptn.sh/KeptnAppVersion/testdata/degraded.yaml b/pkg/resource_customizations/lifecycle.keptn.sh/KeptnAppVersion/testdata/degraded.yaml new file mode 100644 index 0000000..0473257 --- /dev/null +++ b/pkg/resource_customizations/lifecycle.keptn.sh/KeptnAppVersion/testdata/degraded.yaml @@ -0,0 +1,93 @@ +apiVersion: lifecycle.keptn.sh/v1alpha2 +kind: KeptnAppVersion +metadata: + annotations: + traceparent: 00-adc84f15413da737240dde75de2248fb-e3fcbc6fbdd05b1c-01 + creationTimestamp: "2022-12-14T13:17:36Z" + generation: 1 + name: podtato-head-0.1.1 + namespace: podtato-kubectl + ownerReferences: + - apiVersion: lifecycle.keptn.sh/v1alpha2 + blockOwnerDeletion: true + controller: true + kind: KeptnApp + name: podtato-head + uid: b0e9773a-3dc7-4639-a4bd-64c8bb3d33ab + resourceVersion: "226500" + uid: 9ae27184-c63d-4e50-bbc7-18e5e289e85c +spec: + appName: podtato-head + preDeploymentEvaluations: + - app-pre-deploy-eval-2 + revision: "1" + traceId: + traceparent: 00-ecdd1f5a7e1068ac9b0d044aa165ca4c-3777b2225e0d2dc3-01 + version: 0.1.1 + workloads: + - name: podtato-head-left-arm + version: 0.2.7 + - name: podtato-head-left-leg + version: 0.2.7 + - name: podtato-head-entry + version: 0.2.7 + - name: podtato-head-right-arm + version: 0.1.0 + - name: podtato-head-right-leg + version: 0.2.7 + - name: podtato-head-hat + version: 0.1.0 +status: + currentPhase: Completed + endTime: "2022-12-14T13:19:06Z" + phaseTraceIDs: + "": + traceparent: 00-ecdd1f5a7e1068ac9b0d044aa165ca4c-d566f50faad914b0-01 + AppDeploy: + traceparent: 00-ecdd1f5a7e1068ac9b0d044aa165ca4c-0ae50f2d844888ab-01 + AppPostDeployEvaluations: + traceparent: 00-ecdd1f5a7e1068ac9b0d044aa165ca4c-f3dd12be90fc6c55-01 + AppPostDeployTasks: + traceparent: 00-ecdd1f5a7e1068ac9b0d044aa165ca4c-0ebcfa51b9b3a08b-01 + AppPreDeployEvaluations: + traceparent: 00-ecdd1f5a7e1068ac9b0d044aa165ca4c-b5838bedd1703826-01 + AppPreDeployTasks: + traceparent: 
00-ecdd1f5a7e1068ac9b0d044aa165ca4c-b1c3977a55236160-01 + postDeploymentEvaluationStatus: Succeeded + postDeploymentStatus: Succeeded + preDeploymentEvaluationStatus: Succeeded + preDeploymentEvaluationTaskStatus: + - endTime: "2022-12-14T13:17:41Z" + evaluationDefinitionName: app-pre-deploy-eval-2 + evaluationName: pre-eval-app-pre-deploy-eval-2-48925 + startTime: "2022-12-14T13:17:36Z" + status: Succeeded + preDeploymentStatus: Succeeded + startTime: "2022-12-14T13:17:36Z" + status: Failed + workloadOverallStatus: Failed + workloadStatus: + - status: Failed + workload: + name: podtato-head-left-arm + version: 0.2.7 + - status: Progressing + workload: + name: podtato-head-left-leg + version: 0.2.7 + - status: Succeeded + workload: + name: podtato-head-entry + version: 0.2.7 + - status: Succeeded + workload: + name: podtato-head-right-arm + version: 0.1.0 + - status: Succeeded + workload: + name: podtato-head-right-leg + version: 0.2.7 + - status: Succeeded + workload: + name: podtato-head-hat + version: 0.1.0 \ No newline at end of file diff --git a/pkg/resource_customizations/lifecycle.keptn.sh/KeptnAppVersion/testdata/healthy.yaml b/pkg/resource_customizations/lifecycle.keptn.sh/KeptnAppVersion/testdata/healthy.yaml new file mode 100644 index 0000000..214a2c2 --- /dev/null +++ b/pkg/resource_customizations/lifecycle.keptn.sh/KeptnAppVersion/testdata/healthy.yaml @@ -0,0 +1,93 @@ +apiVersion: lifecycle.keptn.sh/v1alpha2 +kind: KeptnAppVersion +metadata: + annotations: + traceparent: 00-adc84f15413da737240dde75de2248fb-e3fcbc6fbdd05b1c-01 + creationTimestamp: "2022-12-14T13:17:36Z" + generation: 1 + name: podtato-head-0.1.1 + namespace: podtato-kubectl + ownerReferences: + - apiVersion: lifecycle.keptn.sh/v1alpha2 + blockOwnerDeletion: true + controller: true + kind: KeptnApp + name: podtato-head + uid: b0e9773a-3dc7-4639-a4bd-64c8bb3d33ab + resourceVersion: "226500" + uid: 9ae27184-c63d-4e50-bbc7-18e5e289e85c +spec: + appName: podtato-head + preDeploymentEvaluations: + - app-pre-deploy-eval-2 + revision: "1" + traceId: + traceparent: 00-ecdd1f5a7e1068ac9b0d044aa165ca4c-3777b2225e0d2dc3-01 + version: 0.1.1 + workloads: + - name: podtato-head-left-arm + version: 0.2.7 + - name: podtato-head-left-leg + version: 0.2.7 + - name: podtato-head-entry + version: 0.2.7 + - name: podtato-head-right-arm + version: 0.1.0 + - name: podtato-head-right-leg + version: 0.2.7 + - name: podtato-head-hat + version: 0.1.0 +status: + currentPhase: Completed + endTime: "2022-12-14T13:19:06Z" + phaseTraceIDs: + "": + traceparent: 00-ecdd1f5a7e1068ac9b0d044aa165ca4c-d566f50faad914b0-01 + AppDeploy: + traceparent: 00-ecdd1f5a7e1068ac9b0d044aa165ca4c-0ae50f2d844888ab-01 + AppPostDeployEvaluations: + traceparent: 00-ecdd1f5a7e1068ac9b0d044aa165ca4c-f3dd12be90fc6c55-01 + AppPostDeployTasks: + traceparent: 00-ecdd1f5a7e1068ac9b0d044aa165ca4c-0ebcfa51b9b3a08b-01 + AppPreDeployEvaluations: + traceparent: 00-ecdd1f5a7e1068ac9b0d044aa165ca4c-b5838bedd1703826-01 + AppPreDeployTasks: + traceparent: 00-ecdd1f5a7e1068ac9b0d044aa165ca4c-b1c3977a55236160-01 + postDeploymentEvaluationStatus: Succeeded + postDeploymentStatus: Succeeded + preDeploymentEvaluationStatus: Succeeded + preDeploymentEvaluationTaskStatus: + - endTime: "2022-12-14T13:17:41Z" + evaluationDefinitionName: app-pre-deploy-eval-2 + evaluationName: pre-eval-app-pre-deploy-eval-2-48925 + startTime: "2022-12-14T13:17:36Z" + status: Succeeded + preDeploymentStatus: Succeeded + startTime: "2022-12-14T13:17:36Z" + status: Succeeded + workloadOverallStatus: Succeeded 
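Unlike every other script in this patch, the KeptnAppVersion health.lua indexes obj.status.status without first checking that obj.status exists, so a freshly created resource whose controller has not yet written a status block would make the check error rather than report Progressing. A guarded variant of the same mapping (a sketch, not the patch's code; keptnHealth is an illustrative name):

local function keptnHealth(obj)
  -- Short-circuits to nil when status has not been written yet.
  local s = obj.status and obj.status.status
  if s == "Succeeded" then
    return { status = "Healthy", message = "KeptnAppVersion is healthy" }
  elseif s == "Failed" then
    return { status = "Degraded", message = "KeptnAppVersion is degraded" }
  end
  return { status = "Progressing", message = "KeptnAppVersion is progressing" }
end

-- A status-less stub: the unguarded script would raise "attempt to index a nil
-- value"; the guarded variant falls back to Progressing.
print(keptnHealth({}).status)  --> Progressing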
+ workloadStatus: + - status: Succeeded + workload: + name: podtato-head-left-arm + version: 0.2.7 + - status: Succeeded + workload: + name: podtato-head-left-leg + version: 0.2.7 + - status: Succeeded + workload: + name: podtato-head-entry + version: 0.2.7 + - status: Succeeded + workload: + name: podtato-head-right-arm + version: 0.1.0 + - status: Succeeded + workload: + name: podtato-head-right-leg + version: 0.2.7 + - status: Succeeded + workload: + name: podtato-head-hat + version: 0.1.0 \ No newline at end of file diff --git a/pkg/resource_customizations/lifecycle.keptn.sh/KeptnAppVersion/testdata/progressing.yaml b/pkg/resource_customizations/lifecycle.keptn.sh/KeptnAppVersion/testdata/progressing.yaml new file mode 100644 index 0000000..c9b80e7 --- /dev/null +++ b/pkg/resource_customizations/lifecycle.keptn.sh/KeptnAppVersion/testdata/progressing.yaml @@ -0,0 +1,93 @@ +apiVersion: lifecycle.keptn.sh/v1alpha2 +kind: KeptnAppVersion +metadata: + annotations: + traceparent: 00-adc84f15413da737240dde75de2248fb-e3fcbc6fbdd05b1c-01 + creationTimestamp: "2022-12-14T13:17:36Z" + generation: 1 + name: podtato-head-0.1.1 + namespace: podtato-kubectl + ownerReferences: + - apiVersion: lifecycle.keptn.sh/v1alpha2 + blockOwnerDeletion: true + controller: true + kind: KeptnApp + name: podtato-head + uid: b0e9773a-3dc7-4639-a4bd-64c8bb3d33ab + resourceVersion: "226500" + uid: 9ae27184-c63d-4e50-bbc7-18e5e289e85c +spec: + appName: podtato-head + preDeploymentEvaluations: + - app-pre-deploy-eval-2 + revision: "1" + traceId: + traceparent: 00-ecdd1f5a7e1068ac9b0d044aa165ca4c-3777b2225e0d2dc3-01 + version: 0.1.1 + workloads: + - name: podtato-head-left-arm + version: 0.2.7 + - name: podtato-head-left-leg + version: 0.2.7 + - name: podtato-head-entry + version: 0.2.7 + - name: podtato-head-right-arm + version: 0.1.0 + - name: podtato-head-right-leg + version: 0.2.7 + - name: podtato-head-hat + version: 0.1.0 +status: + currentPhase: Completed + endTime: "2022-12-14T13:19:06Z" + phaseTraceIDs: + "": + traceparent: 00-ecdd1f5a7e1068ac9b0d044aa165ca4c-d566f50faad914b0-01 + AppDeploy: + traceparent: 00-ecdd1f5a7e1068ac9b0d044aa165ca4c-0ae50f2d844888ab-01 + AppPostDeployEvaluations: + traceparent: 00-ecdd1f5a7e1068ac9b0d044aa165ca4c-f3dd12be90fc6c55-01 + AppPostDeployTasks: + traceparent: 00-ecdd1f5a7e1068ac9b0d044aa165ca4c-0ebcfa51b9b3a08b-01 + AppPreDeployEvaluations: + traceparent: 00-ecdd1f5a7e1068ac9b0d044aa165ca4c-b5838bedd1703826-01 + AppPreDeployTasks: + traceparent: 00-ecdd1f5a7e1068ac9b0d044aa165ca4c-b1c3977a55236160-01 + postDeploymentEvaluationStatus: Succeeded + postDeploymentStatus: Succeeded + preDeploymentEvaluationStatus: Succeeded + preDeploymentEvaluationTaskStatus: + - endTime: "2022-12-14T13:17:41Z" + evaluationDefinitionName: app-pre-deploy-eval-2 + evaluationName: pre-eval-app-pre-deploy-eval-2-48925 + startTime: "2022-12-14T13:17:36Z" + status: Succeeded + preDeploymentStatus: Succeeded + startTime: "2022-12-14T13:17:36Z" + status: Progressing + workloadOverallStatus: Progressing + workloadStatus: + - status: Progressing + workload: + name: podtato-head-left-arm + version: 0.2.7 + - status: Succeeded + workload: + name: podtato-head-left-leg + version: 0.2.7 + - status: Succeeded + workload: + name: podtato-head-entry + version: 0.2.7 + - status: Succeeded + workload: + name: podtato-head-right-arm + version: 0.1.0 + - status: Succeeded + workload: + name: podtato-head-right-leg + version: 0.2.7 + - status: Succeeded + workload: + name: podtato-head-hat + version: 0.1.0 \ No newline 
at end of file diff --git a/pkg/resource_customizations/lifecycle.keptn.sh/KeptnEvaluation/health.lua b/pkg/resource_customizations/lifecycle.keptn.sh/KeptnEvaluation/health.lua new file mode 100644 index 0000000..97543ec --- /dev/null +++ b/pkg/resource_customizations/lifecycle.keptn.sh/KeptnEvaluation/health.lua @@ -0,0 +1,14 @@ +local hs = {} +if obj.status.overallStatus == "Succeeded" then + hs.status = "Healthy" + hs.message = "KeptnEvaluation is healthy" + return hs +end +if obj.status.overallStatus == "Failed" then + hs.status = "Degraded" + hs.message = "KeptnEvaluation is degraded" + return hs +end +hs.status = "Progressing" +hs.message = "KeptnEvaluation is progressing" +return hs \ No newline at end of file diff --git a/pkg/resource_customizations/lifecycle.keptn.sh/KeptnEvaluation/health_test.yaml b/pkg/resource_customizations/lifecycle.keptn.sh/KeptnEvaluation/health_test.yaml new file mode 100644 index 0000000..dd0378e --- /dev/null +++ b/pkg/resource_customizations/lifecycle.keptn.sh/KeptnEvaluation/health_test.yaml @@ -0,0 +1,13 @@ +tests: + - healthStatus: + status: Progressing + message: "KeptnEvaluation is progressing" + inputPath: testdata/progressing.yaml + - healthStatus: + status: Degraded + message: "KeptnEvaluation is degraded" + inputPath: testdata/degraded.yaml + - healthStatus: + status: Healthy + message: "KeptnEvaluation is healthy" + inputPath: testdata/healthy.yaml \ No newline at end of file diff --git a/pkg/resource_customizations/lifecycle.keptn.sh/KeptnEvaluation/testdata/degraded.yaml b/pkg/resource_customizations/lifecycle.keptn.sh/KeptnEvaluation/testdata/degraded.yaml new file mode 100644 index 0000000..3054376 --- /dev/null +++ b/pkg/resource_customizations/lifecycle.keptn.sh/KeptnEvaluation/testdata/degraded.yaml @@ -0,0 +1,33 @@ +apiVersion: lifecycle.keptn.sh/v1alpha2 +kind: KeptnEvaluation +metadata: + creationTimestamp: "2022-12-14T13:17:36Z" + generation: 1 + name: pre-eval-app-pre-deploy-eval-2-48925 + namespace: podtato-kubectl + ownerReferences: + - apiVersion: lifecycle.keptn.sh/v1alpha2 + blockOwnerDeletion: true + controller: true + kind: KeptnAppVersion + name: podtato-head-0.1.1 + uid: 9ae27184-c63d-4e50-bbc7-18e5e289e85c + resourceVersion: "225737" + uid: 84acabb8-9dca-46ab-b099-4799a732ba43 +spec: + appName: podtato-head + appVersion: 0.1.1 + checkType: pre-eval + evaluationDefinition: app-pre-deploy-eval-2 + retries: 10 + retryInterval: 5s + workloadVersion: "" +status: + endTime: "2022-12-14T13:17:36Z" + evaluationStatus: + available-cpus: + status: Failed + value: "0.25" + overallStatus: Failed + retryCount: 1 + startTime: "2022-12-14T13:17:36Z" \ No newline at end of file diff --git a/pkg/resource_customizations/lifecycle.keptn.sh/KeptnEvaluation/testdata/healthy.yaml b/pkg/resource_customizations/lifecycle.keptn.sh/KeptnEvaluation/testdata/healthy.yaml new file mode 100644 index 0000000..8c74e31 --- /dev/null +++ b/pkg/resource_customizations/lifecycle.keptn.sh/KeptnEvaluation/testdata/healthy.yaml @@ -0,0 +1,33 @@ +apiVersion: lifecycle.keptn.sh/v1alpha2 +kind: KeptnEvaluation +metadata: + creationTimestamp: "2022-12-14T13:17:36Z" + generation: 1 + name: pre-eval-app-pre-deploy-eval-2-48925 + namespace: podtato-kubectl + ownerReferences: + - apiVersion: lifecycle.keptn.sh/v1alpha2 + blockOwnerDeletion: true + controller: true + kind: KeptnAppVersion + name: podtato-head-0.1.1 + uid: 9ae27184-c63d-4e50-bbc7-18e5e289e85c + resourceVersion: "225737" + uid: 84acabb8-9dca-46ab-b099-4799a732ba43 +spec: + appName: podtato-head + 
appVersion: 0.1.1 + checkType: pre-eval + evaluationDefinition: app-pre-deploy-eval-2 + retries: 10 + retryInterval: 5s + workloadVersion: "" +status: + endTime: "2022-12-14T13:17:36Z" + evaluationStatus: + available-cpus: + status: Succeeded + value: "1.3100000000000005" + overallStatus: Succeeded + retryCount: 1 + startTime: "2022-12-14T13:17:36Z" \ No newline at end of file diff --git a/pkg/resource_customizations/lifecycle.keptn.sh/KeptnEvaluation/testdata/progressing.yaml b/pkg/resource_customizations/lifecycle.keptn.sh/KeptnEvaluation/testdata/progressing.yaml new file mode 100644 index 0000000..ba50406 --- /dev/null +++ b/pkg/resource_customizations/lifecycle.keptn.sh/KeptnEvaluation/testdata/progressing.yaml @@ -0,0 +1,33 @@ +apiVersion: lifecycle.keptn.sh/v1alpha2 +kind: KeptnEvaluation +metadata: + creationTimestamp: "2022-12-14T13:17:36Z" + generation: 1 + name: pre-eval-app-pre-deploy-eval-2-48925 + namespace: podtato-kubectl + ownerReferences: + - apiVersion: lifecycle.keptn.sh/v1alpha2 + blockOwnerDeletion: true + controller: true + kind: KeptnAppVersion + name: podtato-head-0.1.1 + uid: 9ae27184-c63d-4e50-bbc7-18e5e289e85c + resourceVersion: "225737" + uid: 84acabb8-9dca-46ab-b099-4799a732ba43 +spec: + appName: podtato-head + appVersion: 0.1.1 + checkType: pre-eval + evaluationDefinition: app-pre-deploy-eval-2 + retries: 10 + retryInterval: 5s + workloadVersion: "" +status: + endTime: "2022-12-14T13:17:36Z" + evaluationStatus: + available-cpus: + status: Progressing + value: "" + overallStatus: Progressing + retryCount: 1 + startTime: "2022-12-14T13:17:36Z" \ No newline at end of file diff --git a/pkg/resource_customizations/lifecycle.keptn.sh/KeptnTask/health.lua b/pkg/resource_customizations/lifecycle.keptn.sh/KeptnTask/health.lua new file mode 100644 index 0000000..d8c6047 --- /dev/null +++ b/pkg/resource_customizations/lifecycle.keptn.sh/KeptnTask/health.lua @@ -0,0 +1,14 @@ +local hs = {} +if obj.status.status == "Succeeded" then + hs.status = "Healthy" + hs.message = "KeptnTask is healthy" + return hs +end +if obj.status.status == "Failed" then + hs.status = "Degraded" + hs.message = "KeptnTask is degraded" + return hs +end +hs.status = "Progressing" +hs.message = "KeptnTask is progressing" +return hs \ No newline at end of file diff --git a/pkg/resource_customizations/lifecycle.keptn.sh/KeptnTask/health_test.yaml b/pkg/resource_customizations/lifecycle.keptn.sh/KeptnTask/health_test.yaml new file mode 100644 index 0000000..3d07164 --- /dev/null +++ b/pkg/resource_customizations/lifecycle.keptn.sh/KeptnTask/health_test.yaml @@ -0,0 +1,13 @@ +tests: + - healthStatus: + status: Progressing + message: "KeptnTask is progressing" + inputPath: testdata/progressing.yaml + - healthStatus: + status: Degraded + message: "KeptnTask is degraded" + inputPath: testdata/degraded.yaml + - healthStatus: + status: Healthy + message: "KeptnTask is healthy" + inputPath: testdata/healthy.yaml \ No newline at end of file diff --git a/pkg/resource_customizations/lifecycle.keptn.sh/KeptnTask/testdata/degraded.yaml b/pkg/resource_customizations/lifecycle.keptn.sh/KeptnTask/testdata/degraded.yaml new file mode 100644 index 0000000..4055a56 --- /dev/null +++ b/pkg/resource_customizations/lifecycle.keptn.sh/KeptnTask/testdata/degraded.yaml @@ -0,0 +1,36 @@ +apiVersion: lifecycle.keptn.sh/v1alpha2 +kind: KeptnTask +metadata: + creationTimestamp: "2022-12-14T13:17:56Z" + generation: 1 + name: pre-pre-deployment-check-entry-31363 + namespace: podtato-kubectl + ownerReferences: + - apiVersion: 
lifecycle.keptn.sh/v1alpha2 + blockOwnerDeletion: true + controller: true + kind: KeptnWorkloadInstance + name: podtato-head-podtato-head-left-leg-0.2.7 + uid: 1376ab23-43d7-4304-89b3-1eea3a895a2d + resourceVersion: "226304" + uid: 2e89a2e6-44ba-47ed-8800-c0194e549ee0 +spec: + app: podtato-head + appVersion: "" + checkType: pre + context: + appName: "" + appVersion: "" + objectType: "" + taskType: "" + workloadName: "" + workloadVersion: "" + parameters: {} + secureParameters: {} + taskDefinition: pre-deployment-check-entry + workload: podtato-head-podtato-head-left-leg + workloadVersion: 0.2.7 +status: + jobName: klc-pre-pre-deployment-check--44910 + startTime: "2022-12-14T13:17:56Z" + status: Failed \ No newline at end of file diff --git a/pkg/resource_customizations/lifecycle.keptn.sh/KeptnTask/testdata/healthy.yaml b/pkg/resource_customizations/lifecycle.keptn.sh/KeptnTask/testdata/healthy.yaml new file mode 100644 index 0000000..4939df9 --- /dev/null +++ b/pkg/resource_customizations/lifecycle.keptn.sh/KeptnTask/testdata/healthy.yaml @@ -0,0 +1,37 @@ +apiVersion: lifecycle.keptn.sh/v1alpha2 +kind: KeptnTask +metadata: + creationTimestamp: "2022-12-14T13:17:56Z" + generation: 1 + name: pre-pre-deployment-check-entry-31363 + namespace: podtato-kubectl + ownerReferences: + - apiVersion: lifecycle.keptn.sh/v1alpha2 + blockOwnerDeletion: true + controller: true + kind: KeptnWorkloadInstance + name: podtato-head-podtato-head-left-leg-0.2.7 + uid: 1376ab23-43d7-4304-89b3-1eea3a895a2d + resourceVersion: "226304" + uid: 2e89a2e6-44ba-47ed-8800-c0194e549ee0 +spec: + app: podtato-head + appVersion: "" + checkType: pre + context: + appName: "" + appVersion: "" + objectType: "" + taskType: "" + workloadName: "" + workloadVersion: "" + parameters: {} + secureParameters: {} + taskDefinition: pre-deployment-check-entry + workload: podtato-head-podtato-head-left-leg + workloadVersion: 0.2.7 +status: + endTime: "2022-12-14T13:18:46Z" + jobName: klc-pre-pre-deployment-check--44910 + startTime: "2022-12-14T13:17:56Z" + status: Succeeded \ No newline at end of file diff --git a/pkg/resource_customizations/lifecycle.keptn.sh/KeptnTask/testdata/progressing.yaml b/pkg/resource_customizations/lifecycle.keptn.sh/KeptnTask/testdata/progressing.yaml new file mode 100644 index 0000000..342365c --- /dev/null +++ b/pkg/resource_customizations/lifecycle.keptn.sh/KeptnTask/testdata/progressing.yaml @@ -0,0 +1,36 @@ +apiVersion: lifecycle.keptn.sh/v1alpha2 +kind: KeptnTask +metadata: + creationTimestamp: "2022-12-14T13:17:56Z" + generation: 1 + name: pre-pre-deployment-check-entry-31363 + namespace: podtato-kubectl + ownerReferences: + - apiVersion: lifecycle.keptn.sh/v1alpha2 + blockOwnerDeletion: true + controller: true + kind: KeptnWorkloadInstance + name: podtato-head-podtato-head-left-leg-0.2.7 + uid: 1376ab23-43d7-4304-89b3-1eea3a895a2d + resourceVersion: "226304" + uid: 2e89a2e6-44ba-47ed-8800-c0194e549ee0 +spec: + app: podtato-head + appVersion: "" + checkType: pre + context: + appName: "" + appVersion: "" + objectType: "" + taskType: "" + workloadName: "" + workloadVersion: "" + parameters: {} + secureParameters: {} + taskDefinition: pre-deployment-check-entry + workload: podtato-head-podtato-head-left-leg + workloadVersion: 0.2.7 +status: + jobName: klc-pre-pre-deployment-check--44910 + startTime: "2022-12-14T13:17:56Z" + status: Progressing \ No newline at end of file diff --git a/pkg/resource_customizations/lifecycle.keptn.sh/KeptnWorkloadInstance/health.lua 
b/pkg/resource_customizations/lifecycle.keptn.sh/KeptnWorkloadInstance/health.lua new file mode 100644 index 0000000..5ce79d0 --- /dev/null +++ b/pkg/resource_customizations/lifecycle.keptn.sh/KeptnWorkloadInstance/health.lua @@ -0,0 +1,14 @@ +local hs = {} +if obj.status.status == "Succeeded" then + hs.status = "Healthy" + hs.message = "KeptnWorkloadInstance is healthy" + return hs +end +if obj.status.status == "Failed" then + hs.status = "Degraded" + hs.message = "KeptnWorkloadInstance is degraded" + return hs +end +hs.status = "Progressing" +hs.message = "KeptnWorkloadInstance is progressing" +return hs \ No newline at end of file diff --git a/pkg/resource_customizations/lifecycle.keptn.sh/KeptnWorkloadInstance/health_test.yaml b/pkg/resource_customizations/lifecycle.keptn.sh/KeptnWorkloadInstance/health_test.yaml new file mode 100644 index 0000000..f27bad6 --- /dev/null +++ b/pkg/resource_customizations/lifecycle.keptn.sh/KeptnWorkloadInstance/health_test.yaml @@ -0,0 +1,13 @@ +tests: + - healthStatus: + status: Progressing + message: "KeptnWorkloadInstance is progressing" + inputPath: testdata/progressing.yaml + - healthStatus: + status: Degraded + message: "KeptnWorkloadInstance is degraded" + inputPath: testdata/degraded.yaml + - healthStatus: + status: Healthy + message: "KeptnWorkloadInstance is healthy" + inputPath: testdata/healthy.yaml \ No newline at end of file diff --git a/pkg/resource_customizations/lifecycle.keptn.sh/KeptnWorkloadInstance/testdata/degraded.yaml b/pkg/resource_customizations/lifecycle.keptn.sh/KeptnWorkloadInstance/testdata/degraded.yaml new file mode 100644 index 0000000..324e4e0 --- /dev/null +++ b/pkg/resource_customizations/lifecycle.keptn.sh/KeptnWorkloadInstance/testdata/degraded.yaml @@ -0,0 +1,50 @@ +apiVersion: lifecycle.keptn.sh/v1alpha2 +kind: KeptnWorkloadInstance +metadata: + annotations: + traceparent: 00-5050e556a9aaf22814aa689d0518f4d3-cbcff966a6d32c39-01 + creationTimestamp: "2022-12-14T13:17:36Z" + generation: 2 + name: podtato-head-podtato-head-entry-0.2.7 + namespace: podtato-kubectl + ownerReferences: + - apiVersion: lifecycle.keptn.sh/v1alpha2 + blockOwnerDeletion: true + controller: true + kind: KeptnWorkload + name: podtato-head-podtato-head-entry + uid: dcafe814-7f9d-4d50-9a66-f61c81bfe764 + resourceVersion: "226253" + uid: 6987404b-c7b9-40f5-95e9-d5aad55a3f3b +spec: + app: podtato-head + resourceReference: + kind: ReplicaSet + name: podtato-head-entry-6fc8964846 + uid: 2b6e44bf-27e3-4305-a9fb-65d2f412936b + traceId: + traceparent: 00-ecdd1f5a7e1068ac9b0d044aa165ca4c-0ae50f2d844888ab-01 + version: 0.2.7 + workloadName: podtato-head-podtato-head-entry +status: + currentPhase: PreDeployTasks + deploymentStatus: Succeeded + phaseTraceIDs: + "": + traceparent: 00-ecdd1f5a7e1068ac9b0d044aa165ca4c-ca249d3f6e024547-01 + WorkloadDeploy: + traceparent: 00-ecdd1f5a7e1068ac9b0d044aa165ca4c-3be53185e6024eb4-01 + WorkloadPostDeployEvaluations: + traceparent: 00-ecdd1f5a7e1068ac9b0d044aa165ca4c-0dc305a08a0ccf14-01 + WorkloadPostDeployTasks: + traceparent: 00-ecdd1f5a7e1068ac9b0d044aa165ca4c-4c7cf78cbbc40e14-01 + WorkloadPreDeployEvaluations: + traceparent: 00-ecdd1f5a7e1068ac9b0d044aa165ca4c-5eed0ec5420cfc89-01 + WorkloadPreDeployTasks: + traceparent: 00-ecdd1f5a7e1068ac9b0d044aa165ca4c-bef05615fc0138ac-01 + postDeploymentEvaluationStatus: Progressing + postDeploymentStatus: Progressing + preDeploymentEvaluationStatus: Failed + preDeploymentStatus: Failed + startTime: "2022-12-14T13:17:57Z" + status: Failed \ No newline at end of file diff --git 
a/pkg/resource_customizations/lifecycle.keptn.sh/KeptnWorkloadInstance/testdata/healthy.yaml b/pkg/resource_customizations/lifecycle.keptn.sh/KeptnWorkloadInstance/testdata/healthy.yaml new file mode 100644 index 0000000..491501a --- /dev/null +++ b/pkg/resource_customizations/lifecycle.keptn.sh/KeptnWorkloadInstance/testdata/healthy.yaml @@ -0,0 +1,51 @@ +apiVersion: lifecycle.keptn.sh/v1alpha2 +kind: KeptnWorkloadInstance +metadata: + annotations: + traceparent: 00-5050e556a9aaf22814aa689d0518f4d3-cbcff966a6d32c39-01 + creationTimestamp: "2022-12-14T13:17:36Z" + generation: 2 + name: podtato-head-podtato-head-entry-0.2.7 + namespace: podtato-kubectl + ownerReferences: + - apiVersion: lifecycle.keptn.sh/v1alpha2 + blockOwnerDeletion: true + controller: true + kind: KeptnWorkload + name: podtato-head-podtato-head-entry + uid: dcafe814-7f9d-4d50-9a66-f61c81bfe764 + resourceVersion: "226253" + uid: 6987404b-c7b9-40f5-95e9-d5aad55a3f3b +spec: + app: podtato-head + resourceReference: + kind: ReplicaSet + name: podtato-head-entry-6fc8964846 + uid: 2b6e44bf-27e3-4305-a9fb-65d2f412936b + traceId: + traceparent: 00-ecdd1f5a7e1068ac9b0d044aa165ca4c-0ae50f2d844888ab-01 + version: 0.2.7 + workloadName: podtato-head-podtato-head-entry +status: + currentPhase: Completed + deploymentStatus: Succeeded + endTime: "2022-12-14T13:18:41Z" + phaseTraceIDs: + "": + traceparent: 00-ecdd1f5a7e1068ac9b0d044aa165ca4c-ca249d3f6e024547-01 + WorkloadDeploy: + traceparent: 00-ecdd1f5a7e1068ac9b0d044aa165ca4c-3be53185e6024eb4-01 + WorkloadPostDeployEvaluations: + traceparent: 00-ecdd1f5a7e1068ac9b0d044aa165ca4c-0dc305a08a0ccf14-01 + WorkloadPostDeployTasks: + traceparent: 00-ecdd1f5a7e1068ac9b0d044aa165ca4c-4c7cf78cbbc40e14-01 + WorkloadPreDeployEvaluations: + traceparent: 00-ecdd1f5a7e1068ac9b0d044aa165ca4c-5eed0ec5420cfc89-01 + WorkloadPreDeployTasks: + traceparent: 00-ecdd1f5a7e1068ac9b0d044aa165ca4c-bef05615fc0138ac-01 + postDeploymentEvaluationStatus: Succeeded + postDeploymentStatus: Succeeded + preDeploymentEvaluationStatus: Succeeded + preDeploymentStatus: Succeeded + startTime: "2022-12-14T13:17:57Z" + status: Succeeded \ No newline at end of file diff --git a/pkg/resource_customizations/lifecycle.keptn.sh/KeptnWorkloadInstance/testdata/progressing.yaml b/pkg/resource_customizations/lifecycle.keptn.sh/KeptnWorkloadInstance/testdata/progressing.yaml new file mode 100644 index 0000000..b263c95 --- /dev/null +++ b/pkg/resource_customizations/lifecycle.keptn.sh/KeptnWorkloadInstance/testdata/progressing.yaml @@ -0,0 +1,50 @@ +apiVersion: lifecycle.keptn.sh/v1alpha2 +kind: KeptnWorkloadInstance +metadata: + annotations: + traceparent: 00-5050e556a9aaf22814aa689d0518f4d3-cbcff966a6d32c39-01 + creationTimestamp: "2022-12-14T13:17:36Z" + generation: 2 + name: podtato-head-podtato-head-entry-0.2.7 + namespace: podtato-kubectl + ownerReferences: + - apiVersion: lifecycle.keptn.sh/v1alpha2 + blockOwnerDeletion: true + controller: true + kind: KeptnWorkload + name: podtato-head-podtato-head-entry + uid: dcafe814-7f9d-4d50-9a66-f61c81bfe764 + resourceVersion: "226253" + uid: 6987404b-c7b9-40f5-95e9-d5aad55a3f3b +spec: + app: podtato-head + resourceReference: + kind: ReplicaSet + name: podtato-head-entry-6fc8964846 + uid: 2b6e44bf-27e3-4305-a9fb-65d2f412936b + traceId: + traceparent: 00-ecdd1f5a7e1068ac9b0d044aa165ca4c-0ae50f2d844888ab-01 + version: 0.2.7 + workloadName: podtato-head-podtato-head-entry +status: + currentPhase: Completed + deploymentStatus: Succeeded + phaseTraceIDs: + "": + traceparent: 
00-ecdd1f5a7e1068ac9b0d044aa165ca4c-ca249d3f6e024547-01 + WorkloadDeploy: + traceparent: 00-ecdd1f5a7e1068ac9b0d044aa165ca4c-3be53185e6024eb4-01 + WorkloadPostDeployEvaluations: + traceparent: 00-ecdd1f5a7e1068ac9b0d044aa165ca4c-0dc305a08a0ccf14-01 + WorkloadPostDeployTasks: + traceparent: 00-ecdd1f5a7e1068ac9b0d044aa165ca4c-4c7cf78cbbc40e14-01 + WorkloadPreDeployEvaluations: + traceparent: 00-ecdd1f5a7e1068ac9b0d044aa165ca4c-5eed0ec5420cfc89-01 + WorkloadPreDeployTasks: + traceparent: 00-ecdd1f5a7e1068ac9b0d044aa165ca4c-bef05615fc0138ac-01 + postDeploymentEvaluationStatus: Progressing + postDeploymentStatus: Progressing + preDeploymentEvaluationStatus: Succeeded + preDeploymentStatus: Succeeded + startTime: "2022-12-14T13:17:57Z" + status: Progressing \ No newline at end of file diff --git a/pkg/resource_customizations/mariadb.mmontes.io/MariaDB/health.lua b/pkg/resource_customizations/mariadb.mmontes.io/MariaDB/health.lua new file mode 100644 index 0000000..b0278bb --- /dev/null +++ b/pkg/resource_customizations/mariadb.mmontes.io/MariaDB/health.lua @@ -0,0 +1,25 @@ +local health_status = {} + +if obj.status ~= nil and obj.status.conditions ~= nil then + + for i, condition in ipairs(obj.status.conditions) do + + health_status.message = condition.message + + if condition.status == "False" then + if condition.reason == "Failed" then + health_status.status = "Degraded" + return health_status + end + health_status.status = "Progressing" + return health_status + end + end + + health_status.status = "Healthy" + return health_status +end + +health_status.status = "Progressing" +health_status.message = "No status info available" +return health_status diff --git a/pkg/resource_customizations/mariadb.mmontes.io/MariaDB/health_test.yaml b/pkg/resource_customizations/mariadb.mmontes.io/MariaDB/health_test.yaml new file mode 100644 index 0000000..f3dba1a --- /dev/null +++ b/pkg/resource_customizations/mariadb.mmontes.io/MariaDB/health_test.yaml @@ -0,0 +1,25 @@ +tests: +- healthStatus: + status: Progressing + message: "No status info available" + inputPath: testdata/no_status.yaml +- healthStatus: + status: Healthy + message: "Running" + inputPath: testdata/statefulset_ready.yaml +- healthStatus: + status: Progressing + message: "Not ready" + inputPath: testdata/statefulset_not_ready.yaml +- healthStatus: + status: Healthy + message: "Running" + inputPath: testdata/restore_complete.yaml +- healthStatus: + status: Progressing + message: "Restoring backup" + inputPath: testdata/restore_not_complete.yaml +- healthStatus: + status: Degraded + message: "Error creating ConfigMap" + inputPath: testdata/mariadb_error.yaml diff --git a/pkg/resource_customizations/mariadb.mmontes.io/MariaDB/testdata/mariadb_error.yaml b/pkg/resource_customizations/mariadb.mmontes.io/MariaDB/testdata/mariadb_error.yaml new file mode 100644 index 0000000..030391d --- /dev/null +++ b/pkg/resource_customizations/mariadb.mmontes.io/MariaDB/testdata/mariadb_error.yaml @@ -0,0 +1,27 @@ +apiVersion: mariadb.mmontes.io/v1alpha1 +kind: MariaDB +metadata: + name: mariadb-server +spec: + rootPasswordSecretKeyRef: + name: mariadb + key: root-password + image: + repository: mariadb + tag: "10.7.4" + pullPolicy: IfNotPresent + port: 3306 + volumeClaimTemplate: + resources: + requests: + storage: 100Mi + storageClassName: standard + accessModes: + - ReadWriteOnce +status: + conditions: + - lastTransitionTime: '2023-04-20T15:31:15Z' + message: Error creating ConfigMap + reason: Failed + status: 'False' + type: Ready diff --git 
a/pkg/resource_customizations/mariadb.mmontes.io/MariaDB/testdata/no_status.yaml b/pkg/resource_customizations/mariadb.mmontes.io/MariaDB/testdata/no_status.yaml new file mode 100644 index 0000000..276d09b --- /dev/null +++ b/pkg/resource_customizations/mariadb.mmontes.io/MariaDB/testdata/no_status.yaml @@ -0,0 +1,22 @@ +apiVersion: mariadb.mmontes.io/v1alpha1 +kind: MariaDB +metadata: + name: mariadb-server +spec: + rootPasswordSecretKeyRef: + name: mariadb + key: root-password + image: + repository: mariadb + tag: "10.7.4" + pullPolicy: IfNotPresent + port: 3306 + volumeClaimTemplate: + resources: + requests: + storage: 100Mi + storageClassName: standard + accessModes: + - ReadWriteOnce +status: + revision: 0 diff --git a/pkg/resource_customizations/mariadb.mmontes.io/MariaDB/testdata/restore_complete.yaml b/pkg/resource_customizations/mariadb.mmontes.io/MariaDB/testdata/restore_complete.yaml new file mode 100644 index 0000000..7a9358c --- /dev/null +++ b/pkg/resource_customizations/mariadb.mmontes.io/MariaDB/testdata/restore_complete.yaml @@ -0,0 +1,32 @@ +apiVersion: mariadb.mmontes.io/v1alpha1 +kind: MariaDB +metadata: + name: mariadb-server +spec: + rootPasswordSecretKeyRef: + name: mariadb + key: root-password + image: + repository: mariadb + tag: "10.7.4" + pullPolicy: IfNotPresent + port: 3306 + volumeClaimTemplate: + resources: + requests: + storage: 100Mi + storageClassName: standard + accessModes: + - ReadWriteOnce +status: + conditions: + - lastTransitionTime: "2023-04-05T14:18:01Z" + message: Ready + reason: RestoreComplete + status: "True" + type: Bootstrapped + - lastTransitionTime: "2023-04-05T14:18:02Z" + message: Running + reason: RestoreComplete + status: "True" + type: Ready diff --git a/pkg/resource_customizations/mariadb.mmontes.io/MariaDB/testdata/restore_not_complete.yaml b/pkg/resource_customizations/mariadb.mmontes.io/MariaDB/testdata/restore_not_complete.yaml new file mode 100644 index 0000000..a3f600e --- /dev/null +++ b/pkg/resource_customizations/mariadb.mmontes.io/MariaDB/testdata/restore_not_complete.yaml @@ -0,0 +1,32 @@ +apiVersion: mariadb.mmontes.io/v1alpha1 +kind: MariaDB +metadata: + name: mariadb-server +spec: + rootPasswordSecretKeyRef: + name: mariadb + key: root-password + image: + repository: mariadb + tag: "10.7.4" + pullPolicy: IfNotPresent + port: 3306 + volumeClaimTemplate: + resources: + requests: + storage: 100Mi + storageClassName: standard + accessModes: + - ReadWriteOnce +status: + conditions: + - lastTransitionTime: "2023-04-05T14:18:01Z" + message: Restoring backup + reason: RestoreNotComplete + status: "False" + type: Ready + - lastTransitionTime: "2023-04-05T14:18:02Z" + message: Not ready + reason: RestoreNotComplete + status: "False" + type: Bootstrapped diff --git a/pkg/resource_customizations/mariadb.mmontes.io/MariaDB/testdata/statefulset_not_ready.yaml b/pkg/resource_customizations/mariadb.mmontes.io/MariaDB/testdata/statefulset_not_ready.yaml new file mode 100644 index 0000000..96d46b4 --- /dev/null +++ b/pkg/resource_customizations/mariadb.mmontes.io/MariaDB/testdata/statefulset_not_ready.yaml @@ -0,0 +1,27 @@ +apiVersion: mariadb.mmontes.io/v1alpha1 +kind: MariaDB +metadata: + name: mariadb-server +spec: + rootPasswordSecretKeyRef: + name: mariadb + key: root-password + image: + repository: mariadb + tag: "10.7.4" + pullPolicy: IfNotPresent + port: 3306 + volumeClaimTemplate: + resources: + requests: + storage: 100Mi + storageClassName: standard + accessModes: + - ReadWriteOnce +status: + conditions: + - lastTransitionTime: 
"2023-04-05T14:18:01Z" + message: Not ready + reason: StatefulSetNotReady + status: "False" + type: Ready diff --git a/pkg/resource_customizations/mariadb.mmontes.io/MariaDB/testdata/statefulset_ready.yaml b/pkg/resource_customizations/mariadb.mmontes.io/MariaDB/testdata/statefulset_ready.yaml new file mode 100644 index 0000000..e4870b0 --- /dev/null +++ b/pkg/resource_customizations/mariadb.mmontes.io/MariaDB/testdata/statefulset_ready.yaml @@ -0,0 +1,27 @@ +apiVersion: mariadb.mmontes.io/v1alpha1 +kind: MariaDB +metadata: + name: mariadb-server +spec: + rootPasswordSecretKeyRef: + name: mariadb + key: root-password + image: + repository: mariadb + tag: "10.7.4" + pullPolicy: IfNotPresent + port: 3306 + volumeClaimTemplate: + resources: + requests: + storage: 100Mi + storageClassName: standard + accessModes: + - ReadWriteOnce +status: + conditions: + - lastTransitionTime: "2023-04-05T14:18:01Z" + message: Running + reason: StatefulSetReady + status: "True" + type: Ready diff --git a/pkg/resource_customizations/minio.min.io/Tenant/health.lua b/pkg/resource_customizations/minio.min.io/Tenant/health.lua new file mode 100644 index 0000000..a62e3d2 --- /dev/null +++ b/pkg/resource_customizations/minio.min.io/Tenant/health.lua @@ -0,0 +1,61 @@ +local health_status = {} +if obj.status ~= nil then + if obj.status.currentState ~= nil then + if obj.status.currentState == "Initialized" then + health_status.status = "Healthy" + health_status.message = obj.status.currentState + return health_status + end + if obj.status.currentState:find("^Provisioning") ~= nil then + health_status.status = "Progressing" + health_status.message = obj.status.currentState + return health_status + end + if obj.status.currentState:find("^Waiting") ~= nil then + health_status.status = "Progressing" + health_status.message = obj.status.currentState + return health_status + end + if obj.status.currentState:find("^Updating") ~= nil then + health_status.status = "Progressing" + health_status.message = obj.status.currentState + return health_status + end + if obj.status.currentState == "Restarting MinIO" then + health_status.status = "Progressing" + health_status.message = obj.status.currentState + return health_status + end + if obj.status.currentState == "Statefulset not controlled by operator" then + health_status.status = "Degraded" + health_status.message = obj.status.currentState + return health_status + end + if obj.status.currentState == "Another MinIO Tenant already exists in the namespace" then + health_status.status = "Degraded" + health_status.message = obj.status.currentState + return health_status + end + if obj.status.currentState == "Tenant credentials are not set properly" then + health_status.status = "Degraded" + health_status.message = obj.status.currentState + return health_status + end + if obj.status.currentState == "Different versions across MinIO Pools" then + health_status.status = "Degraded" + health_status.message = obj.status.currentState + return health_status + end + if obj.status.currentState == "Pool Decommissioning Not Allowed" then + health_status.status = "Degraded" + health_status.message = obj.status.currentState + return health_status + end + health_status.status = "Progressing" + health_status.message = obj.status.currentState + return health_status + end +end +health_status.status = "Progressing" +health_status.message = "No status info available" +return health_status diff --git a/pkg/resource_customizations/minio.min.io/Tenant/health_test.yaml 
b/pkg/resource_customizations/minio.min.io/Tenant/health_test.yaml new file mode 100644 index 0000000..c20b410 --- /dev/null +++ b/pkg/resource_customizations/minio.min.io/Tenant/health_test.yaml @@ -0,0 +1,45 @@ +tests: +- healthStatus: + status: Healthy + message: "Initialized" + inputPath: testdata/initialized.yaml +- healthStatus: + status: Progressing + message: "Provisioning MinIO Cluster IP Service" + inputPath: testdata/provisioning.yaml +- healthStatus: + status: Progressing + message: "Waiting for Pods to be ready" + inputPath: testdata/waiting.yaml +- healthStatus: + status: Progressing + message: "Updating MinIO Version" + inputPath: testdata/updating.yaml +- healthStatus: + status: Degraded + message: "Statefulset not controlled by operator" + inputPath: testdata/out_of_control.yaml +- healthStatus: + status: Degraded + message: "Another MinIO Tenant already exists in the namespace" + inputPath: testdata/another_tenant_exists.yaml +- healthStatus: + status: Degraded + message: "Tenant credentials are not set properly" + inputPath: testdata/incorrect_tenant_credentials.yaml +- healthStatus: + status: Degraded + message: "Different versions across MinIO Pools" + inputPath: testdata/versions_mismatch.yaml +- healthStatus: + status: Degraded + message: "Pool Decommissioning Not Allowed" + inputPath: testdata/pool_decommissioning_not_allowed.yaml +- healthStatus: + status: Progressing + message: "" + inputPath: testdata/unknown_status_message.yaml +- healthStatus: + status: Progressing + message: "No status info available" + inputPath: testdata/no_status.yaml diff --git a/pkg/resource_customizations/minio.min.io/Tenant/testdata/another_tenant_exists.yaml b/pkg/resource_customizations/minio.min.io/Tenant/testdata/another_tenant_exists.yaml new file mode 100644 index 0000000..5ca4d78 --- /dev/null +++ b/pkg/resource_customizations/minio.min.io/Tenant/testdata/another_tenant_exists.yaml @@ -0,0 +1,13 @@ +apiVersion: minio.min.io/v2 +kind: Tenant +metadata: + name: minio-tenant +spec: + image: minio/minio:latest + pools: + - name: pool-0 + servers: 1 + volumesPerServer: 4 +status: + revision: 0 + currentState: Another MinIO Tenant already exists in the namespace diff --git a/pkg/resource_customizations/minio.min.io/Tenant/testdata/incorrect_tenant_credentials.yaml b/pkg/resource_customizations/minio.min.io/Tenant/testdata/incorrect_tenant_credentials.yaml new file mode 100644 index 0000000..18e3140 --- /dev/null +++ b/pkg/resource_customizations/minio.min.io/Tenant/testdata/incorrect_tenant_credentials.yaml @@ -0,0 +1,13 @@ +apiVersion: minio.min.io/v2 +kind: Tenant +metadata: + name: minio-tenant +spec: + image: minio/minio:latest + pools: + - name: pool-0 + servers: 1 + volumesPerServer: 4 +status: + revision: 0 + currentState: Tenant credentials are not set properly diff --git a/pkg/resource_customizations/minio.min.io/Tenant/testdata/initialized.yaml b/pkg/resource_customizations/minio.min.io/Tenant/testdata/initialized.yaml new file mode 100644 index 0000000..74018d3 --- /dev/null +++ b/pkg/resource_customizations/minio.min.io/Tenant/testdata/initialized.yaml @@ -0,0 +1,13 @@ +apiVersion: minio.min.io/v2 +kind: Tenant +metadata: + name: minio-tenant +spec: + image: minio/minio:latest + pools: + - name: pool-0 + servers: 1 + volumesPerServer: 4 +status: + revision: 0 + currentState: Initialized diff --git a/pkg/resource_customizations/minio.min.io/Tenant/testdata/no_status.yaml b/pkg/resource_customizations/minio.min.io/Tenant/testdata/no_status.yaml new file mode 100644 index 
0000000..3e58e86 --- /dev/null +++ b/pkg/resource_customizations/minio.min.io/Tenant/testdata/no_status.yaml @@ -0,0 +1,12 @@ +apiVersion: minio.min.io/v2 +kind: Tenant +metadata: + name: minio-tenant +spec: + image: minio/minio:latest + pools: + - name: pool-0 + servers: 1 + volumesPerServer: 4 +status: + revision: 0 diff --git a/pkg/resource_customizations/minio.min.io/Tenant/testdata/out_of_control.yaml b/pkg/resource_customizations/minio.min.io/Tenant/testdata/out_of_control.yaml new file mode 100644 index 0000000..ea25763 --- /dev/null +++ b/pkg/resource_customizations/minio.min.io/Tenant/testdata/out_of_control.yaml @@ -0,0 +1,13 @@ +apiVersion: minio.min.io/v2 +kind: Tenant +metadata: + name: minio-tenant +spec: + image: minio/minio:latest + pools: + - name: pool-0 + servers: 1 + volumesPerServer: 4 +status: + revision: 0 + currentState: Statefulset not controlled by operator diff --git a/pkg/resource_customizations/minio.min.io/Tenant/testdata/pool_decommissioning_not_allowed.yaml b/pkg/resource_customizations/minio.min.io/Tenant/testdata/pool_decommissioning_not_allowed.yaml new file mode 100644 index 0000000..b4d94fb --- /dev/null +++ b/pkg/resource_customizations/minio.min.io/Tenant/testdata/pool_decommissioning_not_allowed.yaml @@ -0,0 +1,13 @@ +apiVersion: minio.min.io/v2 +kind: Tenant +metadata: + name: minio-tenant +spec: + image: minio/minio:latest + pools: + - name: pool-0 + servers: 1 + volumesPerServer: 4 +status: + revision: 0 + currentState: Pool Decommissioning Not Allowed diff --git a/pkg/resource_customizations/minio.min.io/Tenant/testdata/provisioning.yaml b/pkg/resource_customizations/minio.min.io/Tenant/testdata/provisioning.yaml new file mode 100644 index 0000000..05c9ba3 --- /dev/null +++ b/pkg/resource_customizations/minio.min.io/Tenant/testdata/provisioning.yaml @@ -0,0 +1,13 @@ +apiVersion: minio.min.io/v2 +kind: Tenant +metadata: + name: minio-tenant +spec: + image: minio/minio:latest + pools: + - name: pool-0 + servers: 1 + volumesPerServer: 4 +status: + revision: 0 + currentState: Provisioning MinIO Cluster IP Service diff --git a/pkg/resource_customizations/minio.min.io/Tenant/testdata/restarting_minio.yaml b/pkg/resource_customizations/minio.min.io/Tenant/testdata/restarting_minio.yaml new file mode 100644 index 0000000..11c37d5 --- /dev/null +++ b/pkg/resource_customizations/minio.min.io/Tenant/testdata/restarting_minio.yaml @@ -0,0 +1,13 @@ +apiVersion: minio.min.io/v2 +kind: Tenant +metadata: + name: minio-tenant +spec: + image: minio/minio:latest + pools: + - name: pool-0 + servers: 1 + volumesPerServer: 4 +status: + revision: 0 + currentState: Restarting MinIO diff --git a/pkg/resource_customizations/minio.min.io/Tenant/testdata/unknown_status_message.yaml b/pkg/resource_customizations/minio.min.io/Tenant/testdata/unknown_status_message.yaml new file mode 100644 index 0000000..862560e --- /dev/null +++ b/pkg/resource_customizations/minio.min.io/Tenant/testdata/unknown_status_message.yaml @@ -0,0 +1,13 @@ +apiVersion: minio.min.io/v2 +kind: Tenant +metadata: + name: minio-tenant +spec: + image: minio/minio:latest + pools: + - name: pool-0 + servers: 1 + volumesPerServer: 4 +status: + revision: 0 + currentState: "" diff --git a/pkg/resource_customizations/minio.min.io/Tenant/testdata/updating.yaml b/pkg/resource_customizations/minio.min.io/Tenant/testdata/updating.yaml new file mode 100644 index 0000000..a32c18d --- /dev/null +++ b/pkg/resource_customizations/minio.min.io/Tenant/testdata/updating.yaml @@ -0,0 +1,13 @@ +apiVersion: minio.min.io/v2 +kind: 
Tenant +metadata: + name: minio-tenant +spec: + image: minio/minio:latest + pools: + - name: pool-0 + servers: 1 + volumesPerServer: 4 +status: + revision: 0 + currentState: Updating MinIO Version diff --git a/pkg/resource_customizations/minio.min.io/Tenant/testdata/versions_mismatch.yaml b/pkg/resource_customizations/minio.min.io/Tenant/testdata/versions_mismatch.yaml new file mode 100644 index 0000000..31f79d7 --- /dev/null +++ b/pkg/resource_customizations/minio.min.io/Tenant/testdata/versions_mismatch.yaml @@ -0,0 +1,13 @@ +apiVersion: minio.min.io/v2 +kind: Tenant +metadata: + name: minio-tenant +spec: + image: minio/minio:latest + pools: + - name: pool-0 + servers: 1 + volumesPerServer: 4 +status: + revision: 0 + currentState: Different versions across MinIO Pools diff --git a/pkg/resource_customizations/minio.min.io/Tenant/testdata/waiting.yaml b/pkg/resource_customizations/minio.min.io/Tenant/testdata/waiting.yaml new file mode 100644 index 0000000..3476622 --- /dev/null +++ b/pkg/resource_customizations/minio.min.io/Tenant/testdata/waiting.yaml @@ -0,0 +1,13 @@ +apiVersion: minio.min.io/v2 +kind: Tenant +metadata: + name: minio-tenant +spec: + image: minio/minio:latest + pools: + - name: pool-0 + servers: 1 + volumesPerServer: 4 +status: + revision: 0 + currentState: Waiting for Pods to be ready diff --git a/pkg/resource_customizations/monitoring.coreos.com/Prometheus/health.lua b/pkg/resource_customizations/monitoring.coreos.com/Prometheus/health.lua new file mode 100644 index 0000000..b0b052c --- /dev/null +++ b/pkg/resource_customizations/monitoring.coreos.com/Prometheus/health.lua @@ -0,0 +1,23 @@ +local hs={ status = "Progressing", message = "Waiting for initialization" } + +if obj.status ~= nil then + if obj.status.conditions ~= nil then + for i, condition in ipairs(obj.status.conditions) do + + if condition.type == "Available" and condition.status ~= "True" then + if condition.reason == "SomePodsNotReady" then + hs.status = "Progressing" + else + hs.status = "Degraded" + end + hs.message = condition.message or condition.reason + end + if condition.type == "Available" and condition.status == "True" then + hs.status = "Healthy" + hs.message = "All instances are available" + end + end + end +end + +return hs \ No newline at end of file diff --git a/pkg/resource_customizations/monitoring.coreos.com/Prometheus/health_test.yaml b/pkg/resource_customizations/monitoring.coreos.com/Prometheus/health_test.yaml new file mode 100644 index 0000000..91e135e --- /dev/null +++ b/pkg/resource_customizations/monitoring.coreos.com/Prometheus/health_test.yaml @@ -0,0 +1,13 @@ +tests: + - healthStatus: + status: Healthy + message: "All instances are available" + inputPath: testdata/healthy.yaml + - healthStatus: + status: Progressing + message: "SomePodsNotReady" + inputPath: testdata/progressing.yaml + - healthStatus: + status: Degraded + message: "shard 0: pod prometheus-prometheus-stack-kube-prom-prometheus-0: 0/5 nodes are available: 2 node(s) didn't match Pod's node affinity/selector, 3 node(s) were unschedulable.\nshard 0: pod prometheus-prometheus-stack-kube-prom-prometheus-1: 0/5 nodes are available: 2 node(s) didn't match Pod's node affinity/selector, 3 node(s) were unschedulable." 
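The long expected message above is the `Available` condition's `message` field, which the Prometheus health.lua added just above surfaces verbatim (the entry's `inputPath` continues below); when `message` is absent, as in the progressing fixture, the script falls back to the condition's `reason` via Lua's `or` idiom. A small self-contained illustration of that fallback:

-- The progressing fixture's Available condition carries only a reason:
local condition = { reason = "SomePodsNotReady" }
print(condition.message or condition.reason)   --> SomePodsNotReady
-- An explicit message, as in the degraded fixture, wins over the reason:
condition = { reason = "NoPodReady", message = "0/5 nodes are available" }
print(condition.message or condition.reason)   --> 0/5 nodes are available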
+ inputPath: testdata/degraded.yaml diff --git a/pkg/resource_customizations/monitoring.coreos.com/Prometheus/testdata/degraded.yaml b/pkg/resource_customizations/monitoring.coreos.com/Prometheus/testdata/degraded.yaml new file mode 100644 index 0000000..d9a2527 --- /dev/null +++ b/pkg/resource_customizations/monitoring.coreos.com/Prometheus/testdata/degraded.yaml @@ -0,0 +1,142 @@ +apiVersion: monitoring.coreos.com/v1 +kind: Prometheus +metadata: + annotations: + argocd.argoproj.io/tracking-id: >- + prometheus-stack:monitoring.coreos.com/Prometheus:prometheus/prometheus-stack-kube-prom-prometheus + creationTimestamp: '2021-12-09T15:51:10Z' + generation: 46 + labels: + app: kube-prometheus-stack-prometheus + app.kubernetes.io/instance: prometheus-stack + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: kube-prometheus-stack + app.kubernetes.io/version: 39.11.0 + chart: kube-prometheus-stack-39.11.0 + heritage: Helm + release: prometheus-stack + name: prometheus-stack-kube-prom-prometheus + namespace: prometheus + resourceVersion: '200320271' + uid: 6f2e1016-926d-44e7-945b-dec4c975595b +spec: + additionalScrapeConfigs: + key: prometheus-additional.yaml + name: additional-scrape-configs + alerting: + alertmanagers: + - apiVersion: v2 + name: prometheus-stack-kube-prom-alertmanager + namespace: prometheus + pathPrefix: / + port: http-web + containers: + - name: prometheus + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsNonRoot: true + - name: config-reloader + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsNonRoot: true + enableAdminAPI: false + evaluationInterval: 30s + externalUrl: 'http://prometheus-stack-kube-prom-prometheus.prometheus:9090' + image: 'quay.io/prometheus/prometheus:v2.37.0' + imagePullSecrets: + - name: mcps-registry-image-pull-secret + initContainers: + - name: init-config-reloader + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsNonRoot: true + listenLocal: false + logFormat: logfmt + logLevel: info + paused: false + podMonitorNamespaceSelector: {} + podMonitorSelector: {} + portName: http-web + probeNamespaceSelector: {} + probeSelector: {} + replicas: 2 + resources: + requests: + memory: 700Mi + retention: 6h + routePrefix: / + ruleNamespaceSelector: {} + ruleSelector: {} + scrapeInterval: 10s + securityContext: + fsGroup: 2000 + runAsGroup: 2000 + runAsNonRoot: true + runAsUser: 1000 + serviceAccountName: prometheus-stack-kube-prom-prometheus + serviceMonitorNamespaceSelector: {} + serviceMonitorSelector: {} + shards: 1 + storage: + volumeClaimTemplate: + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 100Gi + storageClassName: default + topologySpreadConstraints: + - labelSelector: + matchLabels: + app.kubernetes.io/name: prometheus + maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway + - labelSelector: + matchLabels: + app.kubernetes.io/name: prometheus + maxSkew: 1 + topologyKey: topology.kubernetes.io/zone + whenUnsatisfiable: DoNotSchedule + version: v2.37.0 +status: + availableReplicas: 0 + conditions: + - lastTransitionTime: '2022-09-02T14:55:03Z' + message: >- + shard 0: pod prometheus-prometheus-stack-kube-prom-prometheus-0: 0/5 + nodes are available: 2 node(s) didn't match Pod's node + affinity/selector, 3 node(s) were unschedulable. 
+ + shard 0: pod prometheus-prometheus-stack-kube-prom-prometheus-1: 0/5 + nodes are available: 2 node(s) didn't match Pod's node + affinity/selector, 3 node(s) were unschedulable. + reason: NoPodReady + status: 'False' + type: Available + - lastTransitionTime: '2022-09-02T09:57:03Z' + status: 'True' + type: Reconciled + paused: false + replicas: 2 + shardStatuses: + - availableReplicas: 0 + replicas: 2 + shardID: '0' + unavailableReplicas: 2 + updatedReplicas: 2 + unavailableReplicas: 2 + updatedReplicas: 2 diff --git a/pkg/resource_customizations/monitoring.coreos.com/Prometheus/testdata/healthy.yaml b/pkg/resource_customizations/monitoring.coreos.com/Prometheus/testdata/healthy.yaml new file mode 100644 index 0000000..98bf17b --- /dev/null +++ b/pkg/resource_customizations/monitoring.coreos.com/Prometheus/testdata/healthy.yaml @@ -0,0 +1,130 @@ +apiVersion: monitoring.coreos.com/v1 +kind: Prometheus +metadata: + annotations: + argocd.argoproj.io/tracking-id: prometheus-stack:monitoring.coreos.com/Prometheus:prometheus/prometheus-stack-kube-prom-prometheus + creationTimestamp: "2021-12-09T15:51:10Z" + generation: 46 + labels: + app: kube-prometheus-stack-prometheus + app.kubernetes.io/instance: prometheus-stack + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: kube-prometheus-stack + app.kubernetes.io/version: 39.11.0 + chart: kube-prometheus-stack-39.11.0 + heritage: Helm + release: prometheus-stack + name: prometheus-stack-kube-prom-prometheus + namespace: prometheus + resourceVersion: "200165695" + uid: 6f2e1016-926d-44e7-945b-dec4c975595b +spec: + additionalScrapeConfigs: + key: prometheus-additional.yaml + name: additional-scrape-configs + alerting: + alertmanagers: + - apiVersion: v2 + name: prometheus-stack-kube-prom-alertmanager + namespace: prometheus + pathPrefix: / + port: http-web + containers: + - name: prometheus + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsNonRoot: true + - name: config-reloader + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsNonRoot: true + enableAdminAPI: false + evaluationInterval: 30s + externalUrl: http://prometheus-stack-kube-prom-prometheus.prometheus:9090 + image: quay.io/prometheus/prometheus:v2.37.0 + initContainers: + - name: init-config-reloader + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsNonRoot: true + listenLocal: false + logFormat: logfmt + logLevel: info + paused: false + podMonitorNamespaceSelector: {} + podMonitorSelector: {} + portName: http-web + probeNamespaceSelector: {} + probeSelector: {} + replicas: 2 + resources: + requests: + memory: 700Mi + retention: 6h + routePrefix: / + ruleNamespaceSelector: {} + ruleSelector: {} + scrapeInterval: 10s + securityContext: + fsGroup: 2000 + runAsGroup: 2000 + runAsNonRoot: true + runAsUser: 1000 + serviceAccountName: prometheus-stack-kube-prom-prometheus + serviceMonitorNamespaceSelector: {} + serviceMonitorSelector: {} + shards: 1 + storage: + volumeClaimTemplate: + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 100Gi + storageClassName: default + topologySpreadConstraints: + - labelSelector: + matchLabels: + app.kubernetes.io/name: prometheus + maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway + - labelSelector: + matchLabels: + app.kubernetes.io/name: prometheus + maxSkew: 1 + 
topologyKey: topology.kubernetes.io/zone + whenUnsatisfiable: DoNotSchedule + version: v2.37.0 +status: + availableReplicas: 2 + conditions: + - lastTransitionTime: "2022-09-01T19:54:00Z" + status: "True" + type: Available + - lastTransitionTime: "2022-09-02T09:57:03Z" + status: "True" + type: Reconciled + paused: false + replicas: 2 + shardStatuses: + - availableReplicas: 2 + replicas: 2 + shardID: "0" + unavailableReplicas: 0 + updatedReplicas: 2 + unavailableReplicas: 0 + updatedReplicas: 2 diff --git a/pkg/resource_customizations/monitoring.coreos.com/Prometheus/testdata/progressing.yaml b/pkg/resource_customizations/monitoring.coreos.com/Prometheus/testdata/progressing.yaml new file mode 100644 index 0000000..3700eb7 --- /dev/null +++ b/pkg/resource_customizations/monitoring.coreos.com/Prometheus/testdata/progressing.yaml @@ -0,0 +1,132 @@ +apiVersion: monitoring.coreos.com/v1 +kind: Prometheus +metadata: + annotations: + argocd.argoproj.io/tracking-id: >- + prometheus-stack:monitoring.coreos.com/Prometheus:prometheus/prometheus-stack-kube-prom-prometheus + creationTimestamp: '2021-12-09T15:51:10Z' + generation: 46 + labels: + app: kube-prometheus-stack-prometheus + app.kubernetes.io/instance: prometheus-stack + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: kube-prometheus-stack + app.kubernetes.io/version: 39.11.0 + chart: kube-prometheus-stack-39.11.0 + heritage: Helm + release: prometheus-stack + name: prometheus-stack-kube-prom-prometheus + namespace: prometheus + resourceVersion: '200307978' + uid: 6f2e1016-926d-44e7-945b-dec4c975595b +spec: + additionalScrapeConfigs: + key: prometheus-additional.yaml + name: additional-scrape-configs + alerting: + alertmanagers: + - apiVersion: v2 + name: prometheus-stack-kube-prom-alertmanager + namespace: prometheus + pathPrefix: / + port: http-web + containers: + - name: prometheus + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsNonRoot: true + - name: config-reloader + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsNonRoot: true + enableAdminAPI: false + evaluationInterval: 30s + externalUrl: 'http://prometheus-stack-kube-prom-prometheus.prometheus:9090' + image: 'quay.io/prometheus/prometheus:v2.37.0' + initContainers: + - name: init-config-reloader + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsNonRoot: true + listenLocal: false + logFormat: logfmt + logLevel: info + paused: false + podMonitorNamespaceSelector: {} + podMonitorSelector: {} + portName: http-web + probeNamespaceSelector: {} + probeSelector: {} + replicas: 2 + resources: + requests: + memory: 700Mi + retention: 6h + routePrefix: / + ruleNamespaceSelector: {} + ruleSelector: {} + scrapeInterval: 10s + securityContext: + fsGroup: 2000 + runAsGroup: 2000 + runAsNonRoot: true + runAsUser: 1000 + serviceAccountName: prometheus-stack-kube-prom-prometheus + serviceMonitorNamespaceSelector: {} + serviceMonitorSelector: {} + shards: 1 + storage: + volumeClaimTemplate: + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 100Gi + storageClassName: default + topologySpreadConstraints: + - labelSelector: + matchLabels: + app.kubernetes.io/name: prometheus + maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway + - labelSelector: + matchLabels: + app.kubernetes.io/name: prometheus + maxSkew: 1 + 
topologyKey: topology.kubernetes.io/zone + whenUnsatisfiable: DoNotSchedule + version: v2.37.0 +status: + availableReplicas: 1 + conditions: + - lastTransitionTime: '2022-09-02T14:34:39Z' + reason: SomePodsNotReady + status: Degraded + type: Available + - lastTransitionTime: '2022-09-02T09:57:03Z' + status: 'True' + type: Reconciled + paused: false + replicas: 2 + shardStatuses: + - availableReplicas: 1 + replicas: 2 + shardID: '0' + unavailableReplicas: 1 + updatedReplicas: 1 + unavailableReplicas: 1 + updatedReplicas: 1 diff --git a/pkg/resource_customizations/networking.gke.io/ManagedCertificate/health.lua b/pkg/resource_customizations/networking.gke.io/ManagedCertificate/health.lua new file mode 100644 index 0000000..b46263f --- /dev/null +++ b/pkg/resource_customizations/networking.gke.io/ManagedCertificate/health.lua @@ -0,0 +1,23 @@ +local hs = {} + +if obj.status ~= nil then + if obj.status.domainStatus ~= nil then + for i, domainStatus in ipairs(obj.status.domainStatus) do + if domainStatus.status == "FailedNotVisible" then + hs.status = "Degraded" + hs.message = "At least one certificate has failed to be provisioned" + return hs + end + end + end +end + +if obj.status ~= nil and obj.status.certificateStatus == "Active" then + hs.status = "Healthy" + hs.message = "All certificates are active" + return hs +end + +hs.status = "Progressing" +hs.message = "At least one certificate is still being provisioned" +return hs diff --git a/pkg/resource_customizations/networking.gke.io/ManagedCertificate/health_test.yaml b/pkg/resource_customizations/networking.gke.io/ManagedCertificate/health_test.yaml new file mode 100644 index 0000000..5dbc1fb --- /dev/null +++ b/pkg/resource_customizations/networking.gke.io/ManagedCertificate/health_test.yaml @@ -0,0 +1,13 @@ +tests: + - healthStatus: + status: Progressing + message: "At least one certificate is still being provisioned" + inputPath: testdata/provisioning.yaml + - healthStatus: + status: Degraded + message: "At least one certificate has failed to be provisioned" + inputPath: testdata/failed.yaml + - healthStatus: + status: Healthy + message: "All certificates are active" + inputPath: testdata/active.yaml diff --git a/pkg/resource_customizations/networking.gke.io/ManagedCertificate/testdata/active.yaml b/pkg/resource_customizations/networking.gke.io/ManagedCertificate/testdata/active.yaml new file mode 100644 index 0000000..f28aec6 --- /dev/null +++ b/pkg/resource_customizations/networking.gke.io/ManagedCertificate/testdata/active.yaml @@ -0,0 +1,29 @@ +apiVersion: networking.gke.io/v1beta2 +kind: ManagedCertificate +metadata: + annotations: + kubectl.kubernetes.io/last-applied-configuration: | + {"apiVersion":"networking.gke.io/v1beta2","kind":"ManagedCertificate","metadata":{"annotations":{},"name":"web","namespace":"default"},"spec":{"domains":["argocdtest.micke.me", "argocdtest2.micke.me", "argocdtest3.micke.me"]}} + creationTimestamp: "2020-05-16T19:35:14Z" + generation: 2 + name: web + namespace: default + resourceVersion: "2387" + selfLink: /apis/networking.gke.io/v1beta2/namespaces/default/managedcertificates/web + uid: 8011785f-233f-4e29-a68d-81945a342f08 +spec: + domains: + - argocdtest.micke.me + - argocdtest2.micke.me + - argocdtest3.micke.me +status: + certificateName: mcrt-0a994e84-76b2-4ba3-9819-2a4ec5c0637e + certificateStatus: Active + domainStatus: + - domain: argocdtest.micke.me + status: Active + - domain: argocdtest2.micke.me + status: Active + - domain: argocdtest3.micke.me + status: Active + expireTime: 
"2020-06-21T00:33:40.000-07:00" diff --git a/pkg/resource_customizations/networking.gke.io/ManagedCertificate/testdata/failed.yaml b/pkg/resource_customizations/networking.gke.io/ManagedCertificate/testdata/failed.yaml new file mode 100644 index 0000000..0343e46 --- /dev/null +++ b/pkg/resource_customizations/networking.gke.io/ManagedCertificate/testdata/failed.yaml @@ -0,0 +1,28 @@ +apiVersion: networking.gke.io/v1beta2 +kind: ManagedCertificate +metadata: + annotations: + kubectl.kubernetes.io/last-applied-configuration: | + {"apiVersion":"networking.gke.io/v1beta2","kind":"ManagedCertificate","metadata":{"annotations":{},"name":"web","namespace":"default"},"spec":{"domains":["argocdtest.micke.me", "argocdtest2.micke.me", "argocdtest3.micke.me"]}} + creationTimestamp: "2020-05-16T19:35:14Z" + generation: 2 + name: web + namespace: default + resourceVersion: "2387" + selfLink: /apis/networking.gke.io/v1beta2/namespaces/default/managedcertificates/web + uid: 8011785f-233f-4e29-a68d-81945a342f08 +spec: + domains: + - argocdtest.micke.me + - argocdtest2.micke.me + - argocdtest3.micke.me +status: + certificateName: mcrt-0a994e84-76b2-4ba3-9819-2a4ec5c0637e + certificateStatus: Provisioning + domainStatus: + - domain: argocdtest.micke.me + status: Active + - domain: argocdtest2.micke.me + status: Provisioning + - domain: argocdtest3.micke.me + status: FailedNotVisible diff --git a/pkg/resource_customizations/networking.gke.io/ManagedCertificate/testdata/provisioning.yaml b/pkg/resource_customizations/networking.gke.io/ManagedCertificate/testdata/provisioning.yaml new file mode 100644 index 0000000..37e20cc --- /dev/null +++ b/pkg/resource_customizations/networking.gke.io/ManagedCertificate/testdata/provisioning.yaml @@ -0,0 +1,28 @@ +apiVersion: networking.gke.io/v1beta2 +kind: ManagedCertificate +metadata: + annotations: + kubectl.kubernetes.io/last-applied-configuration: | + {"apiVersion":"networking.gke.io/v1beta2","kind":"ManagedCertificate","metadata":{"annotations":{},"name":"web","namespace":"default"},"spec":{"domains":["argocdtest.micke.me", "argocdtest2.micke.me", "argocdtest3.micke.me"]}} + creationTimestamp: "2020-05-16T19:35:14Z" + generation: 2 + name: web + namespace: default + resourceVersion: "2387" + selfLink: /apis/networking.gke.io/v1beta2/namespaces/default/managedcertificates/web + uid: 8011785f-233f-4e29-a68d-81945a342f08 +spec: + domains: + - argocdtest.micke.me + - argocdtest2.micke.me + - argocdtest3.micke.me +status: + certificateName: mcrt-0a994e84-76b2-4ba3-9819-2a4ec5c0637e + certificateStatus: Provisioning + domainStatus: + - domain: argocdtest.micke.me + status: Active + - domain: argocdtest2.micke.me + status: Active + - domain: argocdtest3.micke.me + status: Provisioning diff --git a/pkg/resource_customizations/onepassword.com/OnePasswordItem/health.lua b/pkg/resource_customizations/onepassword.com/OnePasswordItem/health.lua new file mode 100644 index 0000000..bce3f06 --- /dev/null +++ b/pkg/resource_customizations/onepassword.com/OnePasswordItem/health.lua @@ -0,0 +1,21 @@ +local hs = {} +if obj.status ~= nil then + if obj.status.conditions ~= nil then + for i, condition in ipairs(obj.status.conditions) do + if condition.type == "Ready" and condition.status == "True" then + hs.status = "Healthy" + hs.message = condition.message + return hs + end + if condition.type == "Ready" and condition.status == "False" then + hs.status = "Degraded" + hs.message = condition.message + return hs + end + end + end +end + +hs.status = "Progressing" +hs.message = "Waiting for 
controller to report status" +return hs diff --git a/pkg/resource_customizations/onepassword.com/OnePasswordItem/health_test.yaml b/pkg/resource_customizations/onepassword.com/OnePasswordItem/health_test.yaml new file mode 100644 index 0000000..614f588 --- /dev/null +++ b/pkg/resource_customizations/onepassword.com/OnePasswordItem/health_test.yaml @@ -0,0 +1,12 @@ +tests: +- healthStatus: + status: Progressing + message: Waiting for controller to report status + inputPath: testdata/new.yaml +- healthStatus: + status: Degraded + message: Error + inputPath: testdata/degraded.yaml +- healthStatus: + status: Healthy + inputPath: testdata/healthy.yaml diff --git a/pkg/resource_customizations/onepassword.com/OnePasswordItem/testdata/degraded.yaml b/pkg/resource_customizations/onepassword.com/OnePasswordItem/testdata/degraded.yaml new file mode 100644 index 0000000..4f78f5d --- /dev/null +++ b/pkg/resource_customizations/onepassword.com/OnePasswordItem/testdata/degraded.yaml @@ -0,0 +1,13 @@ +apiVersion: onepassword.com/v1 +kind: OnePasswordItem +metadata: + name: degraded-item + namespace: test-namespace +spec: + itemPath: vaults/Test Vault/items/Test Item +status: + conditions: + - lastTransitionTime: '2022-09-24T01:11:54Z' + message: Error + status: 'False' + type: Ready diff --git a/pkg/resource_customizations/onepassword.com/OnePasswordItem/testdata/healthy.yaml b/pkg/resource_customizations/onepassword.com/OnePasswordItem/testdata/healthy.yaml new file mode 100644 index 0000000..efc9d48 --- /dev/null +++ b/pkg/resource_customizations/onepassword.com/OnePasswordItem/testdata/healthy.yaml @@ -0,0 +1,12 @@ +apiVersion: onepassword.com/v1 +kind: OnePasswordItem +metadata: + name: healthy-item + namespace: test-namespace +spec: + itemPath: vaults/Test Vault/items/Test Item +status: + conditions: + - lastTransitionTime: '2022-09-24T01:11:54Z' + status: 'True' + type: Ready diff --git a/pkg/resource_customizations/onepassword.com/OnePasswordItem/testdata/new.yaml b/pkg/resource_customizations/onepassword.com/OnePasswordItem/testdata/new.yaml new file mode 100644 index 0000000..0164ebc --- /dev/null +++ b/pkg/resource_customizations/onepassword.com/OnePasswordItem/testdata/new.yaml @@ -0,0 +1,7 @@ +apiVersion: onepassword.com/v1 +kind: OnePasswordItem +metadata: + name: new-item + namespace: test-namespace +spec: + itemPath: vaults/Test Vault/items/Test Item diff --git a/pkg/resource_customizations/operator.knative.dev/KnativeEventing/health.lua b/pkg/resource_customizations/operator.knative.dev/KnativeEventing/health.lua new file mode 100644 index 0000000..aadf6ca --- /dev/null +++ b/pkg/resource_customizations/operator.knative.dev/KnativeEventing/health.lua @@ -0,0 +1,36 @@ +local health_status = {} +if obj.status ~= nil then + if obj.status.conditions ~= nil then + local numTrue = 0 + local numFalse = 0 + local msg = "" + for i, condition in pairs(obj.status.conditions) do + msg = msg .. i .. ": " .. condition.type .. " | " .. condition.status .. 
"\n" + if condition.type == "Ready" and condition.status == "True" then + numTrue = numTrue + 1 + elseif condition.type == "InstallSucceeded" and condition.status == "True" then + numTrue = numTrue + 1 + elseif condition.type == "Ready" and condition.status == "False" then + numFalse = numFalse + 1 + elseif condition.status == "Unknown" then + numFalse = numFalse + 1 + end + end + if(numFalse > 0) then + health_status.message = msg + health_status.status = "Progressing" + return health_status + elseif(numTrue == 2) then + health_status.message = "KnativeEventing is healthy." + health_status.status = "Healthy" + return health_status + else + health_status.message = msg + health_status.status = "Degraded" + return health_status + end + end +end +health_status.status = "Progressing" +health_status.message = "Waiting for KnativeEventing" +return health_status \ No newline at end of file diff --git a/pkg/resource_customizations/operator.knative.dev/KnativeEventing/health_test.yaml b/pkg/resource_customizations/operator.knative.dev/KnativeEventing/health_test.yaml new file mode 100644 index 0000000..e8d9c4a --- /dev/null +++ b/pkg/resource_customizations/operator.knative.dev/KnativeEventing/health_test.yaml @@ -0,0 +1,17 @@ +tests: +- healthStatus: + status: Progressing + message: "1: InstallSucceeded | True\n2: Ready | Unknown\n" + inputPath: testdata/progressing.yaml +- healthStatus: + status: Progressing + message: "1: InstallSucceeded | True\n2: Ready | False\n" + inputPath: testdata/progressing_ready_false.yaml +- healthStatus: + status: Degraded + message: "1: InstallSucceeded | True\n2: Ready | randomstatus\n" + inputPath: testdata/degraded.yaml +- healthStatus: + status: Healthy + message: "KnativeEventing is healthy." + inputPath: testdata/healthy.yaml diff --git a/pkg/resource_customizations/operator.knative.dev/KnativeEventing/testdata/degraded.yaml b/pkg/resource_customizations/operator.knative.dev/KnativeEventing/testdata/degraded.yaml new file mode 100644 index 0000000..396692a --- /dev/null +++ b/pkg/resource_customizations/operator.knative.dev/KnativeEventing/testdata/degraded.yaml @@ -0,0 +1,25 @@ +apiVersion: operator.knative.dev/v1alpha1 +kind: KnativeEventing +metadata: + finalizers: + - delete-knative-eventing-manifest + generation: 3 + labels: + app: knative-eventing + argocd.argoproj.io/instance: knative-eventing + version: 0.14.1 + name: knative-eventing + namespace: knative-eventing + resourceVersion: "31707154" + selfLink: /apis/operator.knative.dev/v1alpha1/namespaces/knative-eventing/knativeeventings/knative-eventing + uid: bbcb2872-4304-4541-9811-7ce5125ceadf +spec: {} +status: + conditions: + - lastTransitionTime: "2020-04-29T04:52:05Z" + status: "True" + type: InstallSucceeded + - lastTransitionTime: "2020-06-08T10:53:44Z" + status: "randomstatus" + type: Ready + version: 0.14.1 \ No newline at end of file diff --git a/pkg/resource_customizations/operator.knative.dev/KnativeEventing/testdata/healthy.yaml b/pkg/resource_customizations/operator.knative.dev/KnativeEventing/testdata/healthy.yaml new file mode 100644 index 0000000..2bba2e1 --- /dev/null +++ b/pkg/resource_customizations/operator.knative.dev/KnativeEventing/testdata/healthy.yaml @@ -0,0 +1,25 @@ +apiVersion: operator.knative.dev/v1alpha1 +kind: KnativeEventing +metadata: + finalizers: + - delete-knative-eventing-manifest + generation: 3 + labels: + app: knative-eventing + argocd.argoproj.io/instance: knative-eventing + version: 0.14.1 + name: knative-eventing + namespace: knative-eventing + 
resourceVersion: "31707154" + selfLink: /apis/operator.knative.dev/v1alpha1/namespaces/knative-eventing/knativeeventings/knative-eventing + uid: bbcb2872-4304-4541-9811-7ce5125ceadf +spec: {} +status: + conditions: + - lastTransitionTime: "2020-04-29T04:52:05Z" + status: "True" + type: InstallSucceeded + - lastTransitionTime: "2020-06-08T10:53:44Z" + status: "True" + type: Ready + version: 0.14.1 \ No newline at end of file diff --git a/pkg/resource_customizations/operator.knative.dev/KnativeEventing/testdata/progressing.yaml b/pkg/resource_customizations/operator.knative.dev/KnativeEventing/testdata/progressing.yaml new file mode 100644 index 0000000..9c1cadb --- /dev/null +++ b/pkg/resource_customizations/operator.knative.dev/KnativeEventing/testdata/progressing.yaml @@ -0,0 +1,25 @@ +apiVersion: operator.knative.dev/v1alpha1 +kind: KnativeEventing +metadata: + finalizers: + - delete-knative-eventing-manifest + generation: 3 + labels: + app: knative-eventing + argocd.argoproj.io/instance: knative-eventing + version: 0.14.1 + name: knative-eventing + namespace: knative-eventing + resourceVersion: "31707154" + selfLink: /apis/operator.knative.dev/v1alpha1/namespaces/knative-eventing/knativeeventings/knative-eventing + uid: bbcb2872-4304-4541-9811-7ce5125ceadf +spec: {} +status: + conditions: + - lastTransitionTime: "2020-04-29T04:52:05Z" + status: "True" + type: InstallSucceeded + - lastTransitionTime: "2020-06-08T10:53:44Z" + status: "Unknown" + type: Ready + version: 0.14.1 \ No newline at end of file diff --git a/pkg/resource_customizations/operator.knative.dev/KnativeEventing/testdata/progressing_ready_false.yaml b/pkg/resource_customizations/operator.knative.dev/KnativeEventing/testdata/progressing_ready_false.yaml new file mode 100644 index 0000000..5236b57 --- /dev/null +++ b/pkg/resource_customizations/operator.knative.dev/KnativeEventing/testdata/progressing_ready_false.yaml @@ -0,0 +1,25 @@ +apiVersion: operator.knative.dev/v1alpha1 +kind: KnativeEventing +metadata: + finalizers: + - delete-knative-eventing-manifest + generation: 3 + labels: + app: knative-eventing + argocd.argoproj.io/instance: knative-eventing + version: 0.14.1 + name: knative-eventing + namespace: knative-eventing + resourceVersion: "31707154" + selfLink: /apis/operator.knative.dev/v1alpha1/namespaces/knative-eventing/knativeeventings/knative-eventing + uid: bbcb2872-4304-4541-9811-7ce5125ceadf +spec: {} +status: + conditions: + - lastTransitionTime: "2020-04-29T04:52:05Z" + status: "True" + type: InstallSucceeded + - lastTransitionTime: "2020-06-08T10:53:44Z" + status: "False" + type: Ready + version: 0.14.1 \ No newline at end of file diff --git a/pkg/resource_customizations/operator.knative.dev/KnativeServing/health.lua b/pkg/resource_customizations/operator.knative.dev/KnativeServing/health.lua new file mode 100644 index 0000000..e412a15 --- /dev/null +++ b/pkg/resource_customizations/operator.knative.dev/KnativeServing/health.lua @@ -0,0 +1,42 @@ +local health_status = {} +if obj.status ~= nil then + if obj.status.conditions ~= nil then + local numTrue = 0 + local numFalse = 0 + local msg = "" + for i, condition in pairs(obj.status.conditions) do + msg = msg .. i .. ": " .. condition.type .. " | " .. condition.status .. 
"\n" + if condition.type == "Ready" and condition.status == "True" then + numTrue = numTrue + 1 + elseif condition.type == "InstallSucceeded" and condition.status == "True" then + numTrue = numTrue + 1 + elseif condition.type == "DependenciesInstalled" and condition.status == "True" then + numTrue = numTrue + 1 + elseif condition.type == "DeploymentsAvailable" and condition.status == "True" then + numTrue = numTrue + 1 + elseif condition.type == "Ready" and condition.status == "False" then + numFalse = numFalse + 1 + elseif condition.type == "DeploymentsAvailable" and condition.status == "False" then + numFalse = numFalse + 1 + elseif condition.status == "Unknown" then + numFalse = numFalse + 1 + end + end + if(numFalse > 0) then + health_status.message = msg + health_status.status = "Progressing" + return health_status + elseif(numTrue == 4) then + health_status.message = "KnativeServing is healthy." + health_status.status = "Healthy" + return health_status + else + health_status.message = msg + health_status.status = "Degraded" + return health_status + end + end +end +health_status.status = "Progressing" +health_status.message = "Waiting for KnativeServing" +return health_status \ No newline at end of file diff --git a/pkg/resource_customizations/operator.knative.dev/KnativeServing/health_test.yaml b/pkg/resource_customizations/operator.knative.dev/KnativeServing/health_test.yaml new file mode 100644 index 0000000..61fb179 --- /dev/null +++ b/pkg/resource_customizations/operator.knative.dev/KnativeServing/health_test.yaml @@ -0,0 +1,17 @@ +tests: +- healthStatus: + status: Progressing + message: "1: DependenciesInstalled | True\n2: DeploymentsAvailable | True\n3: InstallSucceeded | True\n4: Ready | Unknown\n" + inputPath: testdata/progressing.yaml +- healthStatus: + status: Progressing + message: "1: DependenciesInstalled | True\n2: DeploymentsAvailable | True\n3: InstallSucceeded | True\n4: Ready | False\n" + inputPath: testdata/progressing_ready_false.yaml +- healthStatus: + status: Degraded + message: "1: DependenciesInstalled | True\n2: DeploymentsAvailable | True\n3: InstallSucceeded | True\n4: Ready | randomstatus\n" + inputPath: testdata/degraded.yaml +- healthStatus: + status: Healthy + message: "KnativeServing is healthy." 
+ inputPath: testdata/healthy.yaml diff --git a/pkg/resource_customizations/operator.knative.dev/KnativeServing/testdata/degraded.yaml b/pkg/resource_customizations/operator.knative.dev/KnativeServing/testdata/degraded.yaml new file mode 100644 index 0000000..fe73fcf --- /dev/null +++ b/pkg/resource_customizations/operator.knative.dev/KnativeServing/testdata/degraded.yaml @@ -0,0 +1,32 @@ +apiVersion: operator.knative.dev/v1alpha1 +kind: KnativeServing +metadata: + finalizers: + - knativeservings.operator.knative.dev + - delete-knative-serving-manifest + generation: 2 + labels: + app: knative-serving + argocd.argoproj.io/instance: knative-serving + version: 0.14.1 + name: knative-serving + namespace: knative-serving + resourceVersion: "31845507" + selfLink: /apis/operator.knative.dev/v1alpha1/namespaces/knative-serving/knativeservings/knative-serving + uid: ccbe43aa-ad72-4c2b-a5bb-21b3815d8a9b +spec: {} +status: + conditions: + - lastTransitionTime: "2020-05-18T18:21:29Z" + status: "True" + type: DependenciesInstalled + - lastTransitionTime: "2020-06-08T15:28:11Z" + status: "True" + type: DeploymentsAvailable + - lastTransitionTime: "2020-05-18T18:21:29Z" + status: "True" + type: InstallSucceeded + - lastTransitionTime: "2020-06-08T15:28:11Z" + status: "randomstatus" + type: Ready + version: 0.14.0 \ No newline at end of file diff --git a/pkg/resource_customizations/operator.knative.dev/KnativeServing/testdata/healthy.yaml b/pkg/resource_customizations/operator.knative.dev/KnativeServing/testdata/healthy.yaml new file mode 100644 index 0000000..8001044 --- /dev/null +++ b/pkg/resource_customizations/operator.knative.dev/KnativeServing/testdata/healthy.yaml @@ -0,0 +1,32 @@ +apiVersion: operator.knative.dev/v1alpha1 +kind: KnativeServing +metadata: + finalizers: + - knativeservings.operator.knative.dev + - delete-knative-serving-manifest + generation: 2 + labels: + app: knative-serving + argocd.argoproj.io/instance: knative-serving + version: 0.14.1 + name: knative-serving + namespace: knative-serving + resourceVersion: "31845507" + selfLink: /apis/operator.knative.dev/v1alpha1/namespaces/knative-serving/knativeservings/knative-serving + uid: ccbe43aa-ad72-4c2b-a5bb-21b3815d8a9b +spec: {} +status: + conditions: + - lastTransitionTime: "2020-05-18T18:21:29Z" + status: "True" + type: DependenciesInstalled + - lastTransitionTime: "2020-06-08T15:28:11Z" + status: "True" + type: DeploymentsAvailable + - lastTransitionTime: "2020-05-18T18:21:29Z" + status: "True" + type: InstallSucceeded + - lastTransitionTime: "2020-06-08T15:28:11Z" + status: "True" + type: Ready + version: 0.14.0 \ No newline at end of file diff --git a/pkg/resource_customizations/operator.knative.dev/KnativeServing/testdata/progressing.yaml b/pkg/resource_customizations/operator.knative.dev/KnativeServing/testdata/progressing.yaml new file mode 100644 index 0000000..83a9c69 --- /dev/null +++ b/pkg/resource_customizations/operator.knative.dev/KnativeServing/testdata/progressing.yaml @@ -0,0 +1,32 @@ +apiVersion: operator.knative.dev/v1alpha1 +kind: KnativeServing +metadata: + finalizers: + - knativeservings.operator.knative.dev + - delete-knative-serving-manifest + generation: 2 + labels: + app: knative-serving + argocd.argoproj.io/instance: knative-serving + version: 0.14.1 + name: knative-serving + namespace: knative-serving + resourceVersion: "31845507" + selfLink: /apis/operator.knative.dev/v1alpha1/namespaces/knative-serving/knativeservings/knative-serving + uid: ccbe43aa-ad72-4c2b-a5bb-21b3815d8a9b +spec: {} +status: + 
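+  # Ready is "Unknown" below; the health.lua above tallies Unknown as a
+  # failure, so this fixture is expected to evaluate to Progressing.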
conditions: + - lastTransitionTime: "2020-05-18T18:21:29Z" + status: "True" + type: DependenciesInstalled + - lastTransitionTime: "2020-06-08T15:28:11Z" + status: "True" + type: DeploymentsAvailable + - lastTransitionTime: "2020-05-18T18:21:29Z" + status: "True" + type: InstallSucceeded + - lastTransitionTime: "2020-06-08T15:28:11Z" + status: "Unknown" + type: Ready + version: 0.14.0 \ No newline at end of file diff --git a/pkg/resource_customizations/operator.knative.dev/KnativeServing/testdata/progressing_ready_false.yaml b/pkg/resource_customizations/operator.knative.dev/KnativeServing/testdata/progressing_ready_false.yaml new file mode 100644 index 0000000..3900245 --- /dev/null +++ b/pkg/resource_customizations/operator.knative.dev/KnativeServing/testdata/progressing_ready_false.yaml @@ -0,0 +1,32 @@ +apiVersion: operator.knative.dev/v1alpha1 +kind: KnativeServing +metadata: + finalizers: + - knativeservings.operator.knative.dev + - delete-knative-serving-manifest + generation: 2 + labels: + app: knative-serving + argocd.argoproj.io/instance: knative-serving + version: 0.14.1 + name: knative-serving + namespace: knative-serving + resourceVersion: "31845507" + selfLink: /apis/operator.knative.dev/v1alpha1/namespaces/knative-serving/knativeservings/knative-serving + uid: ccbe43aa-ad72-4c2b-a5bb-21b3815d8a9b +spec: {} +status: + conditions: + - lastTransitionTime: "2020-05-18T18:21:29Z" + status: "True" + type: DependenciesInstalled + - lastTransitionTime: "2020-06-08T15:28:11Z" + status: "True" + type: DeploymentsAvailable + - lastTransitionTime: "2020-05-18T18:21:29Z" + status: "True" + type: InstallSucceeded + - lastTransitionTime: "2020-06-08T15:28:11Z" + status: "False" + type: Ready + version: 0.14.0 diff --git a/pkg/resource_customizations/operator.openshift.io/IngressController/health.lua b/pkg/resource_customizations/operator.openshift.io/IngressController/health.lua new file mode 100644 index 0000000..837a82e --- /dev/null +++ b/pkg/resource_customizations/operator.openshift.io/IngressController/health.lua @@ -0,0 +1,31 @@ +-- healthcheck for IngressController resources +local hs = {} +if obj.status ~= nil then + if obj.status.conditions ~= nil then + -- if the status conditions are present, iterate over them and check their status + for _, condition in pairs(obj.status.conditions) do + if condition.type == "Degraded" and condition.status == "True" then + hs.status = "Degraded" + hs.message = condition.message + return hs + elseif condition.type == "DeploymentReplicasAllAvailable" and condition.status == "False" then + hs.status = "Progressing" + hs.message = condition.message + return hs + elseif condition.type == "Progressing" and condition.status == "True" then + hs.status = "Progressing" + hs.message = condition.reason + return hs + elseif condition.type == "Available" and condition.status == "True" then + hs.status = "Healthy" + hs.message = "IngressController is available" + return hs + end + end + end +end + +-- default status when none of the previous condition matches +hs.status = "Progressing" +hs.message = "Status of IngressController is not known yet" +return hs diff --git a/pkg/resource_customizations/operator.openshift.io/IngressController/health_test.yaml b/pkg/resource_customizations/operator.openshift.io/IngressController/health_test.yaml new file mode 100644 index 0000000..761d0d6 --- /dev/null +++ b/pkg/resource_customizations/operator.openshift.io/IngressController/health_test.yaml @@ -0,0 +1,17 @@ +tests: +- healthStatus: + status: Progressing + message: 
"Status of IngressController is not known yet" + inputPath: testdata/progressing_initialization.yaml +- healthStatus: + status: Progressing + message: "0/1 of replicas are available" + inputPath: testdata/progressing_pod_rollout.yaml +- healthStatus: + status: Degraded + message: "One or more other status conditions indicate a degraded state." + inputPath: testdata/degraded.yaml +- healthStatus: + status: Healthy + message: "IngressController is available" + inputPath: testdata/healthy.yaml diff --git a/pkg/resource_customizations/operator.openshift.io/IngressController/testdata/degraded.yaml b/pkg/resource_customizations/operator.openshift.io/IngressController/testdata/degraded.yaml new file mode 100644 index 0000000..73c7c89 --- /dev/null +++ b/pkg/resource_customizations/operator.openshift.io/IngressController/testdata/degraded.yaml @@ -0,0 +1,103 @@ +--- +apiVersion: operator.openshift.io/v1 +kind: IngressController +metadata: + name: default + namespace: openshift-ingress-operator +spec: + domain: openshift.example.com + endpointPublishingStrategy: + hostNetwork: + httpPort: 80 + httpsPort: 443 + statsPort: 1936 + type: HostNetwork + nodePlacement: + nodeSelector: + matchLabels: + node-role.kubernetes.io/worker: "" + replicas: 1 +status: + availableReplicas: 0 + conditions: + - lastTransitionTime: "2023-01-28T10:05:06Z" + reason: Valid + status: "True" + type: Admitted + - lastTransitionTime: "2023-01-28T10:09:15Z" + status: "True" + type: PodsScheduled + - lastTransitionTime: "2023-01-28T10:05:06Z" + message: The configured endpoint publishing strategy does not include a managed + load balancer + reason: EndpointPublishingStrategyExcludesManagedLoadBalancer + status: "False" + type: LoadBalancerManaged + - lastTransitionTime: "2023-01-28T10:05:06Z" + message: No DNS zones are defined in the cluster dns config. + reason: NoDNSZones + status: "False" + type: DNSManaged + - lastTransitionTime: "2023-01-28T10:05:06Z" + status: "False" + type: Progressing + - lastTransitionTime: "2023-01-28T10:13:55Z" + message: "One or more other status conditions indicate a degraded state." + # message: 'One or more other status conditions indicate a degraded state: CanaryChecksSucceeding=False + # (CanaryChecksRepetitiveFailures: Canary route checks for the default ingress + # controller are failing)' + reason: DegradedConditions + status: "True" + type: Degraded + - lastTransitionTime: "2023-01-28T10:05:06Z" + message: IngressController is upgradeable. 
+ reason: Upgradeable + status: "True" + type: Upgradeable + - lastTransitionTime: "2023-01-28T10:12:55Z" + message: Canary route checks for the default ingress controller are failing + reason: CanaryChecksRepetitiveFailures + status: "False" + type: CanaryChecksSucceeding + domain: openshift.example.com + endpointPublishingStrategy: + hostNetwork: + httpPort: 80 + httpsPort: 443 + protocol: TCP + statsPort: 1936 + type: HostNetwork + namespaceSelector: {} + observedGeneration: 2 + routeSelector: {} + selector: ingresscontroller.operator.openshift.io/deployment-ingresscontroller=default + tlsProfile: + ciphers: + - ECDHE-ECDSA-CHACHA20-POLY1305 + - ECDHE-RSA-CHACHA20-POLY1305 + - ECDHE-ECDSA-AES128-GCM-SHA256 + - ECDHE-RSA-AES128-GCM-SHA256 + - ECDHE-ECDSA-AES256-GCM-SHA384 + - ECDHE-RSA-AES256-GCM-SHA384 + - DHE-RSA-AES128-GCM-SHA256 + - DHE-RSA-AES256-GCM-SHA384 + - ECDHE-ECDSA-AES128-SHA256 + - ECDHE-RSA-AES128-SHA256 + - ECDHE-ECDSA-AES128-SHA + - ECDHE-RSA-AES256-SHA384 + - ECDHE-RSA-AES128-SHA + - ECDHE-ECDSA-AES256-SHA384 + - ECDHE-ECDSA-AES256-SHA + - ECDHE-RSA-AES256-SHA + - DHE-RSA-AES128-SHA256 + - DHE-RSA-AES128-SHA + - DHE-RSA-AES256-SHA256 + - DHE-RSA-AES256-SHA + - AES128-GCM-SHA256 + - AES256-GCM-SHA384 + - AES128-SHA256 + - AES256-SHA256 + - AES128-SHA + - AES256-SHA + - '!DSS' + minTLSVersion: VersionTLS11 diff --git a/pkg/resource_customizations/operator.openshift.io/IngressController/testdata/healthy.yaml b/pkg/resource_customizations/operator.openshift.io/IngressController/testdata/healthy.yaml new file mode 100644 index 0000000..4c7ad76 --- /dev/null +++ b/pkg/resource_customizations/operator.openshift.io/IngressController/testdata/healthy.yaml @@ -0,0 +1,93 @@ +--- +apiVersion: operator.openshift.io/v1 +kind: IngressController +metadata: + name: apps-shard-2 + namespace: openshift-ingress-operator +spec: + domain: openshift-apps-shard-2.example.com + endpointPublishingStrategy: + hostNetwork: + httpPort: 80 + httpsPort: 443 + statsPort: 1936 + type: HostNetwork + nodePlacement: + nodeSelector: + matchLabels: + node-role.kubernetes.io/worker: "" + replicas: 1 +status: + availableReplicas: 1 + conditions: + - lastTransitionTime: "2023-01-28T09:34:36Z" + reason: Valid + status: "True" + type: Admitted + - lastTransitionTime: "2023-01-28T09:43:42Z" + status: "True" + type: PodsScheduled + - lastTransitionTime: "2023-01-28T09:34:36Z" + message: The deployment has Available status condition set to True + reason: DeploymentAvailable + status: "True" + type: DeploymentAvailable + - lastTransitionTime: "2023-01-28T09:34:36Z" + message: Minimum replicas requirement is met + reason: DeploymentMinimumReplicasMet + status: "True" + type: DeploymentReplicasMinAvailable + - lastTransitionTime: "2023-01-28T09:44:36Z" + message: All replicas are available + reason: DeploymentReplicasAvailable + status: "True" + type: DeploymentReplicasAllAvailable + - lastTransitionTime: "2023-01-28T09:34:36Z" + message: The configured endpoint publishing strategy does not include a managed + load balancer + reason: EndpointPublishingStrategyExcludesManagedLoadBalancer + status: "False" + type: LoadBalancerManaged + - lastTransitionTime: "2023-01-28T09:34:36Z" + message: No DNS zones are defined in the cluster dns config. 
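+    # With no Degraded=True or failing-replica conditions in this fixture, the
+    # loop reaches Available=True, which health.lua maps to Healthy.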
+ reason: NoDNSZones + status: "False" + type: DNSManaged + - lastTransitionTime: "2023-01-28T09:34:36Z" + status: "True" + type: Available + - lastTransitionTime: "2023-01-28T09:34:36Z" + status: "False" + type: Progressing + - lastTransitionTime: "2023-01-28T09:34:36Z" + status: "False" + type: Degraded + - lastTransitionTime: "2023-01-28T09:34:36Z" + message: IngressController is upgradeable. + reason: Upgradeable + status: "True" + type: Upgradeable + domain: openshift-apps-shard-2.example.com + endpointPublishingStrategy: + hostNetwork: + httpPort: 80 + httpsPort: 443 + protocol: TCP + statsPort: 1936 + type: HostNetwork + observedGeneration: 5 + selector: ingresscontroller.operator.openshift.io/deployment-ingresscontroller=apps-shard-2 + tlsProfile: + ciphers: + - ECDHE-ECDSA-AES128-GCM-SHA256 + - ECDHE-RSA-AES128-GCM-SHA256 + - ECDHE-ECDSA-AES256-GCM-SHA384 + - ECDHE-RSA-AES256-GCM-SHA384 + - ECDHE-ECDSA-CHACHA20-POLY1305 + - ECDHE-RSA-CHACHA20-POLY1305 + - DHE-RSA-AES128-GCM-SHA256 + - DHE-RSA-AES256-GCM-SHA384 + - TLS_AES_128_GCM_SHA256 + - TLS_AES_256_GCM_SHA384 + - TLS_CHACHA20_POLY1305_SHA256 + minTLSVersion: VersionTLS12 diff --git a/pkg/resource_customizations/operator.openshift.io/IngressController/testdata/progressing_initialization.yaml b/pkg/resource_customizations/operator.openshift.io/IngressController/testdata/progressing_initialization.yaml new file mode 100644 index 0000000..470216e --- /dev/null +++ b/pkg/resource_customizations/operator.openshift.io/IngressController/testdata/progressing_initialization.yaml @@ -0,0 +1,36 @@ +--- +apiVersion: operator.openshift.io/v1 +kind: IngressController +metadata: + name: apps-shard-2 + namespace: openshift-ingress-operator +spec: + domain: openshift-apps-shard-2.example.com + endpointPublishingStrategy: + hostNetwork: + httpPort: 80 + httpsPort: 443 + statsPort: 1936 + type: HostNetwork + nodePlacement: + nodeSelector: + matchLabels: + node-role.kubernetes.io/worker: "" + replicas: 1 +status: + availableReplicas: 0 + conditions: + - lastTransitionTime: "2023-01-28T09:34:36Z" + reason: Valid + status: "True" + type: Admitted + domain: openshift-apps-shard-2.example.com + endpointPublishingStrategy: + hostNetwork: + httpPort: 80 + httpsPort: 443 + protocol: TCP + statsPort: 1936 + type: HostNetwork + observedGeneration: 1 + selector: "" diff --git a/pkg/resource_customizations/operator.openshift.io/IngressController/testdata/progressing_pod_rollout.yaml b/pkg/resource_customizations/operator.openshift.io/IngressController/testdata/progressing_pod_rollout.yaml new file mode 100644 index 0000000..73a33ae --- /dev/null +++ b/pkg/resource_customizations/operator.openshift.io/IngressController/testdata/progressing_pod_rollout.yaml @@ -0,0 +1,101 @@ +--- +apiVersion: operator.openshift.io/v1 +kind: IngressController +metadata: + name: apps-shard-2 + namespace: openshift-ingress-operator +spec: + domain: openshift-apps-shard-2.example.com + endpointPublishingStrategy: + hostNetwork: + httpPort: 80 + httpsPort: 443 + statsPort: 1936 + type: HostNetwork + nodePlacement: + nodeSelector: + matchLabels: + node-role.kubernetes.io/worker: "" + replicas: 1 +status: + availableReplicas: 0 + conditions: + - lastTransitionTime: "2023-01-28T09:34:36Z" + reason: Valid + status: "True" + type: Admitted + - lastTransitionTime: "2023-01-28T09:34:36Z" + message: 'Some pods are not scheduled: Pod "router-apps-shard-2-7b5cb5f98d-gk4hj" + cannot be scheduled: 0/6 nodes are available: 2 node(s) didn''t have free ports + for the requested pod ports, 3 
node(s) had untolerated taint {node-role.kubernetes.io/master: + }, 5 node(s) didn''t match Pod''s node affinity/selector. preemption: 0/6 nodes + are available: 1 node(s) didn''t have free ports for the requested pod ports, + 5 Preemption is not helpful for scheduling. Make sure you have sufficient worker + nodes.' + reason: PodsNotScheduled + status: "False" + type: PodsScheduled + - lastTransitionTime: "2023-01-28T09:34:36Z" + message: The deployment has Available status condition set to True + reason: DeploymentAvailable + status: "True" + type: DeploymentAvailable + - lastTransitionTime: "2023-01-28T09:34:36Z" + message: Minimum replicas requirement is met + reason: DeploymentMinimumReplicasMet + status: "True" + type: DeploymentReplicasMinAvailable + - lastTransitionTime: "2023-01-28T09:34:36Z" + message: 0/1 of replicas are available + reason: DeploymentReplicasNotAvailable + status: "False" + type: DeploymentReplicasAllAvailable + - lastTransitionTime: "2023-01-28T09:34:36Z" + message: The configured endpoint publishing strategy does not include a managed + load balancer + reason: EndpointPublishingStrategyExcludesManagedLoadBalancer + status: "False" + type: LoadBalancerManaged + - lastTransitionTime: "2023-01-28T09:34:36Z" + message: No DNS zones are defined in the cluster dns config. + reason: NoDNSZones + status: "False" + type: DNSManaged + - lastTransitionTime: "2023-01-28T09:34:36Z" + status: "True" + type: Available + - lastTransitionTime: "2023-01-28T09:34:36Z" + status: "False" + type: Progressing + - lastTransitionTime: "2023-01-28T09:34:36Z" + status: "False" + type: Degraded + - lastTransitionTime: "2023-01-28T09:34:36Z" + message: IngressController is upgradeable. + reason: Upgradeable + status: "True" + type: Upgradeable + domain: openshift-apps-shard-2.example.com + endpointPublishingStrategy: + hostNetwork: + httpPort: 80 + httpsPort: 443 + protocol: TCP + statsPort: 1936 + type: HostNetwork + observedGeneration: 2 + selector: ingresscontroller.operator.openshift.io/deployment-ingresscontroller=apps-shard-2 + tlsProfile: + ciphers: + - ECDHE-ECDSA-AES128-GCM-SHA256 + - ECDHE-RSA-AES128-GCM-SHA256 + - ECDHE-ECDSA-AES256-GCM-SHA384 + - ECDHE-RSA-AES256-GCM-SHA384 + - ECDHE-ECDSA-CHACHA20-POLY1305 + - ECDHE-RSA-CHACHA20-POLY1305 + - DHE-RSA-AES128-GCM-SHA256 + - DHE-RSA-AES256-GCM-SHA384 + - TLS_AES_128_GCM_SHA256 + - TLS_AES_256_GCM_SHA384 + - TLS_CHACHA20_POLY1305_SHA256 + minTLSVersion: VersionTLS12 diff --git a/pkg/resource_customizations/operators.coreos.com/Subscription/health.lua b/pkg/resource_customizations/operators.coreos.com/Subscription/health.lua new file mode 100644 index 0000000..ca5f917 --- /dev/null +++ b/pkg/resource_customizations/operators.coreos.com/Subscription/health.lua @@ -0,0 +1,34 @@ +local health_status = {} +if obj.status ~= nil then + if obj.status.conditions ~= nil then + local numDegraded = 0 + local numPending = 0 + local msg = "" + for i, condition in pairs(obj.status.conditions) do + msg = msg .. i .. ": " .. condition.type .. " | " .. condition.status .. 
"\n" + if condition.type == "InstallPlanPending" and condition.status == "True" then + numPending = numPending + 1 + elseif (condition.type == "InstallPlanMissing" and condition.reason ~= "ReferencedInstallPlanNotFound") then + numDegraded = numDegraded + 1 + elseif (condition.type == "CatalogSourcesUnhealthy" or condition.type == "InstallPlanFailed" or condition.type == "ResolutionFailed") and condition.status == "True" then + numDegraded = numDegraded + 1 + end + end + if numDegraded == 0 and numPending == 0 then + health_status.status = "Healthy" + health_status.message = msg + return health_status + elseif numPending > 0 and numDegraded == 0 then + health_status.status = "Progressing" + health_status.message = "An install plan for a subscription is pending installation" + return health_status + else + health_status.status = "Degraded" + health_status.message = msg + return health_status + end + end +end +health_status.status = "Progressing" +health_status.message = "An install plan for a subscription is pending installation" +return health_status \ No newline at end of file diff --git a/pkg/resource_customizations/operators.coreos.com/Subscription/health_test.yaml b/pkg/resource_customizations/operators.coreos.com/Subscription/health_test.yaml new file mode 100644 index 0000000..11f8390 --- /dev/null +++ b/pkg/resource_customizations/operators.coreos.com/Subscription/health_test.yaml @@ -0,0 +1,25 @@ +tests: +- healthStatus: + status: Progressing + message: "An install plan for a subscription is pending installation" + inputPath: testdata/install_plan_pending.yaml +- healthStatus: + status: Degraded + message: "1: CatalogSourcesUnhealthy | True\n" + inputPath: testdata/catalog_sources_unhealthy.yaml +- healthStatus: + status: Healthy + message: "1: CatalogSourcesUnhealthy | False\n2: InstallPlanMissing | True\n" + inputPath: testdata/install_plan_missing.yaml +- healthStatus: + status: Degraded + message: "1: CatalogSourcesUnhealthy | False\n2: InstallPlanFailed | True\n" + inputPath: testdata/install_plan_failed.yaml +- healthStatus: + status: Degraded + message: "1: CatalogSourcesUnhealthy | True\n2: ResolutionFailed | True\n" + inputPath: testdata/resolution_failed.yaml +- healthStatus: + status: Healthy + message: "1: CatalogSourcesUnhealthy | False\n" + inputPath: testdata/healthy.yaml \ No newline at end of file diff --git a/pkg/resource_customizations/operators.coreos.com/Subscription/testdata/catalog_sources_unhealthy.yaml b/pkg/resource_customizations/operators.coreos.com/Subscription/testdata/catalog_sources_unhealthy.yaml new file mode 100644 index 0000000..2221bbb --- /dev/null +++ b/pkg/resource_customizations/operators.coreos.com/Subscription/testdata/catalog_sources_unhealthy.yaml @@ -0,0 +1,58 @@ +apiVersion: operators.coreos.com/v1alpha1 +kind: Subscription +metadata: + name: openshift-gitops-operator + namespace: openshift-operators + labels: + operators.coreos.com/openshift-gitops-operator.openshift-operators: '' +spec: + channel: stable + name: openshift-gitops-operator + source: redhat-op + sourceNamespace: openshift-marketplace + startingCSV: openshift-gitops-operator.v1.10 +status: + catalogHealth: + - catalogSourceRef: + apiVersion: operators.coreos.com/v1alpha1 + kind: CatalogSource + name: certified-operators + namespace: openshift-marketplace + resourceVersion: '767930' + uid: dba4fefa-78fa-45c2-9801-d551a086a229 + healthy: true + lastUpdated: '2021-08-25T21:16:59Z' + - catalogSourceRef: + apiVersion: operators.coreos.com/v1alpha1 + kind: CatalogSource + 
name: community-operators + namespace: openshift-marketplace + resourceVersion: '769392' + uid: 3e16e348-5b8c-4382-bd71-30fd97836a6c + healthy: true + lastUpdated: '2021-08-25T21:16:59Z' + - catalogSourceRef: + apiVersion: operators.coreos.com/v1alpha1 + kind: CatalogSource + name: redhat-marketplace + namespace: openshift-marketplace + resourceVersion: '766585' + uid: 04003c1c-5414-43e7-ba5e-738addfa2443 + healthy: true + lastUpdated: '2021-08-25T21:16:59Z' + - catalogSourceRef: + apiVersion: operators.coreos.com/v1alpha1 + kind: CatalogSource + name: redhat-operators + namespace: openshift-marketplace + resourceVersion: '766000' + uid: 4cff9c98-14aa-4ebf-a238-2fb8fc48edb6 + healthy: true + lastUpdated: '2021-08-25T21:16:59Z' + conditions: + - lastTransitionTime: '2021-08-25T21:16:59Z' + message: targeted catalogsource openshift-marketplace/redhat-op missing + reason: UnhealthyCatalogSourceFound + status: 'True' + type: CatalogSourcesUnhealthy + lastUpdated: '2021-08-25T21:16:59Z' diff --git a/pkg/resource_customizations/operators.coreos.com/Subscription/testdata/healthy.yaml b/pkg/resource_customizations/operators.coreos.com/Subscription/testdata/healthy.yaml new file mode 100644 index 0000000..1aee820 --- /dev/null +++ b/pkg/resource_customizations/operators.coreos.com/Subscription/testdata/healthy.yaml @@ -0,0 +1,75 @@ +apiVersion: operators.coreos.com/v1alpha1 +kind: Subscription +metadata: + labels: + operators.coreos.com/openshift-gitops-operator.openshift-operators: '' + name: openshift-gitops-operator + namespace: openshift-operators +spec: + channel: stable + installPlanApproval: Automatic + name: openshift-gitops-operator + source: redhat-operators + sourceNamespace: openshift-marketplace + startingCSV: openshift-gitops-operator.v1.2.0 +status: + conditions: + - lastTransitionTime: '2021-08-23T16:03:23Z' + message: all available catalogsources are healthy + reason: AllCatalogSourcesHealthy + status: 'False' + type: CatalogSourcesUnhealthy + installplan: + apiVersion: operators.coreos.com/v1alpha1 + kind: InstallPlan + name: install-m7wvs + uuid: 5dfc3b72-b1a7-49b1-868a-c288ccefc667 + lastUpdated: '2021-08-23T16:03:53Z' + installedCSV: openshift-gitops-operator.v1.2.0 + currentCSV: openshift-gitops-operator.v1.2.0 + installPlanRef: + apiVersion: operators.coreos.com/v1alpha1 + kind: InstallPlan + name: install-m7wvs + namespace: openshift-operators + resourceVersion: '49175' + uid: 5dfc3b72-b1a7-49b1-868a-c288ccefc667 + state: AtLatestKnown + catalogHealth: + - catalogSourceRef: + apiVersion: operators.coreos.com/v1alpha1 + kind: CatalogSource + name: certified-operators + namespace: openshift-marketplace + resourceVersion: '47199' + uid: 1182e96f-bd44-4555-bcae-22ca5545d52f + healthy: true + lastUpdated: '2021-08-23T16:03:23Z' + - catalogSourceRef: + apiVersion: operators.coreos.com/v1alpha1 + kind: CatalogSource + name: community-operators + namespace: openshift-marketplace + resourceVersion: '45598' + uid: fa39c2ee-af6d-4dbb-a62e-cd20c9e8f19b + healthy: true + lastUpdated: '2021-08-23T16:03:23Z' + - catalogSourceRef: + apiVersion: operators.coreos.com/v1alpha1 + kind: CatalogSource + name: redhat-marketplace + namespace: openshift-marketplace + resourceVersion: '47712' + uid: b4d1171a-ec94-4be9-aa26-237bf4aed0f5 + healthy: true + lastUpdated: '2021-08-23T16:03:23Z' + - catalogSourceRef: + apiVersion: operators.coreos.com/v1alpha1 + kind: CatalogSource + name: redhat-operators + namespace: openshift-marketplace + resourceVersion: '46728' + uid: 
a1712fa4-1159-4a9b-8e40-fe605ecbb2e5 + healthy: true + lastUpdated: '2021-08-23T16:03:23Z' + installPlanGeneration: 1 diff --git a/pkg/resource_customizations/operators.coreos.com/Subscription/testdata/install_plan_failed.yaml b/pkg/resource_customizations/operators.coreos.com/Subscription/testdata/install_plan_failed.yaml new file mode 100644 index 0000000..347bc1a --- /dev/null +++ b/pkg/resource_customizations/operators.coreos.com/Subscription/testdata/install_plan_failed.yaml @@ -0,0 +1,86 @@ +apiVersion: operators.coreos.com/v1alpha1 +kind: Subscription +metadata: + labels: + operators.coreos.com/openshift-gitops-operator.openshift-operators: '' + name: openshift-gitops-operator + namespace: openshift-operators +spec: + channel: stable + installPlanApproval: Automatic + name: openshift-gitops-operator + source: redhat-operators + sourceNamespace: openshift-marketplace + startingCSV: openshift-gitops-operator.v1.2.0 +status: + installplan: + apiVersion: operators.coreos.com/v1alpha1 + kind: InstallPlan + name: install-rzdwt + uuid: 7a772cec-f487-4cf9-8689-7c533c212c82 + lastUpdated: '2021-08-24T03:46:16Z' + installedCSV: openshift-gitops-operator.v1.2.0 + currentCSV: openshift-gitops-operator.v1.2.0 + installPlanRef: + apiVersion: operators.coreos.com/v1alpha1 + kind: InstallPlan + name: install-rzdwt + namespace: openshift-operators + resourceVersion: '50025' + uid: 7a772cec-f487-4cf9-8689-7c533c212c82 + state: AtLatestKnown + catalogHealth: + - catalogSourceRef: + apiVersion: operators.coreos.com/v1alpha1 + kind: CatalogSource + name: certified-operators + namespace: openshift-marketplace + resourceVersion: '48843' + uid: 6c1dc387-00b4-4bb7-86f3-9e349a55abf0 + healthy: true + lastUpdated: '2021-08-24T02:43:15Z' + - catalogSourceRef: + apiVersion: operators.coreos.com/v1alpha1 + kind: CatalogSource + name: community-operators + namespace: openshift-marketplace + resourceVersion: '47352' + uid: 7d63bae9-06a9-434b-be50-af60fc73c19d + healthy: true + lastUpdated: '2021-08-24T02:43:15Z' + - catalogSourceRef: + apiVersion: operators.coreos.com/v1alpha1 + kind: CatalogSource + name: redhat-marketplace + namespace: openshift-marketplace + resourceVersion: '48061' + uid: ef6d3590-a326-4ceb-bced-e180c50ff314 + healthy: true + lastUpdated: '2021-08-24T02:43:15Z' + - catalogSourceRef: + apiVersion: operators.coreos.com/v1alpha1 + kind: CatalogSource + name: redhat-operators + namespace: openshift-marketplace + resourceVersion: '46591' + uid: 1a6025d8-c166-4a9a-a608-c051cfc904a8 + healthy: true + lastUpdated: '2021-08-24T02:43:15Z' + conditions: + - lastTransitionTime: '2021-08-24T02:43:15Z' + message: all available catalogsources are healthy + reason: AllCatalogSourcesHealthy + status: 'False' + type: CatalogSourcesUnhealthy + - lastTransitionTime: '2021-08-24T02:53:03Z' + message: >- + api-server resource not found installing CustomResourceDefinition + gitopsservices.pipelines.openshift.io: GroupVersionKind + apiextensions.k8s.io/v1beta1, Kind=CustomResourceDefinition not found on + the cluster. This API may have been deprecated and removed, see + https://kubernetes.io/docs/reference/using-api/deprecation-guide/ for + more information. 
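+    # InstallPlanFailed=True increments numDegraded in health.lua, giving the
+    # Degraded status asserted for this fixture in health_test.yaml.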
+ reason: InstallComponentFailed + status: 'True' + type: InstallPlanFailed + installPlanGeneration: 1 diff --git a/pkg/resource_customizations/operators.coreos.com/Subscription/testdata/install_plan_missing.yaml b/pkg/resource_customizations/operators.coreos.com/Subscription/testdata/install_plan_missing.yaml new file mode 100644 index 0000000..c3a6490 --- /dev/null +++ b/pkg/resource_customizations/operators.coreos.com/Subscription/testdata/install_plan_missing.yaml @@ -0,0 +1,139 @@ +apiVersion: operators.coreos.com/v1alpha1 +kind: Subscription +metadata: + selfLink: >- + /apis/operators.coreos.com/v1alpha1/namespaces/openshift-operators/subscriptions/openshift-gitops-operator + resourceVersion: '147969' + name: openshift-gitops-operator + uid: 59318244-d23a-47c1-9f23-38a3ccaaf6f8 + creationTimestamp: '2021-08-30T21:43:17Z' + generation: 1 + managedFields: + - apiVersion: operators.coreos.com/v1alpha1 + fieldsType: FieldsV1 + fieldsV1: + 'f:spec': + .: {} + 'f:channel': {} + 'f:installPlanApproval': {} + 'f:name': {} + 'f:source': {} + 'f:sourceNamespace': {} + 'f:startingCSV': {} + manager: Mozilla + operation: Update + time: '2021-08-30T21:43:17Z' + - apiVersion: operators.coreos.com/v1alpha1 + fieldsType: FieldsV1 + fieldsV1: + 'f:metadata': + 'f:labels': + .: {} + 'f:operators.coreos.com/openshift-gitops-operator.openshift-operators': {} + manager: olm + operation: Update + time: '2021-08-30T21:43:17Z' + - apiVersion: operators.coreos.com/v1alpha1 + fieldsType: FieldsV1 + fieldsV1: + 'f:status': + 'f:installedCSV': {} + 'f:currentCSV': {} + 'f:catalogHealth': {} + 'f:installPlanRef': + .: {} + 'f:apiVersion': {} + 'f:kind': {} + 'f:name': {} + 'f:namespace': {} + 'f:resourceVersion': {} + 'f:uid': {} + 'f:installPlanGeneration': {} + 'f:conditions': {} + .: {} + 'f:installplan': + .: {} + 'f:apiVersion': {} + 'f:kind': {} + 'f:name': {} + 'f:uuid': {} + 'f:lastUpdated': {} + 'f:state': {} + manager: catalog + operation: Update + time: '2021-08-30T21:43:19Z' + namespace: openshift-operators + labels: + operators.coreos.com/openshift-gitops-operator.openshift-operators: '' +spec: + channel: stable + installPlanApproval: Automatic + name: openshift-gitops-operator + source: redhat-operators + sourceNamespace: openshift-marketplace + startingCSV: openshift-gitops-operator.v1.2.0 +status: + installplan: + apiVersion: operators.coreos.com/v1alpha1 + kind: InstallPlan + name: install-jx26v + uuid: 8566ad1f-c3ea-4367-aba5-db021b8cef45 + lastUpdated: '2021-08-30T21:43:30Z' + installedCSV: openshift-gitops-operator.v1.2.0 + currentCSV: openshift-gitops-operator.v1.2.0 + installPlanRef: + apiVersion: operators.coreos.com/v1alpha1 + kind: InstallPlan + name: install-jx26v + namespace: openshift-operators + resourceVersion: '147595' + uid: 8566ad1f-c3ea-4367-aba5-db021b8cef45 + state: AtLatestKnown + catalogHealth: + - catalogSourceRef: + apiVersion: operators.coreos.com/v1alpha1 + kind: CatalogSource + name: certified-operators + namespace: openshift-marketplace + resourceVersion: '145894' + uid: 12fb6b00-6839-4360-89ea-0ed98cdc94f1 + healthy: true + lastUpdated: '2021-08-30T21:43:17Z' + - catalogSourceRef: + apiVersion: operators.coreos.com/v1alpha1 + kind: CatalogSource + name: community-operators + namespace: openshift-marketplace + resourceVersion: '145058' + uid: e8913c52-5002-404f-92a4-d7b6eb35ea54 + healthy: true + lastUpdated: '2021-08-30T21:43:17Z' + - catalogSourceRef: + apiVersion: operators.coreos.com/v1alpha1 + kind: CatalogSource + name: redhat-marketplace + namespace: 
openshift-marketplace + resourceVersion: '143953' + uid: 5bc9368c-50ee-4079-b66c-e32c8f75fa52 + healthy: true + lastUpdated: '2021-08-30T21:43:17Z' + - catalogSourceRef: + apiVersion: operators.coreos.com/v1alpha1 + kind: CatalogSource + name: redhat-operators + namespace: openshift-marketplace + resourceVersion: '143604' + uid: 57a67470-3344-48db-8790-91853b62b650 + healthy: true + lastUpdated: '2021-08-30T21:43:17Z' + conditions: + - lastTransitionTime: '2021-08-30T21:43:17Z' + message: all available catalogsources are healthy + reason: AllCatalogSourcesHealthy + status: 'False' + type: CatalogSourcesUnhealthy + - lastTransitionTime: '2021-08-30T21:43:30Z' + reason: ReferencedInstallPlanNotFound + status: 'True' + type: InstallPlanMissing + installPlanGeneration: 1 diff --git a/pkg/resource_customizations/operators.coreos.com/Subscription/testdata/install_plan_pending.yaml b/pkg/resource_customizations/operators.coreos.com/Subscription/testdata/install_plan_pending.yaml new file mode 100644 index 0000000..c581e32 --- /dev/null +++ b/pkg/resource_customizations/operators.coreos.com/Subscription/testdata/install_plan_pending.yaml @@ -0,0 +1,78 @@ +apiVersion: operators.coreos.com/v1alpha1 +kind: Subscription +metadata: + name: openshift-gitops-operator + namespace: openshift-operators + labels: + operators.coreos.com/openshift-gitops-operator.openshift-operators: '' +spec: + channel: stable + installPlanApproval: Automatic + name: openshift-gitops-operator + source: redhat-operators + sourceNamespace: openshift-marketplace + startingCSV: openshift-gitops-operator.v1.2.0 +status: + catalogHealth: + - catalogSourceRef: + apiVersion: operators.coreos.com/v1alpha1 + kind: CatalogSource + name: certified-operators + namespace: openshift-marketplace + resourceVersion: '720254' + uid: dba4fefa-78fa-45c2-9801-d551a086a229 + healthy: true + lastUpdated: '2021-08-25T20:55:30Z' + - catalogSourceRef: + apiVersion: operators.coreos.com/v1alpha1 + kind: CatalogSource + name: community-operators + namespace: openshift-marketplace + resourceVersion: '720253' + uid: 3e16e348-5b8c-4382-bd71-30fd97836a6c + healthy: true + lastUpdated: '2021-08-25T20:55:30Z' + - catalogSourceRef: + apiVersion: operators.coreos.com/v1alpha1 + kind: CatalogSource + name: redhat-marketplace + namespace: openshift-marketplace + resourceVersion: '707692' + uid: 04003c1c-5414-43e7-ba5e-738addfa2443 + healthy: true + lastUpdated: '2021-08-25T20:55:30Z' + - catalogSourceRef: + apiVersion: operators.coreos.com/v1alpha1 + kind: CatalogSource + name: redhat-operators + namespace: openshift-marketplace + resourceVersion: '716643' + uid: 4cff9c98-14aa-4ebf-a238-2fb8fc48edb6 + healthy: true + lastUpdated: '2021-08-25T20:55:30Z' + conditions: + - lastTransitionTime: '2021-08-25T20:55:30Z' + message: all available catalogsources are healthy + reason: AllCatalogSourcesHealthy + status: 'False' + type: CatalogSourcesUnhealthy + - lastTransitionTime: '2021-08-25T20:55:36Z' + reason: Installing + status: 'True' + type: InstallPlanPending + currentCSV: openshift-gitops-operator.v1.2.0 + installPlanGeneration: 2 + installPlanRef: + apiVersion: operators.coreos.com/v1alpha1 + kind: InstallPlan + name: install-gldzx + namespace: openshift-operators + resourceVersion: '736450' + uid: cc3edd24-ae7c-4ddf-b924-aee1611ff4a1 + installplan: + apiVersion: operators.coreos.com/v1alpha1 + kind: InstallPlan + name: install-gldzx + uuid: cc3edd24-ae7c-4ddf-b924-aee1611ff4a1 + lastUpdated: '2021-08-25T20:55:36Z' + state: UpgradePending diff --git 
a/pkg/resource_customizations/operators.coreos.com/Subscription/testdata/resolution_failed.yaml b/pkg/resource_customizations/operators.coreos.com/Subscription/testdata/resolution_failed.yaml new file mode 100644 index 0000000..7c6e480 --- /dev/null +++ b/pkg/resource_customizations/operators.coreos.com/Subscription/testdata/resolution_failed.yaml @@ -0,0 +1,72 @@ +apiVersion: operators.coreos.com/v1alpha1 +kind: Subscription +metadata: + creationTimestamp: '2021-08-31T17:39:09Z' + generation: 1 + labels: + operators.coreos.com/openshift-gitops-operator.openshift-operators: '' + operators.coreos.com/openshift-gitops123-operator.openshift-operators: '' + name: openshift-gitops123-operator + namespace: openshift-operators + resourceVersion: '83862' + uid: db43fe71-7937-47d4-84e9-382316c1403c +spec: + channel: stable + installPlanApproval: Automatic + name: openshift-gitops123-operator + source: redhat-operators66 + sourceNamespace: openshift-marketplace + startingCSV: openshift-gitops-operator.v1.2.0 +status: + catalogHealth: + - catalogSourceRef: + apiVersion: operators.coreos.com/v1alpha1 + kind: CatalogSource + name: certified-operators + namespace: openshift-marketplace + resourceVersion: '82603' + uid: 3bca2bd4-5376-4a66-929a-7374540011ab + healthy: true + lastUpdated: '2021-08-31T17:39:09Z' + - catalogSourceRef: + apiVersion: operators.coreos.com/v1alpha1 + kind: CatalogSource + name: community-operators + namespace: openshift-marketplace + resourceVersion: '81603' + uid: de132714-31c7-441e-9a62-bcd2f3908b75 + healthy: true + lastUpdated: '2021-08-31T17:39:09Z' + - catalogSourceRef: + apiVersion: operators.coreos.com/v1alpha1 + kind: CatalogSource + name: redhat-marketplace + namespace: openshift-marketplace + resourceVersion: '81610' + uid: 36652a07-cd6e-46a5-a53f-74520250b69a + healthy: true + lastUpdated: '2021-08-31T17:39:09Z' + - catalogSourceRef: + apiVersion: operators.coreos.com/v1alpha1 + kind: CatalogSource + name: redhat-operators + namespace: openshift-marketplace + resourceVersion: '81602' + uid: f73d6576-5955-4de2-bf4c-2b4c5c0a9077 + healthy: true + lastUpdated: '2021-08-31T17:39:09Z' + conditions: + - lastTransitionTime: '2021-08-31T17:39:09Z' + message: targeted catalogsource openshift-marketplace/redhat-operators66 missing + reason: UnhealthyCatalogSourceFound + status: 'True' + type: CatalogSourcesUnhealthy + - message: >- + constraints not satisfiable: no operators found from catalog + redhat-operators66 in namespace openshift-marketplace referenced by + subscription openshift-gitops123-operator, subscription + openshift-gitops123-operator exists + reason: ConstraintsNotSatisfiable + status: 'True' + type: ResolutionFailed + lastUpdated: '2021-08-31T17:39:12Z' diff --git a/pkg/resource_customizations/pkg.crossplane.io/Provider/health.lua b/pkg/resource_customizations/pkg.crossplane.io/Provider/health.lua new file mode 100644 index 0000000..bcffa34 --- /dev/null +++ b/pkg/resource_customizations/pkg.crossplane.io/Provider/health.lua @@ -0,0 +1,27 @@ +local hs = {} +if obj.status ~= nil then + if obj.status.conditions ~= nil then + local installed = false + local healthy = false + for i, condition in ipairs(obj.status.conditions) do + if condition.type == "Installed" then + installed = condition.status == "True" + installed_message = condition.reason + elseif condition.type == "Healthy" then + healthy = condition.status == "True" + healthy_message = condition.reason + end + end + if installed and healthy then + hs.status = "Healthy" + else + hs.status = "Degraded" + 
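+    -- installed_message / healthy_message are only set when the matching
+    -- condition is present, so the concatenation below falls back to "" for
+    -- any missing part instead of erroring on nil.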
end + hs.message = (installed_message or "") .. " " .. (healthy_message or "") + return hs + end +end + +hs.status = "Progressing" +hs.message = "Waiting for provider to be installed" +return hs diff --git a/pkg/resource_customizations/pkg.crossplane.io/Provider/health_test.yaml b/pkg/resource_customizations/pkg.crossplane.io/Provider/health_test.yaml new file mode 100644 index 0000000..a772cf9 --- /dev/null +++ b/pkg/resource_customizations/pkg.crossplane.io/Provider/health_test.yaml @@ -0,0 +1,17 @@ +tests: +- healthStatus: + status: Progressing + message: Waiting for provider to be installed + inputPath: testdata/progressing_noStatus.yaml +- healthStatus: + status: Degraded + message: ActivePackageRevision UnhealthyPackageRevision + inputPath: testdata/degraded_installed.yaml +- healthStatus: + status: Degraded + message: UnpackingPackage HealthyPackageRevision + inputPath: testdata/degraded_healthy.yaml +- healthStatus: + status: Healthy + message: ActivePackageRevision HealthyPackageRevision + inputPath: testdata/healthy.yaml diff --git a/pkg/resource_customizations/pkg.crossplane.io/Provider/testdata/degraded_healthy.yaml b/pkg/resource_customizations/pkg.crossplane.io/Provider/testdata/degraded_healthy.yaml new file mode 100644 index 0000000..de416a6 --- /dev/null +++ b/pkg/resource_customizations/pkg.crossplane.io/Provider/testdata/degraded_healthy.yaml @@ -0,0 +1,23 @@ +apiVersion: pkg.crossplane.io/v1alpha1 +kind: Provider +metadata: + creationTimestamp: "2020-10-28T15:25:56Z" + generation: 2 + name: provider-helm + resourceVersion: "1664592" + selfLink: /apis/pkg.crossplane.io/v1alpha1/providers/provider-helm + uid: 6ce16954-64f1-48c5-8a80-e29af37fc736 +spec: + package: crossplane/provider-helm:v0.3.5error + revisionActivationPolicy: Automatic +status: + conditions: + - lastTransitionTime: "2020-11-06T12:37:16Z" + reason: UnpackingPackage + status: "False" + type: Installed + - lastTransitionTime: "2020-11-06T10:19:07Z" + reason: HealthyPackageRevision + status: "True" + type: Healthy + currentRevision: provider-helm-7f275162c62e diff --git a/pkg/resource_customizations/pkg.crossplane.io/Provider/testdata/degraded_installed.yaml b/pkg/resource_customizations/pkg.crossplane.io/Provider/testdata/degraded_installed.yaml new file mode 100644 index 0000000..705a44c --- /dev/null +++ b/pkg/resource_customizations/pkg.crossplane.io/Provider/testdata/degraded_installed.yaml @@ -0,0 +1,23 @@ +apiVersion: pkg.crossplane.io/v1alpha1 +kind: Provider +metadata: + creationTimestamp: "2020-10-28T15:25:56Z" + generation: 4 + name: provider-helm + resourceVersion: "1666009" + selfLink: /apis/pkg.crossplane.io/v1alpha1/providers/provider-helm + uid: 6ce16954-64f1-48c5-8a80-e29af37fc736 +spec: + package: crossplane/provider-helm:v0.3.6 + revisionActivationPolicy: Automatic +status: + conditions: + - lastTransitionTime: "2020-11-06T12:44:22Z" + reason: ActivePackageRevision + status: "True" + type: Installed + - lastTransitionTime: "2020-11-06T12:44:21Z" + reason: UnhealthyPackageRevision + status: "False" + type: Healthy + currentRevision: provider-helm-61a254740524 diff --git a/pkg/resource_customizations/pkg.crossplane.io/Provider/testdata/healthy.yaml b/pkg/resource_customizations/pkg.crossplane.io/Provider/testdata/healthy.yaml new file mode 100644 index 0000000..fab33d5 --- /dev/null +++ b/pkg/resource_customizations/pkg.crossplane.io/Provider/testdata/healthy.yaml @@ -0,0 +1,23 @@ +apiVersion: pkg.crossplane.io/v1alpha1 +kind: Provider +metadata: + creationTimestamp: "2020-10-28T15:25:56Z" + generation: 4 + 
name: provider-helm + resourceVersion: "1666154" + selfLink: /apis/pkg.crossplane.io/v1alpha1/providers/provider-helm + uid: 6ce16954-64f1-48c5-8a80-e29af37fc736 +spec: + package: crossplane/provider-helm:v0.3.6 + revisionActivationPolicy: Automatic +status: + conditions: + - lastTransitionTime: "2020-11-06T12:44:28Z" + reason: ActivePackageRevision + status: "True" + type: Installed + - lastTransitionTime: "2020-11-06T12:44:26Z" + reason: HealthyPackageRevision + status: "True" + type: Healthy + currentRevision: provider-helm-61a254740524 diff --git a/pkg/resource_customizations/pkg.crossplane.io/Provider/testdata/progressing_noStatus.yaml b/pkg/resource_customizations/pkg.crossplane.io/Provider/testdata/progressing_noStatus.yaml new file mode 100644 index 0000000..232c607 --- /dev/null +++ b/pkg/resource_customizations/pkg.crossplane.io/Provider/testdata/progressing_noStatus.yaml @@ -0,0 +1,12 @@ +apiVersion: pkg.crossplane.io/v1alpha1 +kind: Provider +metadata: + creationTimestamp: "2020-10-28T15:25:56Z" + generation: 4 + name: provider-helm + resourceVersion: "1666154" + selfLink: /apis/pkg.crossplane.io/v1alpha1/providers/provider-helm + uid: 6ce16954-64f1-48c5-8a80-e29af37fc736 +spec: + package: crossplane/provider-helm:v0.3.6 + revisionActivationPolicy: Automatic diff --git a/pkg/resource_customizations/platform.confluent.io/Connect/health.lua b/pkg/resource_customizations/platform.confluent.io/Connect/health.lua new file mode 100644 index 0000000..5ae4d7a --- /dev/null +++ b/pkg/resource_customizations/platform.confluent.io/Connect/health.lua @@ -0,0 +1,19 @@ +local hs = {} +if obj.status ~= nil then + if obj.status.phase ~= nil then + if obj.status.phase == "RUNNING" then + hs.status = "Healthy" + hs.message = "Connect running" + return hs + end + if obj.status.phase == "PROVISIONING" then + hs.status = "Progressing" + hs.message = "Connect provisioning" + return hs + end + end +end + +hs.status = "Progressing" +hs.message = "Waiting for Connect" +return hs diff --git a/pkg/resource_customizations/platform.confluent.io/Connect/health_test.yaml b/pkg/resource_customizations/platform.confluent.io/Connect/health_test.yaml new file mode 100644 index 0000000..9cff61a --- /dev/null +++ b/pkg/resource_customizations/platform.confluent.io/Connect/health_test.yaml @@ -0,0 +1,9 @@ +tests: + - healthStatus: + status: Progressing + message: Connect provisioning + inputPath: testdata/progressing.yaml + - healthStatus: + status: Healthy + message: Connect running + inputPath: testdata/healthy.yaml diff --git a/pkg/resource_customizations/platform.confluent.io/Connect/testdata/healthy.yaml b/pkg/resource_customizations/platform.confluent.io/Connect/testdata/healthy.yaml new file mode 100644 index 0000000..92c46d9 --- /dev/null +++ b/pkg/resource_customizations/platform.confluent.io/Connect/testdata/healthy.yaml @@ -0,0 +1,49 @@ +apiVersion: platform.confluent.io/v1beta1 +kind: Connect +metadata: + finalizers: + - connect.finalizers.platform.confluent.io + generation: 1 + name: connect + namespace: confluent +spec: + dependencies: + kafka: + bootstrapEndpoint: kafka:9071 + replicas: 1 +status: + clusterName: connect + clusterNamespace: confluent + conditions: + - lastProbeTime: "2021-08-11T10:03:56Z" + lastTransitionTime: "2021-08-11T10:09:01Z" + message: Deployment has minimum availability. 
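+    # The conditions here are informational; Connect health.lua keys on
+    # status.phase, which is "RUNNING" at the end of this fixture.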
+ reason: MinimumReplicasAvailable + status: "True" + type: platform.confluent.io/statefulset-available + - lastProbeTime: "2021-08-11T10:03:56Z" + lastTransitionTime: "2021-08-11T10:09:01Z" + message: Kubernetes resources ready. + reason: KubernetesResourcesReady + status: "True" + type: platform.confluent.io/resources-ready + - lastProbeTime: "2021-08-11T10:03:56Z" + lastTransitionTime: "2021-08-11T10:03:56Z" + message: Cluster is not being garbage collected + reason: ClusterNotShrunk + status: "False" + type: platform.confluent.io/garbage-collecting + currentReplicas: 1 + groupId: confluent.connect + internalTopicNames: + - confluent.connect-offsets + - confluent.connect-configs + - confluent.connect-status + kafka: + bootstrapEndpoint: kafka:9071 + operatorVersion: v0.174.13 + phase: RUNNING + readyReplicas: 1 + replicas: 1 + restConfig: + internalEndpoint: http://connect.confluent.svc.cluster.local:8083 diff --git a/pkg/resource_customizations/platform.confluent.io/Connect/testdata/progressing.yaml b/pkg/resource_customizations/platform.confluent.io/Connect/testdata/progressing.yaml new file mode 100644 index 0000000..7402267 --- /dev/null +++ b/pkg/resource_customizations/platform.confluent.io/Connect/testdata/progressing.yaml @@ -0,0 +1,49 @@ +apiVersion: platform.confluent.io/v1beta1 +kind: Connect +metadata: + finalizers: + - connect.finalizers.platform.confluent.io + generation: 1 + name: connect + namespace: confluent +spec: + dependencies: + kafka: + bootstrapEndpoint: kafka:9071 + replicas: 1 +status: + clusterName: connect + clusterNamespace: confluent + conditions: + - lastProbeTime: "2021-08-11T10:03:56Z" + lastTransitionTime: "2021-08-11T10:09:01Z" + message: Deployment has minimum availability. + reason: MinimumReplicasAvailable + status: "True" + type: platform.confluent.io/statefulset-available + - lastProbeTime: "2021-08-11T10:03:56Z" + lastTransitionTime: "2021-08-11T10:09:01Z" + message: Kubernetes resources ready. 
+ reason: KubernetesResourcesReady + status: "True" + type: platform.confluent.io/resources-ready + - lastProbeTime: "2021-08-11T10:03:56Z" + lastTransitionTime: "2021-08-11T10:03:56Z" + message: Cluster is not being garbage collected + reason: ClusterNotShrunk + status: "False" + type: platform.confluent.io/garbage-collecting + currentReplicas: 1 + groupId: confluent.connect + internalTopicNames: + - confluent.connect-offsets + - confluent.connect-configs + - confluent.connect-status + kafka: + bootstrapEndpoint: kafka:9071 + operatorVersion: v0.174.13 + phase: PROVISIONING + readyReplicas: 1 + replicas: 1 + restConfig: + internalEndpoint: http://connect.confluent.svc.cluster.local:8083 diff --git a/pkg/resource_customizations/platform.confluent.io/ControlCenter/health.lua b/pkg/resource_customizations/platform.confluent.io/ControlCenter/health.lua new file mode 100644 index 0000000..2270545 --- /dev/null +++ b/pkg/resource_customizations/platform.confluent.io/ControlCenter/health.lua @@ -0,0 +1,19 @@ +local hs = {} +if obj.status ~= nil then + if obj.status.phase ~= nil then + if obj.status.phase == "RUNNING" then + hs.status = "Healthy" + hs.message = "ControlCenter running" + return hs + end + if obj.status.phase == "PROVISIONING" then + hs.status = "Progressing" + hs.message = "ControlCenter provisioning" + return hs + end + end +end + +hs.status = "Progressing" +hs.message = "Waiting for ControlCenter" +return hs diff --git a/pkg/resource_customizations/platform.confluent.io/ControlCenter/health_test.yaml b/pkg/resource_customizations/platform.confluent.io/ControlCenter/health_test.yaml new file mode 100644 index 0000000..b5f0170 --- /dev/null +++ b/pkg/resource_customizations/platform.confluent.io/ControlCenter/health_test.yaml @@ -0,0 +1,9 @@ +tests: + - healthStatus: + status: Progressing + message: ControlCenter provisioning + inputPath: testdata/progressing.yaml + - healthStatus: + status: Healthy + message: ControlCenter running + inputPath: testdata/healthy.yaml diff --git a/pkg/resource_customizations/platform.confluent.io/ControlCenter/testdata/healthy.yaml b/pkg/resource_customizations/platform.confluent.io/ControlCenter/testdata/healthy.yaml new file mode 100644 index 0000000..ae2e90e --- /dev/null +++ b/pkg/resource_customizations/platform.confluent.io/ControlCenter/testdata/healthy.yaml @@ -0,0 +1,47 @@ +apiVersion: platform.confluent.io/v1beta1 +kind: ControlCenter +metadata: + finalizers: + - controlcenter.finalizers.platform.confluent.io + generation: 1 + name: controlcenter + namespace: confluent +spec: + dataVolumeCapacity: 10Gi + dependencies: + connect: + - name: connect + url: http://connect:8083 + ksqldb: + - name: ksqldb + url: http://ksqldb:8088 + schemaRegistry: + url: http://schemaregistry:8081 + replicas: 1 +status: + clusterName: controlcenter + clusterNamespace: confluent + conditions: + - lastProbeTime: "2021-08-11T10:08:37Z" + lastTransitionTime: "2021-08-11T10:10:37Z" + message: Deployment has minimum availability. + reason: MinimumReplicasAvailable + status: "True" + type: platform.confluent.io/statefulset-available + - lastProbeTime: "2021-08-11T10:08:37Z" + lastTransitionTime: "2021-08-11T10:10:37Z" + message: Kubernetes resources ready. 
+ reason: KubernetesResourcesReady + status: "True" + type: platform.confluent.io/resources-ready + controlCenterName: _confluent-controlcenter + currentReplicas: 1 + id: 0 + kafka: + bootstrapEndpoint: kafka.confluent.svc.cluster.local:9071 + operatorVersion: v0.174.13 + phase: RUNNING + readyReplicas: 1 + replicas: 1 + restConfig: + internalEndpoint: http://controlcenter.confluent.svc.cluster.local:9021 diff --git a/pkg/resource_customizations/platform.confluent.io/ControlCenter/testdata/progressing.yaml b/pkg/resource_customizations/platform.confluent.io/ControlCenter/testdata/progressing.yaml new file mode 100644 index 0000000..be3c6ea --- /dev/null +++ b/pkg/resource_customizations/platform.confluent.io/ControlCenter/testdata/progressing.yaml @@ -0,0 +1,46 @@ +apiVersion: platform.confluent.io/v1beta1 +kind: ControlCenter +metadata: + finalizers: + - controlcenter.finalizers.platform.confluent.io + generation: 1 + name: controlcenter + namespace: confluent +spec: + dataVolumeCapacity: 10Gi + dependencies: + connect: + - name: connect + url: http://connect:8083 + ksqldb: + - name: ksqldb + url: http://ksqldb:8088 + schemaRegistry: + url: http://schemaregistry:8081 + replicas: 1 +status: + clusterName: controlcenter + clusterNamespace: confluent + conditions: + - lastProbeTime: "2021-08-02T11:44:51Z" + lastTransitionTime: "2021-08-06T12:50:58Z" + message: Deployment does not have minimum availability. + reason: MinimumReplicasUnavailable + status: "False" + type: platform.confluent.io/statefulset-available + - lastProbeTime: "2021-08-02T11:44:51Z" + lastTransitionTime: "2021-08-06T12:50:58Z" + message: Kubernetes resources not ready. + reason: KubernetesResourcesNotReady + status: "False" + type: platform.confluent.io/resources-ready + controlCenterName: _confluent-controlcenter + currentReplicas: 1 + id: 0 + kafka: + bootstrapEndpoint: kafka.confluent.svc.cluster.local:9071 + operatorVersion: v0.174.13 + phase: PROVISIONING + replicas: 1 + restConfig: + internalEndpoint: http://controlcenter.confluent.svc.cluster.local:9021 diff --git a/pkg/resource_customizations/platform.confluent.io/Kafka/health.lua b/pkg/resource_customizations/platform.confluent.io/Kafka/health.lua new file mode 100644 index 0000000..1abafd5 --- /dev/null +++ b/pkg/resource_customizations/platform.confluent.io/Kafka/health.lua @@ -0,0 +1,19 @@ +local hs = {} +if obj.status ~= nil then + if obj.status.phase ~= nil then + if obj.status.phase == "RUNNING" then + hs.status = "Healthy" + hs.message = "Kafka running" + return hs + end + if obj.status.phase == "PROVISIONING" then + hs.status = "Progressing" + hs.message = "Kafka provisioning" + return hs + end + end +end + +hs.status = "Progressing" +hs.message = "Waiting for Kafka" +return hs diff --git a/pkg/resource_customizations/platform.confluent.io/Kafka/health_test.yaml b/pkg/resource_customizations/platform.confluent.io/Kafka/health_test.yaml new file mode 100644 index 0000000..1fb857a --- /dev/null +++ b/pkg/resource_customizations/platform.confluent.io/Kafka/health_test.yaml @@ -0,0 +1,9 @@ +tests: + - healthStatus: + status: Progressing + message: Kafka provisioning + inputPath: testdata/progressing.yaml + - healthStatus: + status: Healthy + message: Kafka running + inputPath: testdata/healthy.yaml diff --git a/pkg/resource_customizations/platform.confluent.io/Kafka/testdata/healthy.yaml b/pkg/resource_customizations/platform.confluent.io/Kafka/testdata/healthy.yaml new file mode 100644 index 0000000..3388533 --- /dev/null +++ 
b/pkg/resource_customizations/platform.confluent.io/Kafka/testdata/healthy.yaml @@ -0,0 +1,63 @@ +apiVersion: platform.confluent.io/v1beta1 +kind: Kafka +metadata: + generation: 1 + name: kafka + namespace: confluent +spec: + dataVolumeCapacity: 10Gi + metricReporter: + enabled: true + replicas: 3 +status: + clusterName: kafka + clusterNamespace: confluent + conditions: + - lastProbeTime: "2021-08-11T10:06:58Z" + lastTransitionTime: "2021-08-11T10:08:42Z" + message: Deployment has minimum availability. + reason: MinimumReplicasAvailable + status: "True" + type: platform.confluent.io/statefulset-available + - lastProbeTime: "2021-08-11T10:06:58Z" + lastTransitionTime: "2021-08-11T10:08:42Z" + message: Kubernetes resources ready. + reason: KubernetesResourcesReady + status: "True" + type: platform.confluent.io/resources-ready + - lastProbeTime: "2021-08-11T10:06:58Z" + lastTransitionTime: "2021-08-11T10:06:58Z" + message: Cluster is not rolling, ignore=false + reason: ClusterNotRolling + status: "False" + type: platform.confluent.io/rolling + - lastProbeTime: "2021-08-11T10:06:58Z" + lastTransitionTime: "2021-08-11T10:06:58Z" + message: Cluster is not being garbage collected + reason: ClusterNotShrunk + status: "False" + type: platform.confluent.io/garbage-collecting + currentReplicas: 3 + listeners: + external: + client: |- + bootstrap.servers=kafka.confluent.svc.cluster.local:9092 + security.protocol=PLAINTEXT + internalEndpoint: kafka.confluent.svc.cluster.local:9092 + internal: + client: |- + bootstrap.servers=kafka.confluent.svc.cluster.local:9071 + security.protocol=PLAINTEXT + internalEndpoint: kafka.confluent.svc.cluster.local:9071 + replication: + internalEndpoint: kafka.confluent.svc.cluster.local:9072 + minISR: 2 + operatorVersion: v0.174.13 + phase: RUNNING + readyReplicas: 3 + replicas: 3 + replicationFactor: 3 + services: + kafka-rest: + internalEndpoint: http://kafka.confluent.svc.cluster.local:8090 + zookeeperConnect: zookeeper.confluent.svc.cluster.local:2181/kafka-confluent diff --git a/pkg/resource_customizations/platform.confluent.io/Kafka/testdata/progressing.yaml b/pkg/resource_customizations/platform.confluent.io/Kafka/testdata/progressing.yaml new file mode 100644 index 0000000..c276682 --- /dev/null +++ b/pkg/resource_customizations/platform.confluent.io/Kafka/testdata/progressing.yaml @@ -0,0 +1,63 @@ +apiVersion: platform.confluent.io/v1beta1 +kind: Kafka +metadata: + generation: 1 + name: kafka + namespace: confluent +spec: + dataVolumeCapacity: 10Gi + metricReporter: + enabled: true + replicas: 3 +status: + clusterName: kafka + clusterNamespace: confluent + conditions: + - lastProbeTime: "2021-08-11T10:06:58Z" + lastTransitionTime: "2021-08-11T10:08:42Z" + message: Deployment has minimum availability. + reason: MinimumReplicasAvailable + status: "True" + type: platform.confluent.io/statefulset-available + - lastProbeTime: "2021-08-11T10:06:58Z" + lastTransitionTime: "2021-08-11T10:08:42Z" + message: Kubernetes resources ready. 
+ reason: KubernetesResourcesReady + status: "True" + type: platform.confluent.io/resources-ready + - lastProbeTime: "2021-08-11T10:06:58Z" + lastTransitionTime: "2021-08-11T10:06:58Z" + message: Cluster is not rolling, ignore=false + reason: ClusterNotRolling + status: "False" + type: platform.confluent.io/rolling + - lastProbeTime: "2021-08-11T10:06:58Z" + lastTransitionTime: "2021-08-11T10:06:58Z" + message: Cluster is not being garbage collected + reason: ClusterNotShrunk + status: "False" + type: platform.confluent.io/garbage-collecting + currentReplicas: 3 + listeners: + external: + client: |- + bootstrap.servers=kafka.confluent.svc.cluster.local:9092 + security.protocol=PLAINTEXT + internalEndpoint: kafka.confluent.svc.cluster.local:9092 + internal: + client: |- + bootstrap.servers=kafka.confluent.svc.cluster.local:9071 + security.protocol=PLAINTEXT + internalEndpoint: kafka.confluent.svc.cluster.local:9071 + replication: + internalEndpoint: kafka.confluent.svc.cluster.local:9072 + minISR: 2 + operatorVersion: v0.174.13 + phase: PROVISIONING + readyReplicas: 3 + replicas: 3 + replicationFactor: 3 + services: + kafka-rest: + internalEndpoint: http://kafka.confluent.svc.cluster.local:8090 + zookeeperConnect: zookeeper.confluent.svc.cluster.local:2181/kafka-confluent diff --git a/pkg/resource_customizations/platform.confluent.io/KsqlDB/health.lua b/pkg/resource_customizations/platform.confluent.io/KsqlDB/health.lua new file mode 100644 index 0000000..263a727 --- /dev/null +++ b/pkg/resource_customizations/platform.confluent.io/KsqlDB/health.lua @@ -0,0 +1,19 @@ +local hs = {} +if obj.status ~= nil then + if obj.status.phase ~= nil then + if obj.status.phase == "RUNNING" then + hs.status = "Healthy" + hs.message = "KsqlDB running" + return hs + end + if obj.status.phase == "PROVISIONING" then + hs.status = "Progressing" + hs.message = "KsqlDB provisioning" + return hs + end + end +end + +hs.status = "Progressing" +hs.message = "Waiting for KsqlDB" +return hs diff --git a/pkg/resource_customizations/platform.confluent.io/KsqlDB/health_test.yaml b/pkg/resource_customizations/platform.confluent.io/KsqlDB/health_test.yaml new file mode 100644 index 0000000..70f1a0a --- /dev/null +++ b/pkg/resource_customizations/platform.confluent.io/KsqlDB/health_test.yaml @@ -0,0 +1,9 @@ +tests: + - healthStatus: + status: Progressing + message: KsqlDB provisioning + inputPath: testdata/progressing.yaml + - healthStatus: + status: Healthy + message: KsqlDB running + inputPath: testdata/healthy.yaml diff --git a/pkg/resource_customizations/platform.confluent.io/KsqlDB/testdata/healthy.yaml b/pkg/resource_customizations/platform.confluent.io/KsqlDB/testdata/healthy.yaml new file mode 100644 index 0000000..be4a282 --- /dev/null +++ b/pkg/resource_customizations/platform.confluent.io/KsqlDB/testdata/healthy.yaml @@ -0,0 +1,44 @@ +apiVersion: platform.confluent.io/v1beta1 +kind: KsqlDB +metadata: + generation: 1 + name: ksqldb + namespace: confluent +spec: + dataVolumeCapacity: 10Gi + replicas: 1 +status: + clusterName: ksqldb + clusterNamespace: confluent + conditions: + - lastProbeTime: "2021-08-11T10:08:31Z" + lastTransitionTime: "2021-08-11T10:10:17Z" + message: Deployment has minimum availability. + reason: MinimumReplicasAvailable + status: "True" + type: platform.confluent.io/statefulset-available + - lastProbeTime: "2021-08-11T10:08:31Z" + lastTransitionTime: "2021-08-11T10:10:17Z" + message: Kubernetes resources ready. 
+ reason: KubernetesResourcesReady + status: "True" + type: platform.confluent.io/resources-ready + - lastProbeTime: "2021-08-11T10:08:31Z" + lastTransitionTime: "2021-08-11T10:08:31Z" + message: Cluster is not being garbage collected + reason: ClusterNotShrunk + status: "False" + type: platform.confluent.io/garbage-collecting + currentReplicas: 1 + internalTopicNames: + - _confluent-ksql-confluent.ksqldb__command_topic + - _confluent-ksql-confluent.ksqldb__configs + kafka: + bootstrapEndpoint: kafka.confluent.svc.cluster.local:9071 + operatorVersion: v0.174.13 + phase: RUNNING + readyReplicas: 1 + replicas: 1 + restConfig: + internalEndpoint: http://ksqldb.confluent.svc.cluster.local:8088 + serviceId: confluent.ksqldb_ diff --git a/pkg/resource_customizations/platform.confluent.io/KsqlDB/testdata/progressing.yaml b/pkg/resource_customizations/platform.confluent.io/KsqlDB/testdata/progressing.yaml new file mode 100644 index 0000000..bcb6076 --- /dev/null +++ b/pkg/resource_customizations/platform.confluent.io/KsqlDB/testdata/progressing.yaml @@ -0,0 +1,44 @@ +apiVersion: platform.confluent.io/v1beta1 +kind: KsqlDB +metadata: + generation: 1 + name: ksqldb + namespace: confluent +spec: + dataVolumeCapacity: 10Gi + replicas: 1 +status: + clusterName: ksqldb + clusterNamespace: confluent + conditions: + - lastProbeTime: "2021-08-11T10:08:31Z" + lastTransitionTime: "2021-08-11T10:10:17Z" + message: Deployment has minimum availability. + reason: MinimumReplicasAvailable + status: "True" + type: platform.confluent.io/statefulset-available + - lastProbeTime: "2021-08-11T10:08:31Z" + lastTransitionTime: "2021-08-11T10:10:17Z" + message: Kubernetes resources ready. + reason: KubernetesResourcesReady + status: "True" + type: platform.confluent.io/resources-ready + - lastProbeTime: "2021-08-11T10:08:31Z" + lastTransitionTime: "2021-08-11T10:08:31Z" + message: Cluster is not being garbage collected + reason: ClusterNotShrunk + status: "False" + type: platform.confluent.io/garbage-collecting + currentReplicas: 1 + internalTopicNames: + - _confluent-ksql-confluent.ksqldb__command_topic + - _confluent-ksql-confluent.ksqldb__configs + kafka: + bootstrapEndpoint: kafka.confluent.svc.cluster.local:9071 + operatorVersion: v0.174.13 + phase: PROVISIONING + readyReplicas: 1 + replicas: 1 + restConfig: + internalEndpoint: http://ksqldb.confluent.svc.cluster.local:8088 + serviceId: confluent.ksqldb_ diff --git a/pkg/resource_customizations/platform.confluent.io/SchemaRegistry/health.lua b/pkg/resource_customizations/platform.confluent.io/SchemaRegistry/health.lua new file mode 100644 index 0000000..9aaa1a5 --- /dev/null +++ b/pkg/resource_customizations/platform.confluent.io/SchemaRegistry/health.lua @@ -0,0 +1,19 @@ +local hs = {} +if obj.status ~= nil then + if obj.status.phase ~= nil then + if obj.status.phase == "RUNNING" then + hs.status = "Healthy" + hs.message = "SchemaRegistry running" + return hs + end + if obj.status.phase == "PROVISIONING" then + hs.status = "Progressing" + hs.message = "SchemaRegistry provisioning" + return hs + end + end +end + +hs.status = "Progressing" +hs.message = "Waiting for SchemaRegistry" +return hs diff --git a/pkg/resource_customizations/platform.confluent.io/SchemaRegistry/health_test.yaml b/pkg/resource_customizations/platform.confluent.io/SchemaRegistry/health_test.yaml new file mode 100644 index 0000000..2d44f6a --- /dev/null +++ b/pkg/resource_customizations/platform.confluent.io/SchemaRegistry/health_test.yaml @@ -0,0 +1,9 @@ +tests: + - healthStatus: + status: 
Progressing + message: SchemaRegistry provisioning + inputPath: testdata/progressing.yaml + - healthStatus: + status: Healthy + message: SchemaRegistry running + inputPath: testdata/healthy.yaml diff --git a/pkg/resource_customizations/platform.confluent.io/SchemaRegistry/testdata/healthy.yaml b/pkg/resource_customizations/platform.confluent.io/SchemaRegistry/testdata/healthy.yaml new file mode 100644 index 0000000..8eca77e --- /dev/null +++ b/pkg/resource_customizations/platform.confluent.io/SchemaRegistry/testdata/healthy.yaml @@ -0,0 +1,45 @@ +apiVersion: platform.confluent.io/v1beta1 +kind: SchemaRegistry +metadata: + finalizers: + - schemaregistry.finalizers.platform.confluent.io + generation: 1 + name: schemaregistry + namespace: confluent +spec: + replicas: 1 +status: + clusterName: schemaregistry + clusterNamespace: confluent + conditions: + - lastProbeTime: "2021-08-11T10:08:32Z" + lastTransitionTime: "2021-08-11T10:09:41Z" + message: Deployment has minimum availability. + reason: MinimumReplicasAvailable + status: "True" + type: platform.confluent.io/statefulset-available + - lastProbeTime: "2021-08-11T10:08:32Z" + lastTransitionTime: "2021-08-11T10:09:41Z" + message: Kubernetes resources ready. + reason: KubernetesResourcesReady + status: "True" + type: platform.confluent.io/resources-ready + - lastProbeTime: "2021-08-11T10:08:32Z" + lastTransitionTime: "2021-08-11T10:08:32Z" + message: Cluster is not being garbage collected + reason: ClusterNotShrunk + status: "False" + type: platform.confluent.io/garbage-collecting + currentReplicas: 1 + groupId: id_schemaregistry_confluent + internalTopicNames: + - _schemas_schemaregistry_confluent + kafka: + bootstrapEndpoint: kafka.confluent.svc.cluster.local:9071 + metricPrefix: schemaregistry_confluent + operatorVersion: v0.174.13 + phase: RUNNING + readyReplicas: 1 + replicas: 1 + restConfig: + internalEndpoint: http://schemaregistry.confluent.svc.cluster.local:8081 diff --git a/pkg/resource_customizations/platform.confluent.io/SchemaRegistry/testdata/progressing.yaml b/pkg/resource_customizations/platform.confluent.io/SchemaRegistry/testdata/progressing.yaml new file mode 100644 index 0000000..a12232b --- /dev/null +++ b/pkg/resource_customizations/platform.confluent.io/SchemaRegistry/testdata/progressing.yaml @@ -0,0 +1,45 @@ +apiVersion: platform.confluent.io/v1beta1 +kind: SchemaRegistry +metadata: + finalizers: + - schemaregistry.finalizers.platform.confluent.io + generation: 1 + name: schemaregistry + namespace: confluent +spec: + replicas: 1 +status: + clusterName: schemaregistry + clusterNamespace: confluent + conditions: + - lastProbeTime: "2021-08-11T10:08:32Z" + lastTransitionTime: "2021-08-11T10:09:41Z" + message: Deployment has minimum availability. + reason: MinimumReplicasAvailable + status: "True" + type: platform.confluent.io/statefulset-available + - lastProbeTime: "2021-08-11T10:08:32Z" + lastTransitionTime: "2021-08-11T10:09:41Z" + message: Kubernetes resources ready. 
+ reason: KubernetesResourcesReady + status: "True" + type: platform.confluent.io/resources-ready + - lastProbeTime: "2021-08-11T10:08:32Z" + lastTransitionTime: "2021-08-11T10:08:32Z" + message: Cluster is not being garbage collected + reason: ClusterNotShrunk + status: "False" + type: platform.confluent.io/garbage-collecting + currentReplicas: 1 + groupId: id_schemaregistry_confluent + internalTopicNames: + - _schemas_schemaregistry_confluent + kafka: + bootstrapEndpoint: kafka.confluent.svc.cluster.local:9071 + metricPrefix: schemaregistry_confluent + operatorVersion: v0.174.13 + phase: PROVISIONING + readyReplicas: 1 + replicas: 1 + restConfig: + internalEndpoint: http://schemaregistry.confluent.svc.cluster.local:8081 diff --git a/pkg/resource_customizations/platform.confluent.io/Zookeeper/health.lua b/pkg/resource_customizations/platform.confluent.io/Zookeeper/health.lua new file mode 100644 index 0000000..92f89b1 --- /dev/null +++ b/pkg/resource_customizations/platform.confluent.io/Zookeeper/health.lua @@ -0,0 +1,19 @@ +local hs = {} +if obj.status ~= nil then + if obj.status.phase ~= nil then + if obj.status.phase == "RUNNING" then + hs.status = "Healthy" + hs.message = "Zookeeper running" + return hs + end + if obj.status.phase == "PROVISIONING" then + hs.status = "Progressing" + hs.message = "Zookeeper provisioning" + return hs + end + end +end + +hs.status = "Progressing" +hs.message = "Waiting for Zookeeper" +return hs diff --git a/pkg/resource_customizations/platform.confluent.io/Zookeeper/health_test.yaml b/pkg/resource_customizations/platform.confluent.io/Zookeeper/health_test.yaml new file mode 100644 index 0000000..7251ec4 --- /dev/null +++ b/pkg/resource_customizations/platform.confluent.io/Zookeeper/health_test.yaml @@ -0,0 +1,9 @@ +tests: + - healthStatus: + status: Progressing + message: Zookeeper provisioning + inputPath: testdata/progressing.yaml + - healthStatus: + status: Healthy + message: Zookeeper running + inputPath: testdata/healthy.yaml diff --git a/pkg/resource_customizations/platform.confluent.io/Zookeeper/testdata/healthy.yaml b/pkg/resource_customizations/platform.confluent.io/Zookeeper/testdata/healthy.yaml new file mode 100644 index 0000000..7bc00dc --- /dev/null +++ b/pkg/resource_customizations/platform.confluent.io/Zookeeper/testdata/healthy.yaml @@ -0,0 +1,44 @@ +apiVersion: platform.confluent.io/v1beta1 +kind: Zookeeper +metadata: + finalizers: + - zookeeper.finalizers.platform.confluent.io + generation: 1 + name: zookeeper + namespace: confluent +spec: + dataVolumeCapacity: 10Gi + logVolumeCapacity: 10Gi + podTemplate: + annotations: + traffic.sidecar.istio.io/excludeInboundPorts: 2888,3888 + traffic.sidecar.istio.io/excludeOutboundPorts: 2888,3888 + replicas: 3 +status: + clusterName: zookeeper + clusterNamespace: confluent + conditions: + - lastProbeTime: "2021-08-11T10:04:53Z" + lastTransitionTime: "2021-08-11T10:06:48Z" + message: Deployment has minimum availability. + reason: MinimumReplicasAvailable + status: "True" + type: platform.confluent.io/statefulset-available + - lastProbeTime: "2021-08-11T10:04:53Z" + lastTransitionTime: "2021-08-11T10:06:48Z" + message: Kubernetes resources ready. 
+ reason: KubernetesResourcesReady + status: "True" + type: platform.confluent.io/resources-ready + - lastProbeTime: "2021-08-11T10:04:53Z" + lastTransitionTime: "2021-08-11T10:04:53Z" + message: Cluster is not being garbage collected + reason: ClusterNotShrunk + status: "False" + type: platform.confluent.io/garbage-collecting + currentReplicas: 3 + endpoint: zookeeper.confluent.svc.cluster.local:2181 + operatorVersion: v0.174.13 + phase: RUNNING + readyReplicas: 3 + replicas: 3 diff --git a/pkg/resource_customizations/platform.confluent.io/Zookeeper/testdata/progressing.yaml b/pkg/resource_customizations/platform.confluent.io/Zookeeper/testdata/progressing.yaml new file mode 100644 index 0000000..be0496a --- /dev/null +++ b/pkg/resource_customizations/platform.confluent.io/Zookeeper/testdata/progressing.yaml @@ -0,0 +1,44 @@ +apiVersion: platform.confluent.io/v1beta1 +kind: Zookeeper +metadata: + finalizers: + - zookeeper.finalizers.platform.confluent.io + generation: 1 + name: zookeeper + namespace: confluent +spec: + dataVolumeCapacity: 10Gi + logVolumeCapacity: 10Gi + podTemplate: + annotations: + traffic.sidecar.istio.io/excludeInboundPorts: 2888,3888 + traffic.sidecar.istio.io/excludeOutboundPorts: 2888,3888 + replicas: 3 +status: + clusterName: zookeeper + clusterNamespace: confluent + conditions: + - lastProbeTime: "2021-08-11T10:04:53Z" + lastTransitionTime: "2021-08-11T10:06:48Z" + message: Deployment has minimum availability. + reason: MinimumReplicasAvailable + status: "True" + type: platform.confluent.io/statefulset-available + - lastProbeTime: "2021-08-11T10:04:53Z" + lastTransitionTime: "2021-08-11T10:06:48Z" + message: Kubernetes resources ready. + reason: KubernetesResourcesReady + status: "True" + type: platform.confluent.io/resources-ready + - lastProbeTime: "2021-08-11T10:04:53Z" + lastTransitionTime: "2021-08-11T10:04:53Z" + message: Cluster is not being garbage collected + reason: ClusterNotShrunk + status: "False" + type: platform.confluent.io/garbage-collecting + currentReplicas: 3 + endpoint: zookeeper.confluent.svc.cluster.local:2181 + operatorVersion: v0.174.13 + phase: PROVISIONING + readyReplicas: 3 + replicas: 3 diff --git a/pkg/resource_customizations/proclaim.dogmatiq.io/DNSSDServiceInstance/health.lua b/pkg/resource_customizations/proclaim.dogmatiq.io/DNSSDServiceInstance/health.lua new file mode 100644 index 0000000..a83520e --- /dev/null +++ b/pkg/resource_customizations/proclaim.dogmatiq.io/DNSSDServiceInstance/health.lua @@ -0,0 +1,29 @@ +local adopted = { status = "Unknown" } +local advertised = { status = "Unknown" } +local discovered = { status = "Unknown" } + +if obj.status ~= nil then + if obj.status.conditions ~= nil then + for i, c in ipairs(obj.status.conditions) do + if c.type == "Adopted" then + adopted = c + elseif c.type == "Advertised" then + advertised = c + elseif c.type == "Discoverable" then + discovered = c + end + end + end +end + +if adopted.status == "False" then + return { status = "Degraded", message = adopted.message } +elseif advertised.reason == "AdvertiseError" or advertised.reason == "UnadvertiseError" then + return { status = "Degraded", message = advertised.message } +elseif discovered.reason == "DiscoveryError" then + return { status = "Unknown", message = discovered.message } +elseif discovered.status == "True" then + return { status = "Healthy", message = discovered.message } +else + return { status = "Progressing", message = discovered.message } +end diff --git 
a/pkg/resource_customizations/proclaim.dogmatiq.io/DNSSDServiceInstance/health_test.yaml b/pkg/resource_customizations/proclaim.dogmatiq.io/DNSSDServiceInstance/health_test.yaml new file mode 100644 index 0000000..1b9b30c --- /dev/null +++ b/pkg/resource_customizations/proclaim.dogmatiq.io/DNSSDServiceInstance/health_test.yaml @@ -0,0 +1,29 @@ +tests: + - healthStatus: + status: Healthy + message: DNS-SD browse and lookup results match the advertised DNS records + inputPath: testdata/healthy.yaml + - healthStatus: + status: Progressing + message: DNS-SD browse could not find this instance + inputPath: testdata/progressing_negativeBrowse.yaml + - healthStatus: + status: Progressing + message: DNS-SD lookup could not find this instance + inputPath: testdata/progressing_negativeLookup.yaml + - healthStatus: + status: Degraded + message: none of the configured providers can advertise on "example.org" + inputPath: testdata/degraded_notAdopted.yaml + - healthStatus: + status: Degraded + message: "" + inputPath: testdata/degraded_advertiseError.yaml + - healthStatus: + status: Degraded + message: "" + inputPath: testdata/degraded_unadvertiseError.yaml + - healthStatus: + status: Unknown + message: "" + inputPath: testdata/unknown_discoveryError.yaml diff --git a/pkg/resource_customizations/proclaim.dogmatiq.io/DNSSDServiceInstance/testdata/degraded_advertiseError.yaml b/pkg/resource_customizations/proclaim.dogmatiq.io/DNSSDServiceInstance/testdata/degraded_advertiseError.yaml new file mode 100644 index 0000000..905b2e9 --- /dev/null +++ b/pkg/resource_customizations/proclaim.dogmatiq.io/DNSSDServiceInstance/testdata/degraded_advertiseError.yaml @@ -0,0 +1,35 @@ +apiVersion: proclaim.dogmatiq.io/v1 +kind: DNSSDServiceInstance +metadata: + creationTimestamp: "2023-03-20T01:47:37Z" + finalizers: + - proclaim.dogmatiq.io/unadvertise + generation: 2 + name: test-instance + namespace: proclaim + resourceVersion: "308914" + uid: 991a66a3-9b7e-4515-9a41-f7513e9b7b33 +spec: + instance: + attributes: + - baz: qux + flag: "" + foo: bar + - more: attrs + domain: example.org + name: test-instance + serviceType: _proclaim._tcp + targets: + - host: test.example.org + port: 8080 + priority: 0 + weight: 0 + ttl: 1m0s +status: + conditions: + - lastTransitionTime: "2023-03-20T01:47:40Z" + message: "" + observedGeneration: 2 + reason: AdvertiseError + status: "False" + type: Advertised diff --git a/pkg/resource_customizations/proclaim.dogmatiq.io/DNSSDServiceInstance/testdata/degraded_notAdopted.yaml b/pkg/resource_customizations/proclaim.dogmatiq.io/DNSSDServiceInstance/testdata/degraded_notAdopted.yaml new file mode 100644 index 0000000..efccdb2 --- /dev/null +++ b/pkg/resource_customizations/proclaim.dogmatiq.io/DNSSDServiceInstance/testdata/degraded_notAdopted.yaml @@ -0,0 +1,35 @@ +apiVersion: proclaim.dogmatiq.io/v1 +kind: DNSSDServiceInstance +metadata: + creationTimestamp: "2023-03-20T01:47:37Z" + finalizers: + - proclaim.dogmatiq.io/unadvertise + generation: 2 + name: test-instance + namespace: proclaim + resourceVersion: "308914" + uid: 991a66a3-9b7e-4515-9a41-f7513e9b7b33 +spec: + instance: + attributes: + - baz: qux + flag: "" + foo: bar + - more: attrs + domain: example.org + name: test-instance + serviceType: _proclaim._tcp + targets: + - host: test.example.org + port: 8080 + priority: 0 + weight: 0 + ttl: 1m0s +status: + conditions: + - lastTransitionTime: "2023-03-20T01:47:40Z" + message: none of the configured providers can advertise on "example.org" + observedGeneration: 2 + reason: 
InstanceIgnored + status: "False" + type: Adopted diff --git a/pkg/resource_customizations/proclaim.dogmatiq.io/DNSSDServiceInstance/testdata/degraded_unadvertiseError.yaml b/pkg/resource_customizations/proclaim.dogmatiq.io/DNSSDServiceInstance/testdata/degraded_unadvertiseError.yaml new file mode 100644 index 0000000..552eadb --- /dev/null +++ b/pkg/resource_customizations/proclaim.dogmatiq.io/DNSSDServiceInstance/testdata/degraded_unadvertiseError.yaml @@ -0,0 +1,35 @@ +apiVersion: proclaim.dogmatiq.io/v1 +kind: DNSSDServiceInstance +metadata: + creationTimestamp: "2023-03-20T01:47:37Z" + finalizers: + - proclaim.dogmatiq.io/unadvertise + generation: 2 + name: test-instance + namespace: proclaim + resourceVersion: "308914" + uid: 991a66a3-9b7e-4515-9a41-f7513e9b7b33 +spec: + instance: + attributes: + - baz: qux + flag: "" + foo: bar + - more: attrs + domain: example.org + name: test-instance + serviceType: _proclaim._tcp + targets: + - host: test.example.org + port: 8080 + priority: 0 + weight: 0 + ttl: 1m0s +status: + conditions: + - lastTransitionTime: "2023-03-20T01:47:40Z" + message: "" + observedGeneration: 2 + reason: UnadvertiseError + status: "False" + type: Advertised diff --git a/pkg/resource_customizations/proclaim.dogmatiq.io/DNSSDServiceInstance/testdata/healthy.yaml b/pkg/resource_customizations/proclaim.dogmatiq.io/DNSSDServiceInstance/testdata/healthy.yaml new file mode 100644 index 0000000..f8ad890 --- /dev/null +++ b/pkg/resource_customizations/proclaim.dogmatiq.io/DNSSDServiceInstance/testdata/healthy.yaml @@ -0,0 +1,35 @@ +apiVersion: proclaim.dogmatiq.io/v1 +kind: DNSSDServiceInstance +metadata: + creationTimestamp: "2023-03-20T01:47:37Z" + finalizers: + - proclaim.dogmatiq.io/unadvertise + generation: 2 + name: test-instance + namespace: proclaim + resourceVersion: "308914" + uid: 991a66a3-9b7e-4515-9a41-f7513e9b7b33 +spec: + instance: + attributes: + - baz: qux + flag: "" + foo: bar + - more: attrs + domain: example.org + name: test-instance + serviceType: _proclaim._tcp + targets: + - host: test.example.org + port: 8080 + priority: 0 + weight: 0 + ttl: 1m0s +status: + conditions: + - lastTransitionTime: "2023-03-20T01:47:40Z" + message: DNS-SD browse and lookup results match the advertised DNS records + observedGeneration: 2 + reason: Discovered + status: "True" + type: Discoverable diff --git a/pkg/resource_customizations/proclaim.dogmatiq.io/DNSSDServiceInstance/testdata/progressing_negativeBrowse.yaml b/pkg/resource_customizations/proclaim.dogmatiq.io/DNSSDServiceInstance/testdata/progressing_negativeBrowse.yaml new file mode 100644 index 0000000..e34e6c1 --- /dev/null +++ b/pkg/resource_customizations/proclaim.dogmatiq.io/DNSSDServiceInstance/testdata/progressing_negativeBrowse.yaml @@ -0,0 +1,35 @@ +apiVersion: proclaim.dogmatiq.io/v1 +kind: DNSSDServiceInstance +metadata: + creationTimestamp: "2023-03-20T01:47:37Z" + finalizers: + - proclaim.dogmatiq.io/unadvertise + generation: 2 + name: test-instance + namespace: proclaim + resourceVersion: "308914" + uid: 991a66a3-9b7e-4515-9a41-f7513e9b7b33 +spec: + instance: + attributes: + - baz: qux + flag: "" + foo: bar + - more: attrs + domain: example.org + name: test-instance + serviceType: _proclaim._tcp + targets: + - host: test.example.org + port: 8080 + priority: 0 + weight: 0 + ttl: 1m0s +status: + conditions: + - lastTransitionTime: "2023-03-20T01:47:40Z" + message: DNS-SD browse could not find this instance + observedGeneration: 2 + reason: NegativeBrowseResult + status: "False" + type: Discoverable diff 
--git a/pkg/resource_customizations/proclaim.dogmatiq.io/DNSSDServiceInstance/testdata/progressing_negativeLookup.yaml b/pkg/resource_customizations/proclaim.dogmatiq.io/DNSSDServiceInstance/testdata/progressing_negativeLookup.yaml new file mode 100644 index 0000000..a563e7c --- /dev/null +++ b/pkg/resource_customizations/proclaim.dogmatiq.io/DNSSDServiceInstance/testdata/progressing_negativeLookup.yaml @@ -0,0 +1,35 @@ +apiVersion: proclaim.dogmatiq.io/v1 +kind: DNSSDServiceInstance +metadata: + creationTimestamp: "2023-03-20T01:47:37Z" + finalizers: + - proclaim.dogmatiq.io/unadvertise + generation: 2 + name: test-instance + namespace: proclaim + resourceVersion: "308914" + uid: 991a66a3-9b7e-4515-9a41-f7513e9b7b33 +spec: + instance: + attributes: + - baz: qux + flag: "" + foo: bar + - more: attrs + domain: example.org + name: test-instance + serviceType: _proclaim._tcp + targets: + - host: test.example.org + port: 8080 + priority: 0 + weight: 0 + ttl: 1m0s +status: + conditions: + - lastTransitionTime: "2023-03-20T01:47:40Z" + message: DNS-SD lookup could not find this instance + observedGeneration: 2 + reason: NegativeLookupResult + status: "False" + type: Discoverable diff --git a/pkg/resource_customizations/proclaim.dogmatiq.io/DNSSDServiceInstance/testdata/unknown_discoveryError.yaml b/pkg/resource_customizations/proclaim.dogmatiq.io/DNSSDServiceInstance/testdata/unknown_discoveryError.yaml new file mode 100644 index 0000000..c6139a5 --- /dev/null +++ b/pkg/resource_customizations/proclaim.dogmatiq.io/DNSSDServiceInstance/testdata/unknown_discoveryError.yaml @@ -0,0 +1,35 @@ +apiVersion: proclaim.dogmatiq.io/v1 +kind: DNSSDServiceInstance +metadata: + creationTimestamp: "2023-03-20T01:47:37Z" + finalizers: + - proclaim.dogmatiq.io/unadvertise + generation: 2 + name: test-instance + namespace: proclaim + resourceVersion: "308914" + uid: 991a66a3-9b7e-4515-9a41-f7513e9b7b33 +spec: + instance: + attributes: + - baz: qux + flag: "" + foo: bar + - more: attrs + domain: example.org + name: test-instance + serviceType: _proclaim._tcp + targets: + - host: test.example.org + port: 8080 + priority: 0 + weight: 0 + ttl: 1m0s +status: + conditions: + - lastTransitionTime: "2023-03-20T01:47:40Z" + message: "" + observedGeneration: 2 + reason: DiscoveryError + status: "Unknown" + type: Discoverable diff --git a/pkg/resource_customizations/pubsub.cnrm.cloud.google.com/PubSubSubscription/health.lua b/pkg/resource_customizations/pubsub.cnrm.cloud.google.com/PubSubSubscription/health.lua new file mode 100644 index 0000000..585b5e2 --- /dev/null +++ b/pkg/resource_customizations/pubsub.cnrm.cloud.google.com/PubSubSubscription/health.lua @@ -0,0 +1,39 @@ +local hs = { + status = "Progressing", + message = "Update in progress" +} +if obj.status ~= nil then + if obj.status.conditions ~= nil then + for i, condition in ipairs(obj.status.conditions) do + + -- Up To Date + if condition.reason == "UpToDate" and condition.status == "True" then + hs.status = "Healthy" + hs.message = condition.message + return hs + end + + -- Update Failed + if condition.reason == "UpdateFailed" then + hs.status = "Degraded" + hs.message = condition.message + return hs + end + + -- Dependency Not Found + if condition.reason == "DependencyNotFound" then + hs.status = "Degraded" + hs.message = condition.message + return hs + end + + -- Dependency Not Ready + if condition.reason == "DependencyNotReady" then + hs.status = "Suspended" + hs.message = condition.message + return hs + end + end + end +end +return hs \ No newline at end 
of file diff --git a/pkg/resource_customizations/pubsub.cnrm.cloud.google.com/PubSubSubscription/health_test.yaml b/pkg/resource_customizations/pubsub.cnrm.cloud.google.com/PubSubSubscription/health_test.yaml new file mode 100644 index 0000000..8354cfb --- /dev/null +++ b/pkg/resource_customizations/pubsub.cnrm.cloud.google.com/PubSubSubscription/health_test.yaml @@ -0,0 +1,21 @@ +tests: +- healthStatus: + status: Degraded + message: "Dependency not found" + inputPath: testdata/dependency_not_found.yaml +- healthStatus: + status: Suspended + message: "Dependency not ready" + inputPath: testdata/dependency_not_ready.yaml +- healthStatus: + status: Healthy + message: "The resource is up to date" + inputPath: testdata/up_to_date.yaml +- healthStatus: + status: Degraded + message: "Update failed" + inputPath: testdata/update_failed.yaml +- healthStatus: + status: Progressing + message: "Update in progress" + inputPath: testdata/update_in_progress.yaml diff --git a/pkg/resource_customizations/pubsub.cnrm.cloud.google.com/PubSubSubscription/testdata/dependency_not_found.yaml b/pkg/resource_customizations/pubsub.cnrm.cloud.google.com/PubSubSubscription/testdata/dependency_not_found.yaml new file mode 100644 index 0000000..380766d --- /dev/null +++ b/pkg/resource_customizations/pubsub.cnrm.cloud.google.com/PubSubSubscription/testdata/dependency_not_found.yaml @@ -0,0 +1,9 @@ +apiVersion: pubsub.cnrm.cloud.google.com/v1beta1 +kind: PubSubSubscription +status: + conditions: + - lastTransitionTime: '2022-07-01T12:56:21Z' + message: Dependency not found + reason: DependencyNotFound + status: 'False' + type: Ready \ No newline at end of file diff --git a/pkg/resource_customizations/pubsub.cnrm.cloud.google.com/PubSubSubscription/testdata/dependency_not_ready.yaml b/pkg/resource_customizations/pubsub.cnrm.cloud.google.com/PubSubSubscription/testdata/dependency_not_ready.yaml new file mode 100644 index 0000000..eda7ae0 --- /dev/null +++ b/pkg/resource_customizations/pubsub.cnrm.cloud.google.com/PubSubSubscription/testdata/dependency_not_ready.yaml @@ -0,0 +1,9 @@ +apiVersion: pubsub.cnrm.cloud.google.com/v1beta1 +kind: PubSubSubscription +status: + conditions: + - lastTransitionTime: '2022-07-01T12:56:21Z' + message: Dependency not ready + reason: DependencyNotReady + status: 'False' + type: Ready \ No newline at end of file diff --git a/pkg/resource_customizations/pubsub.cnrm.cloud.google.com/PubSubSubscription/testdata/up_to_date.yaml b/pkg/resource_customizations/pubsub.cnrm.cloud.google.com/PubSubSubscription/testdata/up_to_date.yaml new file mode 100644 index 0000000..f108481 --- /dev/null +++ b/pkg/resource_customizations/pubsub.cnrm.cloud.google.com/PubSubSubscription/testdata/up_to_date.yaml @@ -0,0 +1,9 @@ +apiVersion: pubsub.cnrm.cloud.google.com/v1beta1 +kind: PubSubSubscription +status: + conditions: + - lastTransitionTime: '2022-05-09T08:49:18Z' + message: The resource is up to date + reason: UpToDate + status: 'True' + type: Ready diff --git a/pkg/resource_customizations/pubsub.cnrm.cloud.google.com/PubSubSubscription/testdata/update_failed.yaml b/pkg/resource_customizations/pubsub.cnrm.cloud.google.com/PubSubSubscription/testdata/update_failed.yaml new file mode 100644 index 0000000..d949420 --- /dev/null +++ b/pkg/resource_customizations/pubsub.cnrm.cloud.google.com/PubSubSubscription/testdata/update_failed.yaml @@ -0,0 +1,9 @@ +apiVersion: pubsub.cnrm.cloud.google.com/v1beta1 +kind: PubSubSubscription +status: + conditions: + - lastTransitionTime: '2022-07-01T12:56:21Z' + message: Update 
failed + reason: UpdateFailed + status: 'False' + type: Ready \ No newline at end of file diff --git a/pkg/resource_customizations/pubsub.cnrm.cloud.google.com/PubSubSubscription/testdata/update_in_progress.yaml b/pkg/resource_customizations/pubsub.cnrm.cloud.google.com/PubSubSubscription/testdata/update_in_progress.yaml new file mode 100644 index 0000000..6e25db0 --- /dev/null +++ b/pkg/resource_customizations/pubsub.cnrm.cloud.google.com/PubSubSubscription/testdata/update_in_progress.yaml @@ -0,0 +1,9 @@ +apiVersion: pubsub.cnrm.cloud.google.com/v1beta1 +kind: PubSubSubscription +status: + conditions: + - lastTransitionTime: '2022-07-01T12:56:21Z' + message: Update in progress + reason: Updating + status: 'False' + type: Ready \ No newline at end of file diff --git a/pkg/resource_customizations/pubsub.cnrm.cloud.google.com/PubSubTopic/health.lua b/pkg/resource_customizations/pubsub.cnrm.cloud.google.com/PubSubTopic/health.lua new file mode 100644 index 0000000..585b5e2 --- /dev/null +++ b/pkg/resource_customizations/pubsub.cnrm.cloud.google.com/PubSubTopic/health.lua @@ -0,0 +1,39 @@ +local hs = { + status = "Progressing", + message = "Update in progress" +} +if obj.status ~= nil then + if obj.status.conditions ~= nil then + for i, condition in ipairs(obj.status.conditions) do + + -- Up To Date + if condition.reason == "UpToDate" and condition.status == "True" then + hs.status = "Healthy" + hs.message = condition.message + return hs + end + + -- Update Failed + if condition.reason == "UpdateFailed" then + hs.status = "Degraded" + hs.message = condition.message + return hs + end + + -- Dependency Not Found + if condition.reason == "DependencyNotFound" then + hs.status = "Degraded" + hs.message = condition.message + return hs + end + + -- Dependency Not Ready + if condition.reason == "DependencyNotReady" then + hs.status = "Suspended" + hs.message = condition.message + return hs + end + end + end +end +return hs \ No newline at end of file diff --git a/pkg/resource_customizations/pubsub.cnrm.cloud.google.com/PubSubTopic/health_test.yaml b/pkg/resource_customizations/pubsub.cnrm.cloud.google.com/PubSubTopic/health_test.yaml new file mode 100644 index 0000000..8354cfb --- /dev/null +++ b/pkg/resource_customizations/pubsub.cnrm.cloud.google.com/PubSubTopic/health_test.yaml @@ -0,0 +1,21 @@ +tests: +- healthStatus: + status: Degraded + message: "Dependency not found" + inputPath: testdata/dependency_not_found.yaml +- healthStatus: + status: Suspended + message: "Dependency not ready" + inputPath: testdata/dependency_not_ready.yaml +- healthStatus: + status: Healthy + message: "The resource is up to date" + inputPath: testdata/up_to_date.yaml +- healthStatus: + status: Degraded + message: "Update failed" + inputPath: testdata/update_failed.yaml +- healthStatus: + status: Progressing + message: "Update in progress" + inputPath: testdata/update_in_progress.yaml diff --git a/pkg/resource_customizations/pubsub.cnrm.cloud.google.com/PubSubTopic/testdata/dependency_not_found.yaml b/pkg/resource_customizations/pubsub.cnrm.cloud.google.com/PubSubTopic/testdata/dependency_not_found.yaml new file mode 100644 index 0000000..0c0c72b --- /dev/null +++ b/pkg/resource_customizations/pubsub.cnrm.cloud.google.com/PubSubTopic/testdata/dependency_not_found.yaml @@ -0,0 +1,9 @@ +apiVersion: pubsub.cnrm.cloud.google.com/v1beta1 +kind: PubSubTopic +status: + conditions: + - lastTransitionTime: '2022-07-01T12:56:21Z' + message: Dependency not found + reason: DependencyNotFound + status: 'False' + type: Ready \ No 
newline at end of file
diff --git a/pkg/resource_customizations/pubsub.cnrm.cloud.google.com/PubSubTopic/testdata/dependency_not_ready.yaml b/pkg/resource_customizations/pubsub.cnrm.cloud.google.com/PubSubTopic/testdata/dependency_not_ready.yaml
new file mode 100644
index 0000000..4756b9f
--- /dev/null
+++ b/pkg/resource_customizations/pubsub.cnrm.cloud.google.com/PubSubTopic/testdata/dependency_not_ready.yaml
@@ -0,0 +1,9 @@
+apiVersion: pubsub.cnrm.cloud.google.com/v1beta1
+kind: PubSubTopic
+status:
+  conditions:
+  - lastTransitionTime: '2022-07-01T12:56:21Z'
+    message: Dependency not ready
+    reason: DependencyNotReady
+    status: 'False'
+    type: Ready
\ No newline at end of file
diff --git a/pkg/resource_customizations/pubsub.cnrm.cloud.google.com/PubSubTopic/testdata/up_to_date.yaml b/pkg/resource_customizations/pubsub.cnrm.cloud.google.com/PubSubTopic/testdata/up_to_date.yaml
new file mode 100644
index 0000000..38e9479
--- /dev/null
+++ b/pkg/resource_customizations/pubsub.cnrm.cloud.google.com/PubSubTopic/testdata/up_to_date.yaml
@@ -0,0 +1,9 @@
+apiVersion: pubsub.cnrm.cloud.google.com/v1beta1
+kind: PubSubTopic
+status:
+  conditions:
+  - lastTransitionTime: '2022-05-09T08:49:18Z'
+    message: The resource is up to date
+    reason: UpToDate
+    status: 'True'
+    type: Ready
diff --git a/pkg/resource_customizations/pubsub.cnrm.cloud.google.com/PubSubTopic/testdata/update_failed.yaml b/pkg/resource_customizations/pubsub.cnrm.cloud.google.com/PubSubTopic/testdata/update_failed.yaml
new file mode 100644
index 0000000..b528af9
--- /dev/null
+++ b/pkg/resource_customizations/pubsub.cnrm.cloud.google.com/PubSubTopic/testdata/update_failed.yaml
@@ -0,0 +1,9 @@
+apiVersion: pubsub.cnrm.cloud.google.com/v1beta1
+kind: PubSubTopic
+status:
+  conditions:
+  - lastTransitionTime: '2022-07-01T12:56:21Z'
+    message: Update failed
+    reason: UpdateFailed
+    status: 'False'
+    type: Ready
\ No newline at end of file
diff --git a/pkg/resource_customizations/pubsub.cnrm.cloud.google.com/PubSubTopic/testdata/update_in_progress.yaml b/pkg/resource_customizations/pubsub.cnrm.cloud.google.com/PubSubTopic/testdata/update_in_progress.yaml
new file mode 100644
index 0000000..fab67f5
--- /dev/null
+++ b/pkg/resource_customizations/pubsub.cnrm.cloud.google.com/PubSubTopic/testdata/update_in_progress.yaml
@@ -0,0 +1,9 @@
+apiVersion: pubsub.cnrm.cloud.google.com/v1beta1
+kind: PubSubTopic
+status:
+  conditions:
+  - lastTransitionTime: '2022-07-01T12:56:21Z'
+    message: Update in progress
+    reason: Updating
+    status: 'False'
+    type: Ready
\ No newline at end of file
diff --git a/pkg/resource_customizations/pxc.percona.com/PerconaXtraDBCluster/health.lua b/pkg/resource_customizations/pxc.percona.com/PerconaXtraDBCluster/health.lua
new file mode 100644
index 0000000..d614828
--- /dev/null
+++ b/pkg/resource_customizations/pxc.percona.com/PerconaXtraDBCluster/health.lua
@@ -0,0 +1,38 @@
+local hs = {}
+if obj.status ~= nil then
+
+  if obj.status.state == "initializing" then
+    hs.status = "Progressing"
+    hs.message = obj.status.ready .. "/" .. obj.status.size .. " node(s) are ready"
+    return hs
+  end
+
+  if obj.status.state == "ready" then
+    hs.status = "Healthy"
+    hs.message = obj.status.ready .. "/" .. obj.status.size .. " node(s) are ready"
+    return hs
+  end
+
+  if obj.status.state == "paused" then
+    hs.status = "Unknown"
+    hs.message = "Cluster is paused"
+    return hs
+  end
+
+  if obj.status.state == "stopping" then
+    hs.status = "Degraded"
+    hs.message = "Cluster is stopping (" .. obj.status.ready .. "/" .. obj.status.size .. " node(s) are ready)"
+    return hs
+  end
+
+  if obj.status.state == "error" then
+    hs.status = "Degraded"
+    hs.message = "Cluster is in an error state: " .. table.concat(obj.status.message, ", ")
+    return hs
+  end
+
+end
+
+hs.status = "Unknown"
+hs.message = "Cluster status is unknown. Ensure your ArgoCD is current and then check for or file a bug report: https://github.com/argoproj/argo-cd/issues"
+return hs
diff --git a/pkg/resource_customizations/pxc.percona.com/PerconaXtraDBCluster/health_test.yaml b/pkg/resource_customizations/pxc.percona.com/PerconaXtraDBCluster/health_test.yaml
new file mode 100644
index 0000000..73b9968
--- /dev/null
+++ b/pkg/resource_customizations/pxc.percona.com/PerconaXtraDBCluster/health_test.yaml
@@ -0,0 +1,25 @@
+tests:
+- healthStatus:
+    status: Progressing
+    message: "0/1 node(s) are ready"
+  inputPath: testdata/initializing.yaml
+- healthStatus:
+    status: Healthy
+    message: "1/1 node(s) are ready"
+  inputPath: testdata/ready.yaml
+- healthStatus:
+    status: Unknown
+    message: "Cluster is paused"
+  inputPath: testdata/paused.yaml
+- healthStatus:
+    status: Degraded
+    message: "Cluster is stopping (1/2 node(s) are ready)"
+  inputPath: testdata/stopping.yaml
+- healthStatus:
+    status: Degraded
+    message: "Cluster is in an error state: we lost node"
+  inputPath: testdata/error.yaml
+- healthStatus:
+    status: Unknown
+    message: "Cluster status is unknown. Ensure your ArgoCD is current and then check for or file a bug report: https://github.com/argoproj/argo-cd/issues"
+  inputPath: testdata/unknown.yaml
diff --git a/pkg/resource_customizations/pxc.percona.com/PerconaXtraDBCluster/testdata/error.yaml b/pkg/resource_customizations/pxc.percona.com/PerconaXtraDBCluster/testdata/error.yaml
new file mode 100644
index 0000000..4a37335
--- /dev/null
+++ b/pkg/resource_customizations/pxc.percona.com/PerconaXtraDBCluster/testdata/error.yaml
@@ -0,0 +1,24 @@
+apiVersion: pxc.percona.com/v1
+kind: PerconaXtraDBCluster
+metadata:
+  name: quickstart
+spec: {}
+status:
+  backup: {}
+  haproxy: {}
+  host: pxc-mysql-pxc
+  logcollector: {}
+  observedGeneration: 1
+  pmm: {}
+  proxysql: {}
+  pxc:
+    image: ""
+    ready: 1
+    size: 2
+    status: error
+    version: 8.0.21-12.1
+  ready: 1
+  size: 2
+  state: error
+  message:
+  - we lost node
diff --git a/pkg/resource_customizations/pxc.percona.com/PerconaXtraDBCluster/testdata/initializing.yaml b/pkg/resource_customizations/pxc.percona.com/PerconaXtraDBCluster/testdata/initializing.yaml
new file mode 100644
index 0000000..11f3ff0
--- /dev/null
+++ b/pkg/resource_customizations/pxc.percona.com/PerconaXtraDBCluster/testdata/initializing.yaml
@@ -0,0 +1,22 @@
+apiVersion: pxc.percona.com/v1
+kind: PerconaXtraDBCluster
+metadata:
+  name: quickstart
+spec: {}
+status:
+  backup: {}
+  haproxy: {}
+  host: pxc-mysql-pxc
+  logcollector: {}
+  observedGeneration: 1
+  pmm: {}
+  proxysql: {}
+  pxc:
+    image: ''
+    ready: 0
+    size: 1
+    status: initializing
+    version: 8.0.21-12.1
+  ready: 0
+  size: 1
+  state: initializing
diff --git a/pkg/resource_customizations/pxc.percona.com/PerconaXtraDBCluster/testdata/paused.yaml b/pkg/resource_customizations/pxc.percona.com/PerconaXtraDBCluster/testdata/paused.yaml
new file mode 100644
index 0000000..46440a2
--- /dev/null
+++ b/pkg/resource_customizations/pxc.percona.com/PerconaXtraDBCluster/testdata/paused.yaml
@@ -0,0 +1,22 @@
+apiVersion: pxc.percona.com/v1
+kind: PerconaXtraDBCluster
+metadata:
+  name: quickstart
+spec: {}
+status:
+  backup: {}
+  haproxy: {}
+  host: pxc-mysql-pxc
+  logcollector: {}
+  observedGeneration: 1
+  pmm: {}
proxysql: {} + pxc: + image: '' + ready: 1 + size: 1 + status: paused + version: 8.0.21-12.1 + ready: 1 + size: 1 + state: paused diff --git a/pkg/resource_customizations/pxc.percona.com/PerconaXtraDBCluster/testdata/ready.yaml b/pkg/resource_customizations/pxc.percona.com/PerconaXtraDBCluster/testdata/ready.yaml new file mode 100644 index 0000000..bd7d82a --- /dev/null +++ b/pkg/resource_customizations/pxc.percona.com/PerconaXtraDBCluster/testdata/ready.yaml @@ -0,0 +1,22 @@ +apiVersion: pxc.percona.com/v1 +kind: PerconaXtraDBCluster +metadata: + name: quickstart +spec: {} +status: + backup: {} + haproxy: {} + host: pxc-mysql-pxc + logcollector: {} + observedGeneration: 1 + pmm: {} + proxysql: {} + pxc: + image: '' + ready: 1 + size: 1 + status: ready + version: 8.0.21-12.1 + ready: 1 + size: 1 + state: ready diff --git a/pkg/resource_customizations/pxc.percona.com/PerconaXtraDBCluster/testdata/stopping.yaml b/pkg/resource_customizations/pxc.percona.com/PerconaXtraDBCluster/testdata/stopping.yaml new file mode 100644 index 0000000..f527445 --- /dev/null +++ b/pkg/resource_customizations/pxc.percona.com/PerconaXtraDBCluster/testdata/stopping.yaml @@ -0,0 +1,22 @@ +apiVersion: pxc.percona.com/v1 +kind: PerconaXtraDBCluster +metadata: + name: quickstart +spec: {} +status: + backup: {} + haproxy: {} + host: pxc-mysql-pxc + logcollector: {} + observedGeneration: 1 + pmm: {} + proxysql: {} + pxc: + image: '' + ready: 1 + size: 2 + status: stopping + version: 8.0.21-12.1 + ready: 1 + size: 2 + state: stopping diff --git a/pkg/resource_customizations/pxc.percona.com/PerconaXtraDBCluster/testdata/unknown.yaml b/pkg/resource_customizations/pxc.percona.com/PerconaXtraDBCluster/testdata/unknown.yaml new file mode 100644 index 0000000..c12b046 --- /dev/null +++ b/pkg/resource_customizations/pxc.percona.com/PerconaXtraDBCluster/testdata/unknown.yaml @@ -0,0 +1,22 @@ +apiVersion: pxc.percona.com/v1 +kind: PerconaXtraDBCluster +metadata: + name: quickstart +spec: {} +status: + backup: {} + haproxy: {} + host: pxc-mysql-pxc + logcollector: {} + observedGeneration: 1 + pmm: {} + proxysql: {} + pxc: + image: '' + ready: 1 + size: 1 + status: dontknow + version: 8.0.21-12.1 + ready: 1 + size: 1 + state: dontknow diff --git a/pkg/resource_customizations/resourcemanager.cnrm.cloud.google.com/Project/health.lua b/pkg/resource_customizations/resourcemanager.cnrm.cloud.google.com/Project/health.lua new file mode 100644 index 0000000..585b5e2 --- /dev/null +++ b/pkg/resource_customizations/resourcemanager.cnrm.cloud.google.com/Project/health.lua @@ -0,0 +1,39 @@ +local hs = { + status = "Progressing", + message = "Update in progress" +} +if obj.status ~= nil then + if obj.status.conditions ~= nil then + for i, condition in ipairs(obj.status.conditions) do + + -- Up To Date + if condition.reason == "UpToDate" and condition.status == "True" then + hs.status = "Healthy" + hs.message = condition.message + return hs + end + + -- Update Failed + if condition.reason == "UpdateFailed" then + hs.status = "Degraded" + hs.message = condition.message + return hs + end + + -- Dependency Not Found + if condition.reason == "DependencyNotFound" then + hs.status = "Degraded" + hs.message = condition.message + return hs + end + + -- Dependency Not Ready + if condition.reason == "DependencyNotReady" then + hs.status = "Suspended" + hs.message = condition.message + return hs + end + end + end +end +return hs \ No newline at end of file diff --git 
a/pkg/resource_customizations/resourcemanager.cnrm.cloud.google.com/Project/health_test.yaml b/pkg/resource_customizations/resourcemanager.cnrm.cloud.google.com/Project/health_test.yaml new file mode 100644 index 0000000..8354cfb --- /dev/null +++ b/pkg/resource_customizations/resourcemanager.cnrm.cloud.google.com/Project/health_test.yaml @@ -0,0 +1,21 @@ +tests: +- healthStatus: + status: Degraded + message: "Dependency not found" + inputPath: testdata/dependency_not_found.yaml +- healthStatus: + status: Suspended + message: "Dependency not ready" + inputPath: testdata/dependency_not_ready.yaml +- healthStatus: + status: Healthy + message: "The resource is up to date" + inputPath: testdata/up_to_date.yaml +- healthStatus: + status: Degraded + message: "Update failed" + inputPath: testdata/update_failed.yaml +- healthStatus: + status: Progressing + message: "Update in progress" + inputPath: testdata/update_in_progress.yaml diff --git a/pkg/resource_customizations/resourcemanager.cnrm.cloud.google.com/Project/testdata/dependency_not_found.yaml b/pkg/resource_customizations/resourcemanager.cnrm.cloud.google.com/Project/testdata/dependency_not_found.yaml new file mode 100644 index 0000000..df457a1 --- /dev/null +++ b/pkg/resource_customizations/resourcemanager.cnrm.cloud.google.com/Project/testdata/dependency_not_found.yaml @@ -0,0 +1,9 @@ +apiVersion: resourcemanager.cnrm.cloud.google.com/v1beta1 +kind: Project +status: + conditions: + - lastTransitionTime: '2022-07-01T12:56:21Z' + message: Dependency not found + reason: DependencyNotFound + status: 'False' + type: Ready \ No newline at end of file diff --git a/pkg/resource_customizations/resourcemanager.cnrm.cloud.google.com/Project/testdata/dependency_not_ready.yaml b/pkg/resource_customizations/resourcemanager.cnrm.cloud.google.com/Project/testdata/dependency_not_ready.yaml new file mode 100644 index 0000000..5988921 --- /dev/null +++ b/pkg/resource_customizations/resourcemanager.cnrm.cloud.google.com/Project/testdata/dependency_not_ready.yaml @@ -0,0 +1,9 @@ +apiVersion: resourcemanager.cnrm.cloud.google.com/v1beta1 +kind: Project +status: + conditions: + - lastTransitionTime: '2022-07-01T12:56:21Z' + message: Dependency not ready + reason: DependencyNotReady + status: 'False' + type: Ready \ No newline at end of file diff --git a/pkg/resource_customizations/resourcemanager.cnrm.cloud.google.com/Project/testdata/up_to_date.yaml b/pkg/resource_customizations/resourcemanager.cnrm.cloud.google.com/Project/testdata/up_to_date.yaml new file mode 100644 index 0000000..6f122aa --- /dev/null +++ b/pkg/resource_customizations/resourcemanager.cnrm.cloud.google.com/Project/testdata/up_to_date.yaml @@ -0,0 +1,9 @@ +apiVersion: resourcemanager.cnrm.cloud.google.com/v1beta1 +kind: Project +status: + conditions: + - lastTransitionTime: '2022-05-09T08:49:18Z' + message: The resource is up to date + reason: UpToDate + status: 'True' + type: Ready diff --git a/pkg/resource_customizations/resourcemanager.cnrm.cloud.google.com/Project/testdata/update_failed.yaml b/pkg/resource_customizations/resourcemanager.cnrm.cloud.google.com/Project/testdata/update_failed.yaml new file mode 100644 index 0000000..6ea7bc8 --- /dev/null +++ b/pkg/resource_customizations/resourcemanager.cnrm.cloud.google.com/Project/testdata/update_failed.yaml @@ -0,0 +1,9 @@ +apiVersion: resourcemanager.cnrm.cloud.google.com/v1beta1 +kind: Project +status: + conditions: + - lastTransitionTime: '2022-07-01T12:56:21Z' + message: Update failed + reason: UpdateFailed + status: 'False' + type: 
Ready \ No newline at end of file diff --git a/pkg/resource_customizations/resourcemanager.cnrm.cloud.google.com/Project/testdata/update_in_progress.yaml b/pkg/resource_customizations/resourcemanager.cnrm.cloud.google.com/Project/testdata/update_in_progress.yaml new file mode 100644 index 0000000..0a69398 --- /dev/null +++ b/pkg/resource_customizations/resourcemanager.cnrm.cloud.google.com/Project/testdata/update_in_progress.yaml @@ -0,0 +1,9 @@ +apiVersion: resourcemanager.cnrm.cloud.google.com/v1beta1 +kind: Project +status: + conditions: + - lastTransitionTime: '2022-07-01T12:56:21Z' + message: Update in progress + reason: Updating + status: 'False' + type: Ready \ No newline at end of file diff --git a/pkg/resource_customizations/rollouts.kruise.io/Rollout/health.lua b/pkg/resource_customizations/rollouts.kruise.io/Rollout/health.lua new file mode 100644 index 0000000..5fd4ddb --- /dev/null +++ b/pkg/resource_customizations/rollouts.kruise.io/Rollout/health.lua @@ -0,0 +1,31 @@ +hs = { status = "Progressing", message = "Rollout is still progressing" } + +if obj.metadata.generation == obj.status.observedGeneration then + + if obj.status.canaryStatus.currentStepState == "StepUpgrade" and obj.status.phase == "Progressing" then + hs.status = "Progressing" + hs.message = "Rollout is still progressing" + return hs + end + + if obj.status.canaryStatus.currentStepState == "StepPaused" and obj.status.phase == "Progressing" then + hs.status = "Suspended" + hs.message = "Rollout is paused and needs manual intervention" + return hs + end + + if obj.status.canaryStatus.currentStepState == "Completed" and obj.status.phase == "Healthy" then + hs.status = "Healthy" + hs.message = "Rollout is Completed" + return hs + end + + if obj.status.canaryStatus.currentStepState == "StepPaused" and (obj.status.phase == "Terminating" or obj.status.phase == "Disabled") then + hs.status = "Degraded" + hs.message = "Rollout is Disabled or Terminating" + return hs + end + +end + +return hs diff --git a/pkg/resource_customizations/rollouts.kruise.io/Rollout/health_test.yaml b/pkg/resource_customizations/rollouts.kruise.io/Rollout/health_test.yaml new file mode 100644 index 0000000..c89ea34 --- /dev/null +++ b/pkg/resource_customizations/rollouts.kruise.io/Rollout/health_test.yaml @@ -0,0 +1,17 @@ +tests: + - healthStatus: + status: Healthy + message: "Rollout is Completed" + inputPath: testdata/healthy.yaml + - healthStatus: + status: Degraded + message: "Rollout is Disabled or Terminating" + inputPath: testdata/degraded.yaml + - healthStatus: + status: Progressing + message: "Rollout is still progressing" + inputPath: testdata/progressing.yaml + - healthStatus: + status: Suspended + message: "Rollout is paused and needs manual intervention" + inputPath: testdata/suspended.yaml diff --git a/pkg/resource_customizations/rollouts.kruise.io/Rollout/testdata/degraded.yaml b/pkg/resource_customizations/rollouts.kruise.io/Rollout/testdata/degraded.yaml new file mode 100644 index 0000000..97c40f1 --- /dev/null +++ b/pkg/resource_customizations/rollouts.kruise.io/Rollout/testdata/degraded.yaml @@ -0,0 +1,50 @@ +apiVersion: rollouts.kruise.io/v1alpha1 +kind: Rollout +metadata: + name: rollouts-demo + namespace: default + annotations: + rollouts.kruise.io/rolling-style: partition + generation: 5 +spec: + objectRef: + workloadRef: + apiVersion: apps/v1 + kind: Deployment + name: workload-demo + strategy: + canary: + steps: + - replicas: 1 + pause: + duration: 0 + - replicas: 50% + pause: + duration: 0 + - replicas: 100% + +status: + 
canaryStatus: + canaryReadyReplicas: 1 + canaryReplicas: 1 + canaryRevision: 76fd76f75b + currentStepIndex: 1 + currentStepState: StepPaused + lastUpdateTime: '2023-09-23T11:44:39Z' + message: BatchRelease is at state Ready, rollout-id , step 1 + observedWorkloadGeneration: 7 + podTemplateHash: 76fd76f75b + rolloutHash: 77cxd69w47b7bwddwv2w7vxvb4xxdbwcx9x289vw69w788w4w6z4x8dd4vbz2zbw + stableRevision: 6bfdfb5bfb + conditions: + - lastTransitionTime: '2023-09-23T11:44:09Z' + lastUpdateTime: '2023-09-23T11:44:09Z' + message: Rollout is in Progressing + reason: InRolling + status: 'True' + type: Progressing + message: >- + Rollout is in step(1/3), and you need manually confirm to enter the next + step + observedGeneration: 5 + phase: Disabled diff --git a/pkg/resource_customizations/rollouts.kruise.io/Rollout/testdata/healthy.yaml b/pkg/resource_customizations/rollouts.kruise.io/Rollout/testdata/healthy.yaml new file mode 100644 index 0000000..77743b5 --- /dev/null +++ b/pkg/resource_customizations/rollouts.kruise.io/Rollout/testdata/healthy.yaml @@ -0,0 +1,56 @@ +apiVersion: rollouts.kruise.io/v1alpha1 +kind: Rollout +metadata: + name: rollouts-demo + namespace: default + annotations: + rollouts.kruise.io/rolling-style: partition + generation: 7 +spec: + objectRef: + workloadRef: + apiVersion: apps/v1 + kind: Deployment + name: workload-demo + strategy: + canary: + steps: + - replicas: 1 + pause: + duration: 0 + - replicas: 50% + pause: + duration: 0 + - replicas: 100% + +status: + canaryStatus: + canaryReadyReplicas: 10 + canaryReplicas: 10 + canaryRevision: 76fd76f75b + currentStepIndex: 3 + currentStepState: Completed + lastUpdateTime: '2023-09-23T11:48:58Z' + message: BatchRelease is at state Ready, rollout-id , step 3 + observedWorkloadGeneration: 22 + podTemplateHash: 76fd76f75b + rolloutHash: 77cxd69w47b7bwddwv2w7vxvb4xxdbwcx9x289vw69w788w4w6z4x8dd4vbz2zbw + stableRevision: 6bfdfb5bfb + conditions: + - lastTransitionTime: '2023-09-23T11:44:09Z' + lastUpdateTime: '2023-09-23T11:44:09Z' + message: Rollout progressing has been completed + reason: Completed + status: 'False' + type: Progressing + - lastTransitionTime: '2023-09-23T11:49:01Z' + lastUpdateTime: '2023-09-23T11:49:01Z' + message: '' + reason: '' + status: 'True' + type: Succeeded + message: Rollout progressing has been completed + observedGeneration: 7 + phase: Healthy + + diff --git a/pkg/resource_customizations/rollouts.kruise.io/Rollout/testdata/progressing.yaml b/pkg/resource_customizations/rollouts.kruise.io/Rollout/testdata/progressing.yaml new file mode 100644 index 0000000..f84d395 --- /dev/null +++ b/pkg/resource_customizations/rollouts.kruise.io/Rollout/testdata/progressing.yaml @@ -0,0 +1,48 @@ +apiVersion: rollouts.kruise.io/v1alpha1 +kind: Rollout +metadata: + name: rollouts-demo + namespace: default + annotations: + rollouts.kruise.io/rolling-style: partition + generation: 5 +spec: + objectRef: + workloadRef: + apiVersion: apps/v1 + kind: Deployment + name: workload-demo + strategy: + canary: + steps: + - replicas: 1 + pause: + duration: 0 + - replicas: 50% + pause: + duration: 0 + - replicas: 100% + +status: + canaryStatus: + canaryReadyReplicas: 0 + canaryReplicas: 1 + canaryRevision: 76fd76f75b + currentStepIndex: 1 + currentStepState: StepUpgrade + lastUpdateTime: '2023-09-23T11:44:12Z' + message: BatchRelease is at state Verifying, rollout-id , step 1 + observedWorkloadGeneration: 6 + podTemplateHash: 76fd76f75b + rolloutHash: 77cxd69w47b7bwddwv2w7vxvb4xxdbwcx9x289vw69w788w4w6z4x8dd4vbz2zbw + stableRevision: 
6bfdfb5bfb + conditions: + - lastTransitionTime: '2023-09-23T11:44:09Z' + lastUpdateTime: '2023-09-23T11:44:09Z' + message: Rollout is in Progressing + reason: InRolling + status: 'True' + type: Progressing + message: Rollout is in step(1/3), and upgrade workload to new version + observedGeneration: 5 + phase: Progressing diff --git a/pkg/resource_customizations/rollouts.kruise.io/Rollout/testdata/suspended.yaml b/pkg/resource_customizations/rollouts.kruise.io/Rollout/testdata/suspended.yaml new file mode 100644 index 0000000..77a6712 --- /dev/null +++ b/pkg/resource_customizations/rollouts.kruise.io/Rollout/testdata/suspended.yaml @@ -0,0 +1,50 @@ +apiVersion: rollouts.kruise.io/v1alpha1 +kind: Rollout +metadata: + name: rollouts-demo + namespace: default + annotations: + rollouts.kruise.io/rolling-style: partition + generation: 5 +spec: + objectRef: + workloadRef: + apiVersion: apps/v1 + kind: Deployment + name: workload-demo + strategy: + canary: + steps: + - replicas: 1 + pause: + duration: 0 + - replicas: 50% + pause: + duration: 0 + - replicas: 100% + +status: + canaryStatus: + canaryReadyReplicas: 1 + canaryReplicas: 1 + canaryRevision: 76fd76f75b + currentStepIndex: 1 + currentStepState: StepPaused + lastUpdateTime: '2023-09-23T11:44:39Z' + message: BatchRelease is at state Ready, rollout-id , step 1 + observedWorkloadGeneration: 7 + podTemplateHash: 76fd76f75b + rolloutHash: 77cxd69w47b7bwddwv2w7vxvb4xxdbwcx9x289vw69w788w4w6z4x8dd4vbz2zbw + stableRevision: 6bfdfb5bfb + conditions: + - lastTransitionTime: '2023-09-23T11:44:09Z' + lastUpdateTime: '2023-09-23T11:44:09Z' + message: Rollout is in Progressing + reason: InRolling + status: 'True' + type: Progressing + message: >- + Rollout is in step(1/3), and you need manually confirm to enter the next + step + observedGeneration: 5 + phase: Progressing diff --git a/pkg/resource_customizations/route.openshift.io/Route/health.lua b/pkg/resource_customizations/route.openshift.io/Route/health.lua new file mode 100644 index 0000000..5a8400b --- /dev/null +++ b/pkg/resource_customizations/route.openshift.io/Route/health.lua @@ -0,0 +1,37 @@ +local health_status = {} +if obj.status ~= nil then + if obj.status.ingress ~= nil then + local numIngressRules = 0 + local numTrue = 0 + local numFalse = 0 + for _, ingressRules in pairs(obj.status.ingress) do + numIngressRules = numIngressRules + 1 + if ingressRules.conditions ~= nil then + for _, condition in pairs(ingressRules.conditions) do + if condition.type == "Admitted" and condition.status == "True" then + numTrue = numTrue + 1 + elseif condition.type == "Admitted" and condition.status == "False" then + numFalse = numFalse + 1 + end + end + end + -- overall status is derived from these tallies below + end + if numTrue == numIngressRules then + health_status.status = "Healthy" + health_status.message = "Route is healthy" + return health_status + elseif numFalse > 0 then + health_status.status = "Degraded" + health_status.message = "Route is degraded" + return health_status + else + health_status.status = "Progressing" + health_status.message = "Route is still getting admitted" + return health_status + end + end +end +health_status.status = "Progressing" +health_status.message = "Route is still getting admitted" +return health_status \ No newline at end of file diff --git a/pkg/resource_customizations/route.openshift.io/Route/health_test.yaml b/pkg/resource_customizations/route.openshift.io/Route/health_test.yaml new file mode 100644 index 0000000..1293d7d --- /dev/null +++ 
b/pkg/resource_customizations/route.openshift.io/Route/health_test.yaml @@ -0,0 +1,13 @@ +tests: +- healthStatus: + status: Progressing + message: 'Route is still getting admitted' + inputPath: testdata/progressing.yaml +- healthStatus: + status: Degraded + message: 'Route is degraded' + inputPath: testdata/degraded.yaml +- healthStatus: + status: Healthy + message: 'Route is healthy' + inputPath: testdata/healthy.yaml \ No newline at end of file diff --git a/pkg/resource_customizations/route.openshift.io/Route/testdata/degraded.yaml b/pkg/resource_customizations/route.openshift.io/Route/testdata/degraded.yaml new file mode 100644 index 0000000..9d6ec91 --- /dev/null +++ b/pkg/resource_customizations/route.openshift.io/Route/testdata/degraded.yaml @@ -0,0 +1,43 @@ +kind: Route +apiVersion: route.openshift.io/v1 +metadata: + annotations: + openshift.io/host.generated: 'true' + resourceVersion: '187177' + name: openshift-gitops-server + namespace: openshift-gitops + ownerReferences: + - apiVersion: argoproj.io/v1alpha1 + kind: ArgoCD + name: openshift-gitops + uid: 09443427-36c8-4680-9a4b-602ae8a45b89 + controller: true + blockOwnerDeletion: true + labels: + app.kubernetes.io/managed-by: openshift-gitops + app.kubernetes.io/name: openshift-gitops-server + app.kubernetes.io/part-of: argocd +spec: + host: >- + openshift-gitops-server-openshift-gitops.apps.dev-svc-4.8-083007.devcluster.openshift.com + to: + kind: Service + name: openshift-gitops-server + weight: 100 + port: + targetPort: https + tls: + termination: passthrough + insecureEdgeTerminationPolicy: Redirect + wildcardPolicy: None +status: + ingress: + - host: >- + openshift-gitops-server-openshift-gitops.apps.dev-svc-4.8-083007.devcluster.openshift.com + routerName: default + conditions: + - type: Admitted + status: 'False' + lastTransitionTime: '2021-08-30T12:13:34Z' + wildcardPolicy: None + routerCanonicalHostname: router-default.apps.dev-svc-4.8-083007.devcluster.openshift.com diff --git a/pkg/resource_customizations/route.openshift.io/Route/testdata/healthy.yaml b/pkg/resource_customizations/route.openshift.io/Route/testdata/healthy.yaml new file mode 100644 index 0000000..a5eabfc --- /dev/null +++ b/pkg/resource_customizations/route.openshift.io/Route/testdata/healthy.yaml @@ -0,0 +1,43 @@ +kind: Route +apiVersion: route.openshift.io/v1 +metadata: + annotations: + openshift.io/host.generated: 'true' + resourceVersion: '187177' + name: openshift-gitops-server + namespace: openshift-gitops + ownerReferences: + - apiVersion: argoproj.io/v1alpha1 + kind: ArgoCD + name: openshift-gitops + uid: 09443427-36c8-4680-9a4b-602ae8a45b89 + controller: true + blockOwnerDeletion: true + labels: + app.kubernetes.io/managed-by: openshift-gitops + app.kubernetes.io/name: openshift-gitops-server + app.kubernetes.io/part-of: argocd +spec: + host: >- + openshift-gitops-server-openshift-gitops.apps.dev-svc-4.8-083007.devcluster.openshift.com + to: + kind: Service + name: openshift-gitops-server + weight: 100 + port: + targetPort: https + tls: + termination: passthrough + insecureEdgeTerminationPolicy: Redirect + wildcardPolicy: None +status: + ingress: + - host: >- + openshift-gitops-server-openshift-gitops.apps.dev-svc-4.8-083007.devcluster.openshift.com + routerName: default + conditions: + - type: Admitted + status: 'True' + lastTransitionTime: '2021-08-30T12:13:34Z' + wildcardPolicy: None + routerCanonicalHostname: router-default.apps.dev-svc-4.8-083007.devcluster.openshift.com diff --git 
a/pkg/resource_customizations/route.openshift.io/Route/testdata/progressing.yaml b/pkg/resource_customizations/route.openshift.io/Route/testdata/progressing.yaml new file mode 100644 index 0000000..c2d219b --- /dev/null +++ b/pkg/resource_customizations/route.openshift.io/Route/testdata/progressing.yaml @@ -0,0 +1,43 @@ +kind: Route +apiVersion: route.openshift.io/v1 +metadata: + annotations: + openshift.io/host.generated: 'true' + resourceVersion: '187177' + name: openshift-gitops-server + namespace: openshift-gitops + ownerReferences: + - apiVersion: argoproj.io/v1alpha1 + kind: ArgoCD + name: openshift-gitops + uid: 09443427-36c8-4680-9a4b-602ae8a45b89 + controller: true + blockOwnerDeletion: true + labels: + app.kubernetes.io/managed-by: openshift-gitops + app.kubernetes.io/name: openshift-gitops-server + app.kubernetes.io/part-of: argocd +spec: + host: >- + openshift-gitops-server-openshift-gitops.apps.dev-svc-4.8-083007.devcluster.openshift.com + to: + kind: Service + name: openshift-gitops-server + weight: 100 + port: + targetPort: https + tls: + termination: passthrough + insecureEdgeTerminationPolicy: Redirect + wildcardPolicy: None +status: + ingress: + - host: >- + openshift-gitops-server-openshift-gitops.apps.dev-svc-4.8-083007.devcluster.openshift.com + routerName: default + conditions: + - type: Admitted + status: 'Unknown' + lastTransitionTime: '2021-08-30T12:13:34Z' + wildcardPolicy: None + routerCanonicalHostname: router-default.apps.dev-svc-4.8-083007.devcluster.openshift.com diff --git a/pkg/resource_customizations/route53.aws.crossplane.io/ResourceRecordSet/health_test.yaml b/pkg/resource_customizations/route53.aws.crossplane.io/ResourceRecordSet/health_test.yaml new file mode 100644 index 0000000..aa83951 --- /dev/null +++ b/pkg/resource_customizations/route53.aws.crossplane.io/ResourceRecordSet/health_test.yaml @@ -0,0 +1,25 @@ +tests: +- healthStatus: + status: Progressing + message: Waiting for resourcerecordset to be available + inputPath: testdata/progressing_creating.yaml +- healthStatus: + status: Progressing + message: Waiting for resourcerecordset to be created + inputPath: testdata/progressing_noStatus.yaml +- healthStatus: + status: Degraded + message: >- + create failed: failed to create the ResourceRecordSet resource: + InvalidChangeBatch: [RRSet of type CNAME with DNS name + www.crossplane.io. is not permitted as it conflicts with other + records with the same DNS name in zone crossplane.io.] 
+ inputPath: testdata/degraded_reconcileError.yaml +- healthStatus: + status: Suspended + message: ReconcilePaused + inputPath: testdata/suspended_reconcilePaused.yaml +- healthStatus: + status: Healthy + message: Available + inputPath: testdata/healthy.yaml diff --git a/pkg/resource_customizations/route53.aws.crossplane.io/ResourceRecordSet/health.lua b/pkg/resource_customizations/route53.aws.crossplane.io/ResourceRecordSet/health.lua new file mode 100644 index 0000000..0cf5253 --- /dev/null +++ b/pkg/resource_customizations/route53.aws.crossplane.io/ResourceRecordSet/health.lua @@ -0,0 +1,41 @@ +local hs = {} +if obj.status ~= nil then + if obj.status.conditions ~= nil then + local ready = false + local synced = false + local suspended = false + for i, condition in ipairs(obj.status.conditions) do + + if condition.type == "Ready" then + ready = condition.status == "True" + ready_message = condition.reason + elseif condition.type == "Synced" then + synced = condition.status == "True" + if condition.reason == "ReconcileError" then + synced_message = condition.message + elseif condition.reason == "ReconcilePaused" then + suspended = true + suspended_message = condition.reason + end + end + end + if ready and synced then + hs.status = "Healthy" + hs.message = ready_message + elseif synced == false and suspended == true then + hs.status = "Suspended" + hs.message = suspended_message + elseif ready == false and synced == true and suspended == false then + hs.status = "Progressing" + hs.message = "Waiting for resourcerecordset to be available" + else + hs.status = "Degraded" + hs.message = synced_message + end + return hs + end +end + +hs.status = "Progressing" +hs.message = "Waiting for resourcerecordset to be created" +return hs diff --git a/pkg/resource_customizations/route53.aws.crossplane.io/ResourceRecordSet/testdata/degraded_reconcileError.yaml b/pkg/resource_customizations/route53.aws.crossplane.io/ResourceRecordSet/testdata/degraded_reconcileError.yaml new file mode 100644 index 0000000..31bc512 --- /dev/null +++ b/pkg/resource_customizations/route53.aws.crossplane.io/ResourceRecordSet/testdata/degraded_reconcileError.yaml @@ -0,0 +1,35 @@ +apiVersion: route53.aws.crossplane.io/v1alpha1 +kind: ResourceRecordSet +metadata: + creationTimestamp: '2024-01-11T03:48:32Z' + generation: 1 + name: www-domain + resourceVersion: '187731157' + selfLink: /apis/route53.aws.crossplane.io/v1alpha1/resourcerecordsets/www-domain + uid: c9c85395-0830-4549-b255-e9e426663547 +spec: + providerConfigRef: + name: crossplane + forProvider: + resourceRecords: + - value: www.crossplane.io + setIdentifier: www + ttl: 60 + type: CNAME + weight: 0 + zoneId: ABCDEFGAB07CD +status: + conditions: + - lastTransitionTime: '2024-01-11T03:48:57Z' + message: >- + create failed: failed to create the ResourceRecordSet resource: + InvalidChangeBatch: [RRSet of type CNAME with DNS name + www.crossplane.io. is not permitted as it conflicts with other + records with the same DNS name in zone crossplane.io.] 
+ reason: ReconcileError + status: 'False' + type: Synced + - lastTransitionTime: '2024-01-11T03:48:34Z' + reason: Creating + status: 'False' + type: Ready diff --git a/pkg/resource_customizations/route53.aws.crossplane.io/ResourceRecordSet/testdata/healthy.yaml b/pkg/resource_customizations/route53.aws.crossplane.io/ResourceRecordSet/testdata/healthy.yaml new file mode 100644 index 0000000..f808e46 --- /dev/null +++ b/pkg/resource_customizations/route53.aws.crossplane.io/ResourceRecordSet/testdata/healthy.yaml @@ -0,0 +1,29 @@ +apiVersion: route53.aws.crossplane.io/v1alpha1 +kind: ResourceRecordSet +metadata: + creationTimestamp: "2023-11-16T04:44:19Z" + generation: 4 + name: www-domain + resourceVersion: "140397563" + selfLink: /apis/route53.aws.crossplane.io/v1alpha1/resourcerecordsets/www-domain + uid: 11f0d48d-134f-471b-9340-b6d45d953fcb +spec: + providerConfigRef: + name: crossplane + forProvider: + zoneId: A1B2C3D4 + type: A + aliasTarget: + dnsName: abcdefg.cloudfront.net. + evaluateTargetHealth: false + hostedZoneId: AZBZCZDEFG +status: + conditions: + - lastTransitionTime: "2023-11-16T04:44:27Z" + reason: Available + status: "True" + type: Ready + - lastTransitionTime: "2023-11-16T04:44:25Z" + reason: ReconcileSuccess + status: "True" + type: Synced diff --git a/pkg/resource_customizations/route53.aws.crossplane.io/ResourceRecordSet/testdata/progressing_creating.yaml b/pkg/resource_customizations/route53.aws.crossplane.io/ResourceRecordSet/testdata/progressing_creating.yaml new file mode 100644 index 0000000..abf5977 --- /dev/null +++ b/pkg/resource_customizations/route53.aws.crossplane.io/ResourceRecordSet/testdata/progressing_creating.yaml @@ -0,0 +1,29 @@ +apiVersion: route53.aws.crossplane.io/v1alpha1 +kind: ResourceRecordSet +metadata: + creationTimestamp: "2023-11-16T04:44:19Z" + generation: 4 + name: www-domain + resourceVersion: "140397563" + selfLink: /apis/route53.aws.crossplane.io/v1alpha1/resourcerecordsets/www-domain + uid: 11f0d48d-134f-471b-9340-b6d45d953fcb +spec: + providerConfigRef: + name: crossplane + forProvider: + zoneId: A1B2C3D4 + type: A + aliasTarget: + dnsName: abcdefg.cloudfront.net. + evaluateTargetHealth: false + hostedZoneId: AZBZCZDEFG +status: + conditions: + - lastTransitionTime: "2023-11-16T04:44:27Z" + reason: Creating + status: "False" + type: Ready + - lastTransitionTime: "2023-11-16T04:44:25Z" + reason: ReconcileSuccess + status: "True" + type: Synced diff --git a/pkg/resource_customizations/route53.aws.crossplane.io/ResourceRecordSet/testdata/progressing_noStatus.yaml b/pkg/resource_customizations/route53.aws.crossplane.io/ResourceRecordSet/testdata/progressing_noStatus.yaml new file mode 100644 index 0000000..28d778d --- /dev/null +++ b/pkg/resource_customizations/route53.aws.crossplane.io/ResourceRecordSet/testdata/progressing_noStatus.yaml @@ -0,0 +1,19 @@ +apiVersion: route53.aws.crossplane.io/v1alpha1 +kind: ResourceRecordSet +metadata: + creationTimestamp: "2023-11-16T04:44:19Z" + generation: 4 + name: www-domain + resourceVersion: "140397563" + selfLink: /apis/route53.aws.crossplane.io/v1alpha1/resourcerecordsets/www-domain + uid: 11f0d48d-134f-471b-9340-b6d45d953fcb +spec: + providerConfigRef: + name: crossplane + forProvider: + zoneId: A1B2C3D4 + type: A + aliasTarget: + dnsName: abcdefg.cloudfront.net. 
+ evaluateTargetHealth: false + hostedZoneId: AZBZCZDEFG diff --git a/pkg/resource_customizations/route53.aws.crossplane.io/ResourceRecordSet/testdata/suspended_reconcilePaused.yaml b/pkg/resource_customizations/route53.aws.crossplane.io/ResourceRecordSet/testdata/suspended_reconcilePaused.yaml new file mode 100644 index 0000000..522c0e8 --- /dev/null +++ b/pkg/resource_customizations/route53.aws.crossplane.io/ResourceRecordSet/testdata/suspended_reconcilePaused.yaml @@ -0,0 +1,27 @@ +apiVersion: route53.aws.crossplane.io/v1alpha1 +kind: ResourceRecordSet +metadata: + annotations: + crossplane.io/paused: "true" + creationTimestamp: "2024-01-11T04:16:15Z" + generation: 1 + name: www-domain + resourceVersion: "187746011" + uid: 5517b419-5052-43d9-941e-c32f60d8c7e5 +spec: + providerConfigRef: + name: crossplane + forProvider: + resourceRecords: + - value: www.crossplane.io + setIdentifier: www + ttl: 60 + type: CNAME + weight: 0 + zoneId: ABCDEFGAB07CD +status: + conditions: + - lastTransitionTime: "2024-01-11T04:16:16Z" + reason: ReconcilePaused + status: "False" + type: Synced diff --git a/pkg/resource_customizations/serving.knative.dev/Service/health.lua b/pkg/resource_customizations/serving.knative.dev/Service/health.lua new file mode 100644 index 0000000..2a8c8a4 --- /dev/null +++ b/pkg/resource_customizations/serving.knative.dev/Service/health.lua @@ -0,0 +1,40 @@ +local health_status = {} +health_status.status = "Progressing" +health_status.message = "Waiting for status update." +if obj.status ~= nil and obj.status.conditions ~= nil then + local status_true = 0 + local status_false = 0 + local status_unknown = 0 + health_status.message = "" + for i, condition in pairs(obj.status.conditions) do + if condition.status == "True" and (condition.type == "ConfigurationsReady" or condition.type == "RoutesReady" or condition.type == "Ready") then + status_true = status_true + 1 + elseif condition.status == "False" or condition.status == "Unknown" then + msg = condition.type .. " is " .. condition.status + if condition.reason ~= nil and condition.reason ~= "" then + msg = msg .. ", since " .. condition.reason .. "." + end + if condition.message ~= nil and condition.message ~= "" then + msg = msg .. " " .. condition.message + end + health_status.message = health_status.message .. msg .. "\n" + if condition.status == "False" then + status_false = status_false + 1 + else + status_unknown = status_unknown + 1 + end + end + end + if status_true == 3 and status_false == 0 and status_unknown == 0 then + health_status.message = "Knative Service is healthy." + health_status.status = "Healthy" + return health_status + elseif status_false > 0 then + health_status.status = "Degraded" + return health_status + else + health_status.status = "Progressing" + return health_status + end +end +return health_status \ No newline at end of file diff --git a/pkg/resource_customizations/serving.knative.dev/Service/health_test.yaml b/pkg/resource_customizations/serving.knative.dev/Service/health_test.yaml new file mode 100644 index 0000000..88ddbf4 --- /dev/null +++ b/pkg/resource_customizations/serving.knative.dev/Service/health_test.yaml @@ -0,0 +1,13 @@ +tests: +- healthStatus: + status: Progressing + message: "Ready is Unknown, since RolloutInProgress. A gradual rollout of the latest revision(s) is in progress.\nRoutesReady is Unknown, since RolloutInProgress. 
A gradual rollout of the latest revision(s) is in progress.\n" + inputPath: testdata/progressing.yaml +- healthStatus: + status: Degraded + message: "ConfigurationsReady is False, since RevisionFailed. Revision \"helloworld-00002\" failed with message: Container failed with: container exited with no error.\nReady is False, since RevisionFailed. Revision \"helloworld-00002\" failed with message: Container failed with: container exited with no error.\n" + inputPath: testdata/degraded.yaml +- healthStatus: + status: Healthy + message: Knative Service is healthy. + inputPath: testdata/healthy.yaml diff --git a/pkg/resource_customizations/serving.knative.dev/Service/testdata/degraded.yaml b/pkg/resource_customizations/serving.knative.dev/Service/testdata/degraded.yaml new file mode 100644 index 0000000..cc23d52 --- /dev/null +++ b/pkg/resource_customizations/serving.knative.dev/Service/testdata/degraded.yaml @@ -0,0 +1,21 @@ +apiVersion: serving.knative.dev/v1 +kind: Service +metadata: + name: helloworld + namespace: default +spec: {} +status: + conditions: + - lastTransitionTime: "2022-06-14T03:45:38Z" + message: 'Revision "helloworld-00002" failed with message: Container failed with: container exited with no error.' + reason: RevisionFailed + status: "False" + type: ConfigurationsReady + - lastTransitionTime: "2022-06-14T03:45:38Z" + message: 'Revision "helloworld-00002" failed with message: Container failed with: container exited with no error.' + reason: RevisionFailed + status: "False" + type: Ready + - lastTransitionTime: "2022-06-16T09:35:11Z" + status: "True" + type: RoutesReady diff --git a/pkg/resource_customizations/serving.knative.dev/Service/testdata/healthy.yaml b/pkg/resource_customizations/serving.knative.dev/Service/testdata/healthy.yaml new file mode 100644 index 0000000..ced6960 --- /dev/null +++ b/pkg/resource_customizations/serving.knative.dev/Service/testdata/healthy.yaml @@ -0,0 +1,17 @@ +apiVersion: serving.knative.dev/v1 +kind: Service +metadata: + name: helloworld + namespace: default +spec: {} +status: + conditions: + - lastTransitionTime: "2022-06-20T03:52:41Z" + status: "True" + type: ConfigurationsReady + - lastTransitionTime: "2022-06-20T03:52:41Z" + status: "True" + type: Ready + - lastTransitionTime: "2022-06-20T03:52:41Z" + status: "True" + type: RoutesReady diff --git a/pkg/resource_customizations/serving.knative.dev/Service/testdata/progressing.yaml b/pkg/resource_customizations/serving.knative.dev/Service/testdata/progressing.yaml new file mode 100644 index 0000000..f381e5f --- /dev/null +++ b/pkg/resource_customizations/serving.knative.dev/Service/testdata/progressing.yaml @@ -0,0 +1,21 @@ +apiVersion: serving.knative.dev/v1 +kind: Service +metadata: + name: helloworld + namespace: default +spec: {} +status: + conditions: + - lastTransitionTime: "2022-06-20T04:14:22Z" + status: "True" + type: ConfigurationsReady + - lastTransitionTime: "2022-06-20T04:14:22Z" + message: A gradual rollout of the latest revision(s) is in progress. + reason: RolloutInProgress + status: Unknown + type: Ready + - lastTransitionTime: "2022-06-20T04:14:22Z" + message: A gradual rollout of the latest revision(s) is in progress. 
+ reason: RolloutInProgress + status: Unknown + type: RoutesReady diff --git a/pkg/resource_customizations/serving.kserve.io/InferenceService/health.lua b/pkg/resource_customizations/serving.kserve.io/InferenceService/health.lua new file mode 100644 index 0000000..fbcfbf7 --- /dev/null +++ b/pkg/resource_customizations/serving.kserve.io/InferenceService/health.lua @@ -0,0 +1,40 @@ +local health_status = {} +health_status.status = "Progressing" +health_status.message = "Waiting for status update." +if obj.status ~= nil and obj.status.conditions ~= nil then + local status_true = 0 + local status_false = 0 + local status_unknown = 0 + health_status.message = "" + for i, condition in pairs(obj.status.conditions) do + if condition.status == "True" and (condition.type == "IngressReady" or condition.type == "PredictorConfigurationReady" or condition.type == "PredictorReady" or condition.type == "PredictorRouteReady" or condition.type == "Ready") then + status_true = status_true + 1 + elseif condition.status == "False" or condition.status == "Unknown" then + msg = condition.type .. " is " .. condition.status + if condition.reason ~= nil and condition.reason ~= "" then + msg = msg .. ", since " .. condition.reason .. "." + end + if condition.message ~= nil and condition.message ~= "" then + msg = msg .. " " .. condition.message + end + health_status.message = health_status.message .. msg .. "\n" + if condition.status == "False" then + status_false = status_false + 1 + else + status_unknown = status_unknown + 1 + end + end + end + if status_true == 5 and status_false == 0 and status_unknown == 0 then + health_status.message = "Inference Service is healthy." + health_status.status = "Healthy" + return health_status + elseif status_false > 0 then + health_status.status = "Degraded" + return health_status + else + health_status.status = "Progressing" + return health_status + end +end +return health_status \ No newline at end of file diff --git a/pkg/resource_customizations/serving.kserve.io/InferenceService/health_test.yaml b/pkg/resource_customizations/serving.kserve.io/InferenceService/health_test.yaml new file mode 100644 index 0000000..e8f32bd --- /dev/null +++ b/pkg/resource_customizations/serving.kserve.io/InferenceService/health_test.yaml @@ -0,0 +1,13 @@ +tests: +- healthStatus: + status: Progressing + message: "PredictorConfigurationReady is Unknown\nPredictorReady is Unknown, since RevisionMissing. Configuration \"hello-world-predictor-default\" is waiting for a Revision to become ready.\nPredictorRouteReady is Unknown, since RevisionMissing. Configuration \"hello-world-predictor-default\" is waiting for a Revision to become ready.\nReady is Unknown, since RevisionMissing. Configuration \"hello-world-predictor-default\" is waiting for a Revision to become ready.\n" + inputPath: testdata/progressing.yaml +- healthStatus: + status: Degraded + message: "IngressReady is False, since Predictor ingress not created.\nPredictorConfigurationReady is False, since RevisionFailed. Revision \"helloworld-00002\" failed with message: Container failed with: container exited with no error.\nPredictorReady is False, since RevisionFailed. Revision \"helloworld-00002\" failed with message: Container failed with: container exited with no error.\nReady is False, since Predictor ingress not created.\n" + inputPath: testdata/degraded.yaml +- healthStatus: + status: Healthy + message: Inference Service is healthy. 
+ inputPath: testdata/healthy.yaml diff --git a/pkg/resource_customizations/serving.kserve.io/InferenceService/testdata/degraded.yaml b/pkg/resource_customizations/serving.kserve.io/InferenceService/testdata/degraded.yaml new file mode 100644 index 0000000..0cd3378 --- /dev/null +++ b/pkg/resource_customizations/serving.kserve.io/InferenceService/testdata/degraded.yaml @@ -0,0 +1,30 @@ +apiVersion: serving.kserve.io/v1beta1 +kind: InferenceService +metadata: + name: helloworld + namespace: default +spec: {} +status: + conditions: + - lastTransitionTime: "2022-06-14T03:45:38Z" + reason: Predictor ingress not created + status: "False" + type: IngressReady + - lastTransitionTime: "2022-06-14T03:45:38Z" + message: 'Revision "helloworld-00002" failed with message: Container failed with: container exited with no error.' + reason: RevisionFailed + status: "False" + type: PredictorConfigurationReady + - lastTransitionTime: "2022-06-14T03:45:38Z" + message: 'Revision "helloworld-00002" failed with message: Container failed with: container exited with no error.' + reason: RevisionFailed + status: "False" + type: PredictorReady + - lastTransitionTime: "2022-06-14T03:45:38Z" + severity: Info + status: "True" + type: PredictorRouteReady + - lastTransitionTime: "2022-06-14T03:45:38Z" + reason: Predictor ingress not created + status: "False" + type: Ready diff --git a/pkg/resource_customizations/serving.kserve.io/InferenceService/testdata/healthy.yaml b/pkg/resource_customizations/serving.kserve.io/InferenceService/testdata/healthy.yaml new file mode 100644 index 0000000..3c28c61 --- /dev/null +++ b/pkg/resource_customizations/serving.kserve.io/InferenceService/testdata/healthy.yaml @@ -0,0 +1,25 @@ +apiVersion: serving.kserve.io/v1beta1 +kind: InferenceService +metadata: + name: helloworld + namespace: default +spec: {} +status: + conditions: + - lastTransitionTime: "2023-06-20T22:44:51Z" + status: "True" + type: IngressReady + - lastTransitionTime: "2023-06-20T22:44:50Z" + severity: Info + status: "True" + type: PredictorConfigurationReady + - lastTransitionTime: "2023-06-20T22:44:51Z" + status: "True" + type: PredictorReady + - lastTransitionTime: "2023-06-20T22:44:51Z" + severity: Info + status: "True" + type: PredictorRouteReady + - lastTransitionTime: "2023-06-20T22:44:51Z" + status: "True" + type: Ready diff --git a/pkg/resource_customizations/serving.kserve.io/InferenceService/testdata/progressing.yaml b/pkg/resource_customizations/serving.kserve.io/InferenceService/testdata/progressing.yaml new file mode 100644 index 0000000..fab0a57 --- /dev/null +++ b/pkg/resource_customizations/serving.kserve.io/InferenceService/testdata/progressing.yaml @@ -0,0 +1,28 @@ +apiVersion: serving.kserve.io/v1beta1 +kind: InferenceService +metadata: + name: helloworld + namespace: default +spec: {} +status: + conditions: + - lastTransitionTime: "2023-06-21T22:25:58Z" + severity: Info + status: Unknown + type: PredictorConfigurationReady + - lastTransitionTime: "2023-06-21T22:25:58Z" + message: 'Configuration "hello-world-predictor-default" is waiting for a Revision to become ready.' + reason: RevisionMissing + status: Unknown + type: PredictorReady + - lastTransitionTime: "2023-06-21T22:25:58Z" + message: 'Configuration "hello-world-predictor-default" is waiting for a Revision to become ready.' 
+ reason: RevisionMissing + severity: Info + status: Unknown + type: PredictorRouteReady + - lastTransitionTime: "2023-06-21T22:25:58Z" + message: 'Configuration "hello-world-predictor-default" is waiting for a Revision to become ready.' + reason: RevisionMissing + status: Unknown + type: Ready diff --git a/pkg/resource_customizations/snapshot.storage.k8s.io/VolumeSnapshot/health.lua b/pkg/resource_customizations/snapshot.storage.k8s.io/VolumeSnapshot/health.lua new file mode 100644 index 0000000..082de2c --- /dev/null +++ b/pkg/resource_customizations/snapshot.storage.k8s.io/VolumeSnapshot/health.lua @@ -0,0 +1,18 @@ +local hs = {} + +if obj.status ~= nil and obj.status.readyToUse then + hs.status = "Healthy" + hs.message = "Ready to use" + return hs +end + +if obj.status ~= nil and obj.status.error ~= nil then + hs.status = "Degraded" + hs.message = obj.status.error.message + return hs +end + +hs.status = "Progressing" +hs.message = "Waiting for status" + +return hs diff --git a/pkg/resource_customizations/snapshot.storage.k8s.io/VolumeSnapshot/health_test.yaml b/pkg/resource_customizations/snapshot.storage.k8s.io/VolumeSnapshot/health_test.yaml new file mode 100644 index 0000000..7914d4a --- /dev/null +++ b/pkg/resource_customizations/snapshot.storage.k8s.io/VolumeSnapshot/health_test.yaml @@ -0,0 +1,14 @@ +tests: +- healthStatus: + status: Progressing + message: "Waiting for status" + inputPath: testdata/initializing.yaml +- healthStatus: + status: Healthy + message: "Ready to use" + inputPath: testdata/good.yaml +- healthStatus: + status: Degraded + message: "VolumeSnapshotContent is dynamically provisioned while expecting a pre-provisioned one" + inputPath: testdata/bad.yaml + diff --git a/pkg/resource_customizations/snapshot.storage.k8s.io/VolumeSnapshot/testdata/bad.yaml b/pkg/resource_customizations/snapshot.storage.k8s.io/VolumeSnapshot/testdata/bad.yaml new file mode 100644 index 0000000..2d7447f --- /dev/null +++ b/pkg/resource_customizations/snapshot.storage.k8s.io/VolumeSnapshot/testdata/bad.yaml @@ -0,0 +1,14 @@ +apiVersion: snapshot.storage.k8s.io/v1 +kind: VolumeSnapshot +metadata: + name: data-04-06-2023 +spec: + source: + volumeSnapshotContentName: data-04-06-2023 +status: + error: + message: >- + VolumeSnapshotContent is dynamically provisioned while expecting a + pre-provisioned one + time: '2023-06-05T14:51:25Z' + readyToUse: false diff --git a/pkg/resource_customizations/snapshot.storage.k8s.io/VolumeSnapshot/testdata/good.yaml b/pkg/resource_customizations/snapshot.storage.k8s.io/VolumeSnapshot/testdata/good.yaml new file mode 100644 index 0000000..b8a82ef --- /dev/null +++ b/pkg/resource_customizations/snapshot.storage.k8s.io/VolumeSnapshot/testdata/good.yaml @@ -0,0 +1,15 @@ +apiVersion: snapshot.storage.k8s.io/v1 +kind: VolumeSnapshot +metadata: + finalizers: + - snapshot.storage.kubernetes.io/volumesnapshot-as-source-protection + - snapshot.storage.kubernetes.io/volumesnapshot-bound-protection +status: + boundVolumeSnapshotContentName: snapcontent-7db10be0-424c-4ed2-9dfe-6c2120eae05b + creationTime: '2023-06-04T19:13:20Z' + readyToUse: true + restoreSize: 1Ti +spec: + source: + persistentVolumeClaimName: mask-data-process-trcxk-mysql-data + volumeSnapshotClassName: azure-tools diff --git a/pkg/resource_customizations/snapshot.storage.k8s.io/VolumeSnapshot/testdata/initializing.yaml b/pkg/resource_customizations/snapshot.storage.k8s.io/VolumeSnapshot/testdata/initializing.yaml new file mode 100644 index 0000000..3df029d --- /dev/null +++ 
b/pkg/resource_customizations/snapshot.storage.k8s.io/VolumeSnapshot/testdata/initializing.yaml @@ -0,0 +1,7 @@ +apiVersion: snapshot.storage.k8s.io/v1 +kind: VolumeSnapshot +metadata: + name: data-04-06-2023 +spec: + driver: disk.csi.azure.com +status: {} diff --git a/pkg/resource_customizations/snapshot.storage.k8s.io/VolumeSnapshotContent/health.lua b/pkg/resource_customizations/snapshot.storage.k8s.io/VolumeSnapshotContent/health.lua new file mode 100644 index 0000000..082de2c --- /dev/null +++ b/pkg/resource_customizations/snapshot.storage.k8s.io/VolumeSnapshotContent/health.lua @@ -0,0 +1,18 @@ +local hs = {} + +if obj.status ~= nil and obj.status.readyToUse then + hs.status = "Healthy" + hs.message = "Ready to use" + return hs +end + +if obj.status ~= nil and obj.status.error ~= nil then + hs.status = "Degraded" + hs.message = obj.status.error.message + return hs +end + +hs.status = "Progressing" +hs.message = "Waiting for status" + +return hs diff --git a/pkg/resource_customizations/snapshot.storage.k8s.io/VolumeSnapshotContent/health_test.yaml b/pkg/resource_customizations/snapshot.storage.k8s.io/VolumeSnapshotContent/health_test.yaml new file mode 100644 index 0000000..6cc455a --- /dev/null +++ b/pkg/resource_customizations/snapshot.storage.k8s.io/VolumeSnapshotContent/health_test.yaml @@ -0,0 +1,13 @@ +tests: +- healthStatus: + status: Progressing + message: "Waiting for status" + inputPath: testdata/initializing.yaml +- healthStatus: + status: Healthy + message: "Ready to use" + inputPath: testdata/good.yaml +- healthStatus: + status: Degraded + message: "Failed to check and update snapshot content" + inputPath: testdata/bad.yaml diff --git a/pkg/resource_customizations/snapshot.storage.k8s.io/VolumeSnapshotContent/testdata/bad.yaml b/pkg/resource_customizations/snapshot.storage.k8s.io/VolumeSnapshotContent/testdata/bad.yaml new file mode 100644 index 0000000..d8d3d3d --- /dev/null +++ b/pkg/resource_customizations/snapshot.storage.k8s.io/VolumeSnapshotContent/testdata/bad.yaml @@ -0,0 +1,12 @@ +apiVersion: snapshot.storage.k8s.io/v1 +kind: VolumeSnapshotContent +metadata: + name: data-04-06-2023 +spec: + driver: disk.csi.azure.com +status: + error: + message: >- + Failed to check and update snapshot content + time: '2023-06-05T15:44:50Z' + readyToUse: false diff --git a/pkg/resource_customizations/snapshot.storage.k8s.io/VolumeSnapshotContent/testdata/good.yaml b/pkg/resource_customizations/snapshot.storage.k8s.io/VolumeSnapshotContent/testdata/good.yaml new file mode 100644 index 0000000..56166be --- /dev/null +++ b/pkg/resource_customizations/snapshot.storage.k8s.io/VolumeSnapshotContent/testdata/good.yaml @@ -0,0 +1,20 @@ +apiVersion: snapshot.storage.k8s.io/v1 +kind: VolumeSnapshotContent +metadata: + creationTimestamp: '2023-06-04T19:13:19Z' + finalizers: + - snapshot.storage.kubernetes.io/volumesnapshotcontent-bound-protection +status: + creationTime: 1685906000388294100 + readyToUse: true + restoreSize: 1099511627776 + snapshotHandle: >- + /subscriptions/XXXXXX +spec: + driver: disk.csi.azure.com + source: + volumeHandle: >- + /subscriptions/XXXXXX + volumeSnapshotClassName: azure-tools + volumeSnapshotRef: + apiVersion: snapshot.storage.k8s.io/v1 diff --git a/pkg/resource_customizations/snapshot.storage.k8s.io/VolumeSnapshotContent/testdata/initializing.yaml b/pkg/resource_customizations/snapshot.storage.k8s.io/VolumeSnapshotContent/testdata/initializing.yaml new file mode 100644 index 0000000..8558cf3 --- /dev/null +++ 
b/pkg/resource_customizations/snapshot.storage.k8s.io/VolumeSnapshotContent/testdata/initializing.yaml @@ -0,0 +1,7 @@ +apiVersion: snapshot.storage.k8s.io/v1 +kind: VolumeSnapshotContent +metadata: + name: data-04-06-2023 +spec: + driver: disk.csi.azure.com +status: {} diff --git a/pkg/resource_customizations/sparkoperator.k8s.io/SparkApplication/health.lua b/pkg/resource_customizations/sparkoperator.k8s.io/SparkApplication/health.lua new file mode 100644 index 0000000..c900823 --- /dev/null +++ b/pkg/resource_customizations/sparkoperator.k8s.io/SparkApplication/health.lua @@ -0,0 +1,144 @@ +local health_status = {} +-- Can't use standard lib, math.huge equivalent +local infinity = 2^1024-1 + +local function executor_range_api() + local min_executor_instances = 0 + local max_executor_instances = infinity + if obj.spec.dynamicAllocation.maxExecutors then + max_executor_instances = obj.spec.dynamicAllocation.maxExecutors + end + if obj.spec.dynamicAllocation.minExecutors then + min_executor_instances = obj.spec.dynamicAllocation.minExecutors + end + return min_executor_instances, max_executor_instances +end + +local function maybe_executor_range_spark_conf() + local min_executor_instances = 0 + local max_executor_instances = infinity + if obj.spec.sparkConf["spark.streaming.dynamicAllocation.enabled"] ~= nil and + obj.spec.sparkConf["spark.streaming.dynamicAllocation.enabled"] == "true" then + if(obj.spec.sparkConf["spark.streaming.dynamicAllocation.maxExecutors"] ~= nil) then + max_executor_instances = tonumber(obj.spec.sparkConf["spark.streaming.dynamicAllocation.maxExecutors"]) + end + if(obj.spec.sparkConf["spark.streaming.dynamicAllocation.minExecutors"] ~= nil) then + min_executor_instances = tonumber(obj.spec.sparkConf["spark.streaming.dynamicAllocation.minExecutors"]) + end + return min_executor_instances, max_executor_instances + elseif obj.spec.sparkConf["spark.dynamicAllocation.enabled"] ~= nil and + obj.spec.sparkConf["spark.dynamicAllocation.enabled"] == "true" then + if(obj.spec.sparkConf["spark.dynamicAllocation.maxExecutors"] ~= nil) then + max_executor_instances = tonumber(obj.spec.sparkConf["spark.dynamicAllocation.maxExecutors"]) + end + if(obj.spec.sparkConf["spark.dynamicAllocation.minExecutors"] ~= nil) then + min_executor_instances = tonumber(obj.spec.sparkConf["spark.dynamicAllocation.minExecutors"]) + end + return min_executor_instances, max_executor_instances + else + return nil + end +end + +local function maybe_executor_range() + if obj.spec["dynamicAllocation"] and obj.spec.dynamicAllocation.enabled then + return executor_range_api() + elseif obj.spec["sparkConf"] ~= nil then + return maybe_executor_range_spark_conf() + else + return nil + end +end + +local function dynamic_executors_without_spec_config() + if obj.spec.dynamicAllocation == nil and obj.spec.executor.instances == nil then + return true + else + return false + end +end + +if obj.status ~= nil then + if obj.status.applicationState.state ~= nil then + if obj.status.applicationState.state == "" then + health_status.status = "Progressing" + health_status.message = "SparkApplication was added, enqueuing it for submission" + return health_status + end + if obj.status.applicationState.state == "RUNNING" then + if obj.status.executorState ~= nil then + count=0 + for i, executorState in pairs(obj.status.executorState) do + if executorState == "RUNNING" then + count=count+1 + end + end + if obj.spec.executor.instances ~= nil and obj.spec.executor.instances == count then + health_status.status = "Healthy" + 
health_status.message = "SparkApplication is Running" + return health_status + elseif maybe_executor_range() then + local min_executor_instances, max_executor_instances = maybe_executor_range() + if count >= min_executor_instances and count <= max_executor_instances then + health_status.status = "Healthy" + health_status.message = "SparkApplication is Running" + return health_status + end + elseif dynamic_executors_without_spec_config() and count >= 1 then + health_status.status = "Healthy" + health_status.message = "SparkApplication is Running" + return health_status + end + end + end + if obj.status.applicationState.state == "SUBMITTED" then + health_status.status = "Progressing" + health_status.message = "SparkApplication was submitted successfully" + return health_status + end + if obj.status.applicationState.state == "COMPLETED" then + health_status.status = "Healthy" + health_status.message = "SparkApplication was Completed" + return health_status + end + if obj.status.applicationState.state == "FAILED" then + health_status.status = "Degraded" + health_status.message = obj.status.applicationState.errorMessage + return health_status + end + if obj.status.applicationState.state == "SUBMISSION_FAILED" then + health_status.status = "Degraded" + health_status.message = obj.status.applicationState.errorMessage + return health_status + end + if obj.status.applicationState.state == "PENDING_RERUN" then + health_status.status = "Progressing" + health_status.message = "SparkApplication is Pending Rerun" + return health_status + end + if obj.status.applicationState.state == "INVALIDATING" then + health_status.status = "Missing" + health_status.message = "SparkApplication is in InvalidatingState" + return health_status + end + if obj.status.applicationState.state == "SUCCEEDING" then + health_status.status = "Progressing" + health_status.message = [[The driver pod has been completed successfully. The executor pods terminate and are cleaned up. 
+ Under these circumstances, we assume the executor pods are completed.]] + return health_status + end + if obj.status.applicationState.state == "FAILING" then + health_status.status = "Degraded" + health_status.message = obj.status.applicationState.errorMessage + return health_status + end + if obj.status.applicationState.state == "UNKNOWN" then + health_status.status = "Progressing" + health_status.message = "SparkApplication is in UnknownState because the driver pod or one or more executor pods are in an unknown state" + return health_status + end + end +end +health_status.status = "Progressing" +health_status.message = "Waiting for Executor pods" +return health_status diff --git a/pkg/resource_customizations/sparkoperator.k8s.io/SparkApplication/health_test.yaml b/pkg/resource_customizations/sparkoperator.k8s.io/SparkApplication/health_test.yaml new file mode 100644 index 0000000..e0ad7df --- /dev/null +++ b/pkg/resource_customizations/sparkoperator.k8s.io/SparkApplication/health_test.yaml @@ -0,0 +1,29 @@ +tests: +- healthStatus: + status: Progressing + message: "SparkApplication is Pending Rerun" + inputPath: testdata/progressing.yaml +- healthStatus: + status: Degraded + message: "Job Failed" + inputPath: testdata/degraded.yaml +- healthStatus: + status: Healthy + message: "SparkApplication is Running" + inputPath: testdata/healthy.yaml +- healthStatus: + status: Healthy + message: "SparkApplication is Running" + inputPath: testdata/healthy_dynamic_alloc.yaml +- healthStatus: + status: Healthy + message: "SparkApplication is Running" + inputPath: testdata/healthy_dynamic_alloc_dstream.yaml +- healthStatus: + status: Healthy + message: "SparkApplication is Running" + inputPath: testdata/healthy_dynamic_alloc_operator_api.yaml +- healthStatus: + status: Healthy + message: "SparkApplication is Running" + inputPath: testdata/healthy_dynamic_alloc_without_spec_config.yaml diff --git a/pkg/resource_customizations/sparkoperator.k8s.io/SparkApplication/testdata/degraded.yaml b/pkg/resource_customizations/sparkoperator.k8s.io/SparkApplication/testdata/degraded.yaml new file mode 100644 index 0000000..20e12a8 --- /dev/null +++ b/pkg/resource_customizations/sparkoperator.k8s.io/SparkApplication/testdata/degraded.yaml @@ -0,0 +1,33 @@ +apiVersion: sparkoperator.k8s.io/v1beta2 +kind: SparkApplication +metadata: + generation: 4 + labels: + argocd.argoproj.io/instance: spark-job + name: spark-job-app + namespace: spark-cluster + resourceVersion: "31812990" + uid: bfee52b0-74ca-4465-8005-f6643097ed64 +spec: + executor: + instances: 4 +status: + applicationState: + state: FAILED + errorMessage: Job Failed + driverInfo: + podName: spark-job-app-driver + webUIAddress: 172.20.207.161:4040 + webUIPort: 4040 + webUIServiceName: spark-job-app-ui-svc + executionAttempts: 13 + executorState: + spark-job-app-1591613851251-exec-1: FAILED + spark-job-app-1591613851251-exec-2: RUNNING + spark-job-app-1591613851251-exec-4: FAILED + spark-job-app-1591613851251-exec-5: RUNNING + lastSubmissionAttemptTime: "2020-06-08T10:57:32Z" + sparkApplicationId: spark-a5920b2a5aa04d22a737c60759b5bf82 + submissionAttempts: 1 + submissionID: 3e713ec8-9f6c-4e78-ac28-749797c846f0 + terminationTime: null \ No newline at end of file diff --git a/pkg/resource_customizations/sparkoperator.k8s.io/SparkApplication/testdata/healthy.yaml b/pkg/resource_customizations/sparkoperator.k8s.io/SparkApplication/testdata/healthy.yaml new file mode 100644 index 0000000..cd6e858 --- /dev/null +++ 
b/pkg/resource_customizations/sparkoperator.k8s.io/SparkApplication/testdata/healthy.yaml @@ -0,0 +1,32 @@ +apiVersion: sparkoperator.k8s.io/v1beta2 +kind: SparkApplication +metadata: + generation: 4 + labels: + argocd.argoproj.io/instance: spark-job + name: spark-job-app + namespace: spark-cluster + resourceVersion: "31812990" + uid: bfee52b0-74ca-4465-8005-f6643097ed64 +spec: + executor: + instances: 4 +status: + applicationState: + state: RUNNING + driverInfo: + podName: ingestion-datalake-news-app-driver + webUIAddress: 172.20.207.161:4040 + webUIPort: 4040 + webUIServiceName: ingestion-datalake-news-app-ui-svc + executionAttempts: 13 + executorState: + ingestion-datalake-news-app-1591613851251-exec-1: RUNNING + ingestion-datalake-news-app-1591613851251-exec-2: RUNNING + ingestion-datalake-news-app-1591613851251-exec-4: RUNNING + ingestion-datalake-news-app-1591613851251-exec-5: RUNNING + lastSubmissionAttemptTime: "2020-06-08T10:57:32Z" + sparkApplicationId: spark-a5920b2a5aa04d22a737c60759b5bf82 + submissionAttempts: 1 + submissionID: 3e713ec8-9f6c-4e78-ac28-749797c846f0 + terminationTime: null \ No newline at end of file diff --git a/pkg/resource_customizations/sparkoperator.k8s.io/SparkApplication/testdata/healthy_dynamic_alloc.yaml b/pkg/resource_customizations/sparkoperator.k8s.io/SparkApplication/testdata/healthy_dynamic_alloc.yaml new file mode 100644 index 0000000..9ff52e7 --- /dev/null +++ b/pkg/resource_customizations/sparkoperator.k8s.io/SparkApplication/testdata/healthy_dynamic_alloc.yaml @@ -0,0 +1,37 @@ +apiVersion: sparkoperator.k8s.io/v1beta2 +kind: SparkApplication +metadata: + generation: 4 + labels: + argocd.argoproj.io/instance: spark-job + name: spark-job-app + namespace: spark-cluster + resourceVersion: "31812990" + uid: bfee52b0-74ca-4465-8005-f6643097ed64 +spec: + executor: + instances: 4 + sparkConf: + spark.dynamicAllocation.enabled: 'true' + spark.dynamicAllocation.maxExecutors: '10' + spark.dynamicAllocation.minExecutors: '2' +status: + applicationState: + state: RUNNING + driverInfo: + podName: ingestion-datalake-news-app-driver + webUIAddress: 172.20.207.161:4040 + webUIPort: 4040 + webUIServiceName: ingestion-datalake-news-app-ui-svc + executionAttempts: 13 + executorState: + ingestion-datalake-news-app-1591613851251-exec-1: RUNNING + ingestion-datalake-news-app-1591613851251-exec-2: RUNNING + ingestion-datalake-news-app-1591613851251-exec-4: RUNNING + ingestion-datalake-news-app-1591613851251-exec-5: RUNNING + ingestion-datalake-news-app-1591613851251-exec-6: RUNNING + lastSubmissionAttemptTime: "2020-06-08T10:57:32Z" + sparkApplicationId: spark-a5920b2a5aa04d22a737c60759b5bf82 + submissionAttempts: 1 + submissionID: 3e713ec8-9f6c-4e78-ac28-749797c846f0 + terminationTime: null diff --git a/pkg/resource_customizations/sparkoperator.k8s.io/SparkApplication/testdata/healthy_dynamic_alloc_dstream.yaml b/pkg/resource_customizations/sparkoperator.k8s.io/SparkApplication/testdata/healthy_dynamic_alloc_dstream.yaml new file mode 100644 index 0000000..ce24ff7 --- /dev/null +++ b/pkg/resource_customizations/sparkoperator.k8s.io/SparkApplication/testdata/healthy_dynamic_alloc_dstream.yaml @@ -0,0 +1,35 @@ +apiVersion: sparkoperator.k8s.io/v1beta2 +kind: SparkApplication +metadata: + generation: 4 + labels: + argocd.argoproj.io/instance: spark-job + name: spark-job-app + namespace: spark-cluster + resourceVersion: "31812990" + uid: bfee52b0-74ca-4465-8005-f6643097ed64 +spec: + executor: + instances: 4 + sparkConf: + spark.streaming.dynamicAllocation.enabled: 'true' 
+ spark.streaming.dynamicAllocation.maxExecutors: '10' + spark.streaming.dynamicAllocation.minExecutors: '2' +status: + applicationState: + state: RUNNING + driverInfo: + podName: ingestion-datalake-news-app-driver + webUIAddress: 172.20.207.161:4040 + webUIPort: 4040 + webUIServiceName: ingestion-datalake-news-app-ui-svc + executionAttempts: 13 + executorState: + ingestion-datalake-news-app-1591613851251-exec-1: RUNNING + ingestion-datalake-news-app-1591613851251-exec-4: RUNNING + ingestion-datalake-news-app-1591613851251-exec-6: RUNNING + lastSubmissionAttemptTime: "2020-06-08T10:57:32Z" + sparkApplicationId: spark-a5920b2a5aa04d22a737c60759b5bf82 + submissionAttempts: 1 + submissionID: 3e713ec8-9f6c-4e78-ac28-749797c846f0 + terminationTime: null diff --git a/pkg/resource_customizations/sparkoperator.k8s.io/SparkApplication/testdata/healthy_dynamic_alloc_operator_api.yaml b/pkg/resource_customizations/sparkoperator.k8s.io/SparkApplication/testdata/healthy_dynamic_alloc_operator_api.yaml new file mode 100644 index 0000000..538a279 --- /dev/null +++ b/pkg/resource_customizations/sparkoperator.k8s.io/SparkApplication/testdata/healthy_dynamic_alloc_operator_api.yaml @@ -0,0 +1,38 @@ +apiVersion: sparkoperator.k8s.io/v1beta2 +kind: SparkApplication +metadata: + generation: 4 + labels: + argocd.argoproj.io/instance: spark-job + name: spark-job-app + namespace: spark-cluster + resourceVersion: "31812990" + uid: bfee52b0-74ca-4465-8005-f6643097ed64 +spec: + executor: + instances: 4 + dynamicAllocation: + enabled: true + initialExecutors: 2 + minExecutors: 2 + maxExecutors: 10 +status: + applicationState: + state: RUNNING + driverInfo: + podName: ingestion-datalake-news-app-driver + webUIAddress: 172.20.207.161:4040 + webUIPort: 4040 + webUIServiceName: ingestion-datalake-news-app-ui-svc + executionAttempts: 13 + executorState: + ingestion-datalake-news-app-1591613851251-exec-1: RUNNING + ingestion-datalake-news-app-1591613851251-exec-2: RUNNING + ingestion-datalake-news-app-1591613851251-exec-4: RUNNING + ingestion-datalake-news-app-1591613851251-exec-5: RUNNING + ingestion-datalake-news-app-1591613851251-exec-6: RUNNING + lastSubmissionAttemptTime: "2020-06-08T10:57:32Z" + sparkApplicationId: spark-a5920b2a5aa04d22a737c60759b5bf82 + submissionAttempts: 1 + submissionID: 3e713ec8-9f6c-4e78-ac28-749797c846f0 + terminationTime: null diff --git a/pkg/resource_customizations/sparkoperator.k8s.io/SparkApplication/testdata/healthy_dynamic_alloc_without_spec_config.yaml b/pkg/resource_customizations/sparkoperator.k8s.io/SparkApplication/testdata/healthy_dynamic_alloc_without_spec_config.yaml new file mode 100644 index 0000000..a2ab7b8 --- /dev/null +++ b/pkg/resource_customizations/sparkoperator.k8s.io/SparkApplication/testdata/healthy_dynamic_alloc_without_spec_config.yaml @@ -0,0 +1,31 @@ +apiVersion: sparkoperator.k8s.io/v1beta2 +kind: SparkApplication +metadata: + generation: 4 + labels: + argocd.argoproj.io/instance: spark-job + name: spark-job-app + namespace: spark-cluster + resourceVersion: "31812990" + uid: bfee52b0-74ca-4465-8005-f6643097ed64 +spec: + executor: {} +status: + applicationState: + state: RUNNING + driverInfo: + podName: ingestion-datalake-news-app-driver + webUIAddress: 172.20.207.161:4040 + webUIPort: 4040 + webUIServiceName: ingestion-datalake-news-app-ui-svc + executionAttempts: 13 + executorState: + ingestion-datalake-news-app-1591613851251-exec-1: RUNNING + ingestion-datalake-news-app-1591613851251-exec-2: RUNNING + ingestion-datalake-news-app-1591613851251-exec-4: RUNNING 
+ ingestion-datalake-news-app-1591613851251-exec-5: RUNNING + lastSubmissionAttemptTime: "2020-06-08T10:57:32Z" + sparkApplicationId: spark-a5920b2a5aa04d22a737c60759b5bf82 + submissionAttempts: 1 + submissionID: 3e713ec8-9f6c-4e78-ac28-749797c846f0 + terminationTime: null diff --git a/pkg/resource_customizations/sparkoperator.k8s.io/SparkApplication/testdata/progressing.yaml b/pkg/resource_customizations/sparkoperator.k8s.io/SparkApplication/testdata/progressing.yaml new file mode 100644 index 0000000..177fe68 --- /dev/null +++ b/pkg/resource_customizations/sparkoperator.k8s.io/SparkApplication/testdata/progressing.yaml @@ -0,0 +1,32 @@ +apiVersion: sparkoperator.k8s.io/v1beta2 +kind: SparkApplication +metadata: + generation: 4 + labels: + argocd.argoproj.io/instance: spark-job + name: spark-job-app + namespace: spark-cluster + resourceVersion: "31812990" + uid: bfee52b0-74ca-4465-8005-f6643097ed64 +spec: + executor: + instances: 4 +status: + applicationState: + state: PENDING_RERUN + driverInfo: + podName: ingestion-datalake-news-app-driver + webUIAddress: 172.20.207.161:4040 + webUIPort: 4040 + webUIServiceName: ingestion-datalake-news-app-ui-svc + executionAttempts: 13 + executorState: + ingestion-datalake-news-app-1591613851251-exec-1: PENDING_RERUN + ingestion-datalake-news-app-1591613851251-exec-2: RUNNING + ingestion-datalake-news-app-1591613851251-exec-4: PENDING_RERUN + ingestion-datalake-news-app-1591613851251-exec-5: RUNNING + lastSubmissionAttemptTime: "2020-06-08T10:57:32Z" + sparkApplicationId: spark-a5920b2a5aa04d22a737c60759b5bf82 + submissionAttempts: 1 + submissionID: 3e713ec8-9f6c-4e78-ac28-749797c846f0 + terminationTime: null \ No newline at end of file diff --git a/pkg/resource_customizations/spot.io/SpotDeployment/health.lua b/pkg/resource_customizations/spot.io/SpotDeployment/health.lua new file mode 100644 index 0000000..cd39bca --- /dev/null +++ b/pkg/resource_customizations/spot.io/SpotDeployment/health.lua @@ -0,0 +1,41 @@ +local hs = {} + +if obj.status == nil or obj.status.conditions == nil then + return hs +end + +for _, condition in ipairs(obj.status.conditions) do + if condition.type == "InvalidSpec" then + hs.status = "Degraded" + hs.message = condition.message + return hs + end + if condition.type == "Progressing" and condition.reason == "RolloutAborted" then + hs.status = "Degraded" + hs.message = condition.message + return hs + end + if condition.type == "Progressing" and condition.reason == "ProgressDeadlineExceeded" then + hs.status = "Degraded" + hs.message = condition.message + return hs + end + if condition.type == "Paused" and condition.status == "True" then + hs.status = "Suspended" + hs.message = "Rollout is paused" + return hs + end +end + +if obj.status.phase == "Progressing" then + hs.status = "Progressing" + hs.message = "Waiting for rollout to finish steps" + return hs +end + +hs.status = "Healthy" +hs.message = "" +return hs + + + diff --git a/pkg/resource_customizations/spot.io/SpotDeployment/health_test.yaml b/pkg/resource_customizations/spot.io/SpotDeployment/health_test.yaml new file mode 100644 index 0000000..9e81559 --- /dev/null +++ b/pkg/resource_customizations/spot.io/SpotDeployment/health_test.yaml @@ -0,0 +1,12 @@ +tests: +- healthStatus: + status: Degraded + message: "Rollout is aborted" + inputPath: testdata/degraded_spotdeployment.yaml +- healthStatus: + status: Healthy + inputPath: testdata/healthy_spotdeployment.yaml +- healthStatus: + status: Degraded + message: "The Rollout \"basic\" is invalid: spec.strategy.strategy: 
Required value: Rollout has missing field '.spec.strategy.canary or .spec.strategy.blueGreen'" + inputPath: testdata/invalid_spec_spotdeployment.yaml \ No newline at end of file diff --git a/pkg/resource_customizations/spot.io/SpotDeployment/testdata/degraded_spotdeployment.yaml b/pkg/resource_customizations/spot.io/SpotDeployment/testdata/degraded_spotdeployment.yaml new file mode 100644 index 0000000..9a79343 --- /dev/null +++ b/pkg/resource_customizations/spot.io/SpotDeployment/testdata/degraded_spotdeployment.yaml @@ -0,0 +1,53 @@ +apiVersion: spot.io/v1beta1 +kind: SpotDeployment +metadata: + name: canary-demo + namespace: default +spec: + replicas: 5 + revisionHistoryLimit: 3 + selector: + matchLabels: + app: canary-demo + template: + metadata: + creationTimestamp: null + labels: + app: canary-demo + spec: + containers: + - image: nginx:1.21 + imagePullPolicy: Always + name: canary-demo + ports: + - containerPort: 8080 + name: http + protocol: TCP + resources: + requests: + cpu: 5m + memory: 32Mi +status: + availableReplicas: 5 + conditions: + - lastTransitionTime: "2019-11-03T01:32:46Z" + lastUpdateTime: "2019-11-03T01:32:46Z" + message: Rollout has minimum availability + reason: AvailableReason + status: "True" + type: Available + - lastTransitionTime: "2019-11-05T18:20:12Z" + lastUpdateTime: "2019-11-05T18:20:12Z" + message: Rollout is aborted + reason: RolloutAborted + status: "False" + type: Progressing + observedGeneration: 58b949649c + readyReplicas: 5 + replicas: 5 + selector: app=canary-demo + liveVersionDate: "2022-07-25T08:57:57.000Z" + liveVersionImages: + nginx: nginx:1.20 + phase: Degraded + revision: "52" \ No newline at end of file diff --git a/pkg/resource_customizations/spot.io/SpotDeployment/testdata/healthy_spotdeployment.yaml b/pkg/resource_customizations/spot.io/SpotDeployment/testdata/healthy_spotdeployment.yaml new file mode 100644 index 0000000..175b698 --- /dev/null +++ b/pkg/resource_customizations/spot.io/SpotDeployment/testdata/healthy_spotdeployment.yaml @@ -0,0 +1,54 @@ +apiVersion: spot.io/v1beta1 +kind: SpotDeployment +metadata: + clusterName: '' + creationTimestamp: '2019-05-01T21:55:30Z' + generation: 1 + labels: + app.kubernetes.io/instance: guestbook-canary + ksonnet.io/component: guestbook-ui + name: guestbook-canary + namespace: default + resourceVersion: '955764' + uid: d6105ccd-6c5b-11e9-b8d7-025000000001 +spec: + minReadySeconds: 10 + replicas: 5 + selector: + matchLabels: + app: guestbook-canary + template: + metadata: + creationTimestamp: null + labels: + app: guestbook-canary + spec: + containers: + - image: 'gcr.io/heptio-images/ks-guestbook-demo:0.1' + name: guestbook-canary + ports: + - containerPort: 80 + resources: {} +status: + conditions: + - lastTransitionTime: '2019-05-01T21:55:30Z' + lastUpdateTime: '2019-05-01T21:55:58Z' + message: ReplicaSet "guestbook-canary-84ccfddd66" has successfully progressed. 
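Reviewer note: the SpotDeployment health.lua above checks conditions before it ever consults status.phase, so a Progressing condition with reason RolloutAborted or ProgressDeadlineExceeded wins over everything else. A minimal sketch (standalone Lua, not part of the patch; condition fields taken from the degraded_spotdeployment.yaml fixture just above) of why that fixture maps to Degraded:

```lua
-- Sketch: health.lua returns as soon as it sees Progressing/RolloutAborted,
-- so status.phase is never consulted for this fixture.
local obj = { status = { phase = "Degraded", conditions = {
  { type = "Available",   status = "True",  reason = "AvailableReason", message = "Rollout has minimum availability" },
  { type = "Progressing", status = "False", reason = "RolloutAborted",  message = "Rollout is aborted" },
} } }
for _, condition in ipairs(obj.status.conditions) do
  if condition.type == "Progressing" and condition.reason == "RolloutAborted" then
    print("Degraded", condition.message)  --> Degraded   Rollout is aborted
  end
end
```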
+ reason: NewReplicaSetAvailable + status: 'True' + type: Progressing + - lastTransitionTime: '2019-05-01T21:55:58Z' + lastUpdateTime: '2019-05-01T21:55:58Z' + message: Rollout has minimum availability + reason: AvailableReason + status: 'True' + type: Available + observedGeneration: c45557fd9 + readyReplicas: 5 + replicas: 5 + selector: app=guestbook-canary + liveVersionDate: "2022-07-14T07:56:27.000Z" + liveVersionImages: + rollouts-demo: gcr.io/heptio-images/ks-guestbook-demo:0.1 + phase: Healthy + revision: "9" \ No newline at end of file diff --git a/pkg/resource_customizations/spot.io/SpotDeployment/testdata/invalid_spec_spotdeployment.yaml b/pkg/resource_customizations/spot.io/SpotDeployment/testdata/invalid_spec_spotdeployment.yaml new file mode 100644 index 0000000..f515c9e --- /dev/null +++ b/pkg/resource_customizations/spot.io/SpotDeployment/testdata/invalid_spec_spotdeployment.yaml @@ -0,0 +1,54 @@ +apiVersion: spot.io/v1beta1 +kind: SpotDeployment +metadata: + creationTimestamp: "2020-11-13T00:22:49Z" + generation: 3 + name: basic + namespace: argocd-e2e + resourceVersion: "181746" + uid: 5b0926f3-30b7-4727-a76e-46c0d2617906 +spec: + replicas: 1 + selector: + matchLabels: + app: basic + template: + metadata: + creationTimestamp: null + labels: + app: basic + spec: + containers: + - image: nginx:1.19-alpine + name: basic + resources: + requests: + cpu: 1m + memory: 16Mi +status: + availableReplicas: 1 + conditions: + - lastTransitionTime: "2020-11-13T00:22:48Z" + lastUpdateTime: "2020-11-13T00:22:50Z" + message: ReplicaSet "basic-754cb84d5" has successfully progressed. + reason: NewReplicaSetAvailable + status: "True" + type: Progressing + - lastTransitionTime: "2020-11-13T00:22:50Z" + lastUpdateTime: "2020-11-13T00:22:50Z" + message: Rollout has minimum availability + reason: AvailableReason + status: "True" + type: Available + - lastTransitionTime: "2020-11-13T00:40:30Z" + lastUpdateTime: "2020-11-13T00:40:30Z" + message: 'The Rollout "basic" is invalid: spec.strategy.strategy: Required value: + Rollout has missing field ''.spec.strategy.canary or .spec.strategy.blueGreen''' + reason: InvalidSpec + status: "True" + type: InvalidSpec + observedGeneration: "3" + readyReplicas: 1 + replicas: 1 + selector: app=basic + phase: Degraded diff --git a/pkg/resource_customizations/sql.cnrm.cloud.google.com/SQLDatabase/health.lua b/pkg/resource_customizations/sql.cnrm.cloud.google.com/SQLDatabase/health.lua new file mode 100644 index 0000000..585b5e2 --- /dev/null +++ b/pkg/resource_customizations/sql.cnrm.cloud.google.com/SQLDatabase/health.lua @@ -0,0 +1,39 @@ +local hs = { + status = "Progressing", + message = "Update in progress" +} +if obj.status ~= nil then + if obj.status.conditions ~= nil then + for i, condition in ipairs(obj.status.conditions) do + + -- Up To Date + if condition.reason == "UpToDate" and condition.status == "True" then + hs.status = "Healthy" + hs.message = condition.message + return hs + end + + -- Update Failed + if condition.reason == "UpdateFailed" then + hs.status = "Degraded" + hs.message = condition.message + return hs + end + + -- Dependency Not Found + if condition.reason == "DependencyNotFound" then + hs.status = "Degraded" + hs.message = condition.message + return hs + end + + -- Dependency Not Ready + if condition.reason == "DependencyNotReady" then + hs.status = "Suspended" + hs.message = condition.message + return hs + end + end + end +end +return hs \ No newline at end of file diff --git 
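Reviewer note: the SQLDatabase health.lua that just ended is byte-identical (blob 585b5e2) to the scripts this patch adds for SQLInstance, StorageBucket, StorageBucketAccessControl and StorageDefaultObjectAccessControl. The logic is a reason-to-state mapping on the Ready condition: UpToDate (with status True) means Healthy, UpdateFailed or DependencyNotFound means Degraded, DependencyNotReady means Suspended, and anything else falls through to the Progressing default. A hedged sketch of the same mapping written table-driven, in case the duplication is ever consolidated; this is a suggestion, not what the patch does:

```lua
-- Hypothetical refactor sketch (not in this patch): the per-kind scripts
-- repeat the same reason -> health mapping, which could be a lookup table.
-- Field names mirror the scripts above.
local reasonToStatus = {
  UpdateFailed       = "Degraded",
  DependencyNotFound = "Degraded",
  DependencyNotReady = "Suspended",
}

local function cnrmHealth(obj)
  local hs = { status = "Progressing", message = "Update in progress" }
  if obj.status == nil or obj.status.conditions == nil then
    return hs
  end
  for _, condition in ipairs(obj.status.conditions) do
    if condition.reason == "UpToDate" and condition.status == "True" then
      return { status = "Healthy", message = condition.message }
    end
    local mapped = reasonToStatus[condition.reason]
    if mapped ~= nil then
      return { status = mapped, message = condition.message }
    end
  end
  return hs
end

-- Example: matches the expectation in testdata/update_failed.yaml
local hs = cnrmHealth({ status = { conditions = {
  { reason = "UpdateFailed", status = "False", message = "Update failed" },
} } })
print(hs.status, hs.message)  --> Degraded   Update failed
```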
a/pkg/resource_customizations/sql.cnrm.cloud.google.com/SQLDatabase/health_test.yaml b/pkg/resource_customizations/sql.cnrm.cloud.google.com/SQLDatabase/health_test.yaml new file mode 100644 index 0000000..8354cfb --- /dev/null +++ b/pkg/resource_customizations/sql.cnrm.cloud.google.com/SQLDatabase/health_test.yaml @@ -0,0 +1,21 @@ +tests: +- healthStatus: + status: Degraded + message: "Dependency not found" + inputPath: testdata/dependency_not_found.yaml +- healthStatus: + status: Suspended + message: "Dependency not ready" + inputPath: testdata/dependency_not_ready.yaml +- healthStatus: + status: Healthy + message: "The resource is up to date" + inputPath: testdata/up_to_date.yaml +- healthStatus: + status: Degraded + message: "Update failed" + inputPath: testdata/update_failed.yaml +- healthStatus: + status: Progressing + message: "Update in progress" + inputPath: testdata/update_in_progress.yaml diff --git a/pkg/resource_customizations/sql.cnrm.cloud.google.com/SQLDatabase/testdata/dependency_not_found.yaml b/pkg/resource_customizations/sql.cnrm.cloud.google.com/SQLDatabase/testdata/dependency_not_found.yaml new file mode 100644 index 0000000..473c408 --- /dev/null +++ b/pkg/resource_customizations/sql.cnrm.cloud.google.com/SQLDatabase/testdata/dependency_not_found.yaml @@ -0,0 +1,9 @@ +apiVersion: sql.cnrm.cloud.google.com/v1beta1 +kind: SQLDatabase +status: + conditions: + - lastTransitionTime: '2022-07-01T12:56:21Z' + message: Dependency not found + reason: DependencyNotFound + status: 'False' + type: Ready \ No newline at end of file diff --git a/pkg/resource_customizations/sql.cnrm.cloud.google.com/SQLDatabase/testdata/dependency_not_ready.yaml b/pkg/resource_customizations/sql.cnrm.cloud.google.com/SQLDatabase/testdata/dependency_not_ready.yaml new file mode 100644 index 0000000..7b232b8 --- /dev/null +++ b/pkg/resource_customizations/sql.cnrm.cloud.google.com/SQLDatabase/testdata/dependency_not_ready.yaml @@ -0,0 +1,9 @@ +apiVersion: sql.cnrm.cloud.google.com/v1beta1 +kind: SQLDatabase +status: + conditions: + - lastTransitionTime: '2022-07-01T12:56:21Z' + message: Dependency not ready + reason: DependencyNotReady + status: 'False' + type: Ready \ No newline at end of file diff --git a/pkg/resource_customizations/sql.cnrm.cloud.google.com/SQLDatabase/testdata/up_to_date.yaml b/pkg/resource_customizations/sql.cnrm.cloud.google.com/SQLDatabase/testdata/up_to_date.yaml new file mode 100644 index 0000000..851787c --- /dev/null +++ b/pkg/resource_customizations/sql.cnrm.cloud.google.com/SQLDatabase/testdata/up_to_date.yaml @@ -0,0 +1,9 @@ +apiVersion: sql.cnrm.cloud.google.com/v1beta1 +kind: SQLDatabase +status: + conditions: + - lastTransitionTime: '2022-05-09T08:49:18Z' + message: The resource is up to date + reason: UpToDate + status: 'True' + type: Ready diff --git a/pkg/resource_customizations/sql.cnrm.cloud.google.com/SQLDatabase/testdata/update_failed.yaml b/pkg/resource_customizations/sql.cnrm.cloud.google.com/SQLDatabase/testdata/update_failed.yaml new file mode 100644 index 0000000..094adba --- /dev/null +++ b/pkg/resource_customizations/sql.cnrm.cloud.google.com/SQLDatabase/testdata/update_failed.yaml @@ -0,0 +1,9 @@ +apiVersion: sql.cnrm.cloud.google.com/v1beta1 +kind: SQLDatabase +status: + conditions: + - lastTransitionTime: '2022-07-01T12:56:21Z' + message: Update failed + reason: UpdateFailed + status: 'False' + type: Ready \ No newline at end of file diff --git a/pkg/resource_customizations/sql.cnrm.cloud.google.com/SQLDatabase/testdata/update_in_progress.yaml 
b/pkg/resource_customizations/sql.cnrm.cloud.google.com/SQLDatabase/testdata/update_in_progress.yaml new file mode 100644 index 0000000..aa9c1b8 --- /dev/null +++ b/pkg/resource_customizations/sql.cnrm.cloud.google.com/SQLDatabase/testdata/update_in_progress.yaml @@ -0,0 +1,9 @@ +apiVersion: sql.cnrm.cloud.google.com/v1beta1 +kind: SQLDatabase +status: + conditions: + - lastTransitionTime: '2022-07-01T12:56:21Z' + message: Update in progress + reason: Updating + status: 'False' + type: Ready \ No newline at end of file diff --git a/pkg/resource_customizations/sql.cnrm.cloud.google.com/SQLInstance/health.lua b/pkg/resource_customizations/sql.cnrm.cloud.google.com/SQLInstance/health.lua new file mode 100644 index 0000000..585b5e2 --- /dev/null +++ b/pkg/resource_customizations/sql.cnrm.cloud.google.com/SQLInstance/health.lua @@ -0,0 +1,39 @@ +local hs = { + status = "Progressing", + message = "Update in progress" +} +if obj.status ~= nil then + if obj.status.conditions ~= nil then + for i, condition in ipairs(obj.status.conditions) do + + -- Up To Date + if condition.reason == "UpToDate" and condition.status == "True" then + hs.status = "Healthy" + hs.message = condition.message + return hs + end + + -- Update Failed + if condition.reason == "UpdateFailed" then + hs.status = "Degraded" + hs.message = condition.message + return hs + end + + -- Dependency Not Found + if condition.reason == "DependencyNotFound" then + hs.status = "Degraded" + hs.message = condition.message + return hs + end + + -- Dependency Not Ready + if condition.reason == "DependencyNotReady" then + hs.status = "Suspended" + hs.message = condition.message + return hs + end + end + end +end +return hs \ No newline at end of file diff --git a/pkg/resource_customizations/sql.cnrm.cloud.google.com/SQLInstance/health_test.yaml b/pkg/resource_customizations/sql.cnrm.cloud.google.com/SQLInstance/health_test.yaml new file mode 100644 index 0000000..8354cfb --- /dev/null +++ b/pkg/resource_customizations/sql.cnrm.cloud.google.com/SQLInstance/health_test.yaml @@ -0,0 +1,21 @@ +tests: +- healthStatus: + status: Degraded + message: "Dependency not found" + inputPath: testdata/dependency_not_found.yaml +- healthStatus: + status: Suspended + message: "Dependency not ready" + inputPath: testdata/dependency_not_ready.yaml +- healthStatus: + status: Healthy + message: "The resource is up to date" + inputPath: testdata/up_to_date.yaml +- healthStatus: + status: Degraded + message: "Update failed" + inputPath: testdata/update_failed.yaml +- healthStatus: + status: Progressing + message: "Update in progress" + inputPath: testdata/update_in_progress.yaml diff --git a/pkg/resource_customizations/sql.cnrm.cloud.google.com/SQLInstance/testdata/dependency_not_found.yaml b/pkg/resource_customizations/sql.cnrm.cloud.google.com/SQLInstance/testdata/dependency_not_found.yaml new file mode 100644 index 0000000..7e5c48b --- /dev/null +++ b/pkg/resource_customizations/sql.cnrm.cloud.google.com/SQLInstance/testdata/dependency_not_found.yaml @@ -0,0 +1,9 @@ +apiVersion: sql.cnrm.cloud.google.com/v1beta1 +kind: SQLInstance +status: + conditions: + - lastTransitionTime: '2022-07-01T12:56:21Z' + message: Dependency not found + reason: DependencyNotFound + status: 'False' + type: Ready \ No newline at end of file diff --git a/pkg/resource_customizations/sql.cnrm.cloud.google.com/SQLInstance/testdata/dependency_not_ready.yaml b/pkg/resource_customizations/sql.cnrm.cloud.google.com/SQLInstance/testdata/dependency_not_ready.yaml new file mode 100644 index 
0000000..c8d7cd7 --- /dev/null +++ b/pkg/resource_customizations/sql.cnrm.cloud.google.com/SQLInstance/testdata/dependency_not_ready.yaml @@ -0,0 +1,9 @@ +apiVersion: sql.cnrm.cloud.google.com/v1beta1 +kind: SQLInstance +status: + conditions: + - lastTransitionTime: '2022-07-01T12:56:21Z' + message: Dependency not ready + reason: DependencyNotReady + status: 'False' + type: Ready \ No newline at end of file diff --git a/pkg/resource_customizations/sql.cnrm.cloud.google.com/SQLInstance/testdata/up_to_date.yaml b/pkg/resource_customizations/sql.cnrm.cloud.google.com/SQLInstance/testdata/up_to_date.yaml new file mode 100644 index 0000000..6e3f5dd --- /dev/null +++ b/pkg/resource_customizations/sql.cnrm.cloud.google.com/SQLInstance/testdata/up_to_date.yaml @@ -0,0 +1,9 @@ +apiVersion: sql.cnrm.cloud.google.com/v1beta1 +kind: SQLInstance +status: + conditions: + - lastTransitionTime: '2022-05-09T08:49:18Z' + message: The resource is up to date + reason: UpToDate + status: 'True' + type: Ready diff --git a/pkg/resource_customizations/sql.cnrm.cloud.google.com/SQLInstance/testdata/update_failed.yaml b/pkg/resource_customizations/sql.cnrm.cloud.google.com/SQLInstance/testdata/update_failed.yaml new file mode 100644 index 0000000..58a1ab0 --- /dev/null +++ b/pkg/resource_customizations/sql.cnrm.cloud.google.com/SQLInstance/testdata/update_failed.yaml @@ -0,0 +1,9 @@ +apiVersion: sql.cnrm.cloud.google.com/v1beta1 +kind: SQLInstance +status: + conditions: + - lastTransitionTime: '2022-07-01T12:56:21Z' + message: Update failed + reason: UpdateFailed + status: 'False' + type: Ready \ No newline at end of file diff --git a/pkg/resource_customizations/sql.cnrm.cloud.google.com/SQLInstance/testdata/update_in_progress.yaml b/pkg/resource_customizations/sql.cnrm.cloud.google.com/SQLInstance/testdata/update_in_progress.yaml new file mode 100644 index 0000000..501d5ee --- /dev/null +++ b/pkg/resource_customizations/sql.cnrm.cloud.google.com/SQLInstance/testdata/update_in_progress.yaml @@ -0,0 +1,9 @@ +apiVersion: sql.cnrm.cloud.google.com/v1beta1 +kind: SQLInstance +status: + conditions: + - lastTransitionTime: '2022-07-01T12:56:21Z' + message: Update in progress + reason: Updating + status: 'False' + type: Ready \ No newline at end of file diff --git a/pkg/resource_customizations/sql.cnrm.cloud.google.com/SQLUser/health_test.yaml b/pkg/resource_customizations/sql.cnrm.cloud.google.com/SQLUser/health_test.yaml new file mode 100644 index 0000000..8354cfb --- /dev/null +++ b/pkg/resource_customizations/sql.cnrm.cloud.google.com/SQLUser/health_test.yaml @@ -0,0 +1,21 @@ +tests: +- healthStatus: + status: Degraded + message: "Dependency not found" + inputPath: testdata/dependency_not_found.yaml +- healthStatus: + status: Suspended + message: "Dependency not ready" + inputPath: testdata/dependency_not_ready.yaml +- healthStatus: + status: Healthy + message: "The resource is up to date" + inputPath: testdata/up_to_date.yaml +- healthStatus: + status: Degraded + message: "Update failed" + inputPath: testdata/update_failed.yaml +- healthStatus: + status: Progressing + message: "Update in progress" + inputPath: testdata/update_in_progress.yaml diff --git a/pkg/resource_customizations/sql.cnrm.cloud.google.com/SQLUser/testdata/dependency_not_found.yaml b/pkg/resource_customizations/sql.cnrm.cloud.google.com/SQLUser/testdata/dependency_not_found.yaml new file mode 100644 index 0000000..eaceeca --- /dev/null +++ b/pkg/resource_customizations/sql.cnrm.cloud.google.com/SQLUser/testdata/dependency_not_found.yaml @@ -0,0 
+1,9 @@ +apiVersion: sql.cnrm.cloud.google.com/v1beta1 +kind: SQLUser +status: + conditions: + - lastTransitionTime: '2022-07-01T12:56:21Z' + message: Dependency not found + reason: DependencyNotFound + status: 'False' + type: Ready \ No newline at end of file diff --git a/pkg/resource_customizations/sql.cnrm.cloud.google.com/SQLUser/testdata/dependency_not_ready.yaml b/pkg/resource_customizations/sql.cnrm.cloud.google.com/SQLUser/testdata/dependency_not_ready.yaml new file mode 100644 index 0000000..4eb6724 --- /dev/null +++ b/pkg/resource_customizations/sql.cnrm.cloud.google.com/SQLUser/testdata/dependency_not_ready.yaml @@ -0,0 +1,9 @@ +apiVersion: sql.cnrm.cloud.google.com/v1beta1 +kind: SQLUser +status: + conditions: + - lastTransitionTime: '2022-07-01T12:56:21Z' + message: Dependency not ready + reason: DependencyNotReady + status: 'False' + type: Ready \ No newline at end of file diff --git a/pkg/resource_customizations/sql.cnrm.cloud.google.com/SQLUser/testdata/up_to_date.yaml b/pkg/resource_customizations/sql.cnrm.cloud.google.com/SQLUser/testdata/up_to_date.yaml new file mode 100644 index 0000000..96bfc04 --- /dev/null +++ b/pkg/resource_customizations/sql.cnrm.cloud.google.com/SQLUser/testdata/up_to_date.yaml @@ -0,0 +1,9 @@ +apiVersion: sql.cnrm.cloud.google.com/v1beta1 +kind: SQLUser +status: + conditions: + - lastTransitionTime: '2022-05-09T08:49:18Z' + message: The resource is up to date + reason: UpToDate + status: 'True' + type: Ready diff --git a/pkg/resource_customizations/sql.cnrm.cloud.google.com/SQLUser/testdata/update_failed.yaml b/pkg/resource_customizations/sql.cnrm.cloud.google.com/SQLUser/testdata/update_failed.yaml new file mode 100644 index 0000000..b80f8c6 --- /dev/null +++ b/pkg/resource_customizations/sql.cnrm.cloud.google.com/SQLUser/testdata/update_failed.yaml @@ -0,0 +1,9 @@ +apiVersion: sql.cnrm.cloud.google.com/v1beta1 +kind: SQLUser +status: + conditions: + - lastTransitionTime: '2022-07-01T12:56:21Z' + message: Update failed + reason: UpdateFailed + status: 'False' + type: Ready \ No newline at end of file diff --git a/pkg/resource_customizations/sql.cnrm.cloud.google.com/SQLUser/testdata/update_in_progress.yaml b/pkg/resource_customizations/sql.cnrm.cloud.google.com/SQLUser/testdata/update_in_progress.yaml new file mode 100644 index 0000000..ffa6af0 --- /dev/null +++ b/pkg/resource_customizations/sql.cnrm.cloud.google.com/SQLUser/testdata/update_in_progress.yaml @@ -0,0 +1,9 @@ +apiVersion: sql.cnrm.cloud.google.com/v1beta1 +kind: SQLUser +status: + conditions: + - lastTransitionTime: '2022-07-01T12:56:21Z' + message: Update in progress + reason: Updating + status: 'False' + type: Ready \ No newline at end of file diff --git a/pkg/resource_customizations/stacks.crossplane.io/ClusterStackInstall/health.lua b/pkg/resource_customizations/stacks.crossplane.io/ClusterStackInstall/health.lua new file mode 100644 index 0000000..9e94c5f --- /dev/null +++ b/pkg/resource_customizations/stacks.crossplane.io/ClusterStackInstall/health.lua @@ -0,0 +1,20 @@ +local hs = { + status = "Progressing", + message = "Waiting for stack to be installed" +} +if obj.status ~= nil then + if obj.status.conditionedStatus ~= nil then + if obj.status.conditionedStatus.conditions ~= nil then + for i, condition in ipairs(obj.status.conditionedStatus.conditions) do + if condition.type == "Ready" then + hs.message = condition.reason + if condition.status == "True" then + hs.status = "Healthy" + return hs + end + end + end + end + end +end +return hs diff --git 
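Reviewer note: the ClusterStackInstall script above is stricter than it first looks. Only a Ready condition can change the status, but its reason is copied into the message even while the install is still in flight, which is how being_created_stack.yaml ends up Progressing with "Resource is being created". A trace under those fixture values (standalone sketch, not patch content):

```lua
-- With Ready=False/"Resource is being created" and Synced=True, the loop
-- copies the Ready reason into the message but never flips the status,
-- so the Progressing default is returned -- the being_created_stack.yaml
-- expectation.
local hs = { status = "Progressing", message = "Waiting for stack to be installed" }
local conditions = {
  { type = "Ready",  status = "False", reason = "Resource is being created" },
  { type = "Synced", status = "True",  reason = "Successfully reconciled resource" },
}
for _, condition in ipairs(conditions) do
  if condition.type == "Ready" then
    hs.message = condition.reason
    if condition.status == "True" then hs.status = "Healthy" end
  end
end
print(hs.status, hs.message)  --> Progressing   Resource is being created
```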
a/pkg/resource_customizations/stacks.crossplane.io/ClusterStackInstall/health_test.yaml b/pkg/resource_customizations/stacks.crossplane.io/ClusterStackInstall/health_test.yaml new file mode 100644 index 0000000..c32bc8f --- /dev/null +++ b/pkg/resource_customizations/stacks.crossplane.io/ClusterStackInstall/health_test.yaml @@ -0,0 +1,13 @@ +tests: +- healthStatus: + status: Progressing + message: Waiting for stack to be installed + inputPath: testdata/wait_stack.yaml +- healthStatus: + status: Progressing + message: 'Resource is being created' + inputPath: testdata/being_created_stack.yaml +- healthStatus: + status: Healthy + message: 'Resource is available for use' + inputPath: testdata/installed_stack.yaml diff --git a/pkg/resource_customizations/stacks.crossplane.io/ClusterStackInstall/testdata/being_created_stack.yaml b/pkg/resource_customizations/stacks.crossplane.io/ClusterStackInstall/testdata/being_created_stack.yaml new file mode 100644 index 0000000..55ff238 --- /dev/null +++ b/pkg/resource_customizations/stacks.crossplane.io/ClusterStackInstall/testdata/being_created_stack.yaml @@ -0,0 +1,34 @@ +apiVersion: stacks.crossplane.io/v1alpha1 +kind: ClusterStackInstall +metadata: + creationTimestamp: "2020-05-13T09:35:26Z" + finalizers: + - finalizer.stackinstall.crossplane.io + generation: 1 + labels: + argocd.argoproj.io/instance: crossplane-cloudscale + name: stack-cloudscale + name: stack-cloudscale + namespace: syn-crossplane + resourceVersion: "20004" + selfLink: /apis/stacks.crossplane.io/v1alpha1/namespaces/syn-crossplane/clusterstackinstalls/stack-cloudscale + uid: cce4dfb5-185f-421d-be97-338408e0c712 +spec: + package: docker.io/vshn/stack-cloudscale:v0.0.2@sha256:8a9a94c3ef557da951d5c7f5bb0286a2f36c79f7ece499f61a8807383caed59b +status: + conditionedStatus: + conditions: + - lastTransitionTime: "2020-05-13T09:35:26Z" + reason: Resource is being created + status: "False" + type: Ready + - lastTransitionTime: "2020-05-13T09:35:26Z" + reason: Successfully reconciled resource + status: "True" + type: Synced + installJob: + apiVersion: batch/v1 + kind: Job + name: stack-cloudscale + namespace: syn-crossplane + uid: e9c2d5d5-41b1-4b11-8193-e5029c37cc52 diff --git a/pkg/resource_customizations/stacks.crossplane.io/ClusterStackInstall/testdata/installed_stack.yaml b/pkg/resource_customizations/stacks.crossplane.io/ClusterStackInstall/testdata/installed_stack.yaml new file mode 100644 index 0000000..86d9262 --- /dev/null +++ b/pkg/resource_customizations/stacks.crossplane.io/ClusterStackInstall/testdata/installed_stack.yaml @@ -0,0 +1,40 @@ +apiVersion: stacks.crossplane.io/v1alpha1 +kind: ClusterStackInstall +metadata: + creationTimestamp: "2020-05-13T09:35:26Z" + finalizers: + - finalizer.stackinstall.crossplane.io + generation: 1 + labels: + argocd.argoproj.io/instance: crossplane-cloudscale + name: stack-cloudscale + name: stack-cloudscale + namespace: syn-crossplane + resourceVersion: "20136" + selfLink: /apis/stacks.crossplane.io/v1alpha1/namespaces/syn-crossplane/clusterstackinstalls/stack-cloudscale + uid: cce4dfb5-185f-421d-be97-338408e0c712 +spec: + package: docker.io/vshn/stack-cloudscale:v0.0.2@sha256:8a9a94c3ef557da951d5c7f5bb0286a2f36c79f7ece499f61a8807383caed59b +status: + conditionedStatus: + conditions: + - lastTransitionTime: "2020-05-13T09:35:48Z" + reason: Resource is available for use + status: "True" + type: Ready + - lastTransitionTime: "2020-05-13T09:35:26Z" + reason: Successfully reconciled resource + status: "True" + type: Synced + installJob: + apiVersion: 
batch/v1 + kind: Job + name: stack-cloudscale + namespace: syn-crossplane + uid: e9c2d5d5-41b1-4b11-8193-e5029c37cc52 + stackRecord: + apiVersion: stacks.crossplane.io/v1alpha1 + kind: Stack + name: stack-cloudscale + namespace: syn-crossplane + uid: abccf273-2ec1-45a1-9738-af7b6aa39b76 diff --git a/pkg/resource_customizations/stacks.crossplane.io/ClusterStackInstall/testdata/wait_stack.yaml b/pkg/resource_customizations/stacks.crossplane.io/ClusterStackInstall/testdata/wait_stack.yaml new file mode 100644 index 0000000..5be2da7 --- /dev/null +++ b/pkg/resource_customizations/stacks.crossplane.io/ClusterStackInstall/testdata/wait_stack.yaml @@ -0,0 +1,17 @@ +apiVersion: stacks.crossplane.io/v1alpha1 +kind: ClusterStackInstall +metadata: + creationTimestamp: "2020-05-13T09:35:26Z" + finalizers: + - finalizer.stackinstall.crossplane.io + generation: 1 + labels: + argocd.argoproj.io/instance: crossplane-cloudscale + name: stack-cloudscale + name: stack-cloudscale + namespace: syn-crossplane + resourceVersion: "19999" + selfLink: /apis/stacks.crossplane.io/v1alpha1/namespaces/syn-crossplane/clusterstackinstalls/stack-cloudscale + uid: cce4dfb5-185f-421d-be97-338408e0c712 +spec: + package: docker.io/vshn/stack-cloudscale:v0.0.2@sha256:8a9a94c3ef557da951d5c7f5bb0286a2f36c79f7ece499f61a8807383caed59b diff --git a/pkg/resource_customizations/storage.cnrm.cloud.google.com/StorageBucket/health.lua b/pkg/resource_customizations/storage.cnrm.cloud.google.com/StorageBucket/health.lua new file mode 100644 index 0000000..585b5e2 --- /dev/null +++ b/pkg/resource_customizations/storage.cnrm.cloud.google.com/StorageBucket/health.lua @@ -0,0 +1,39 @@ +local hs = { + status = "Progressing", + message = "Update in progress" +} +if obj.status ~= nil then + if obj.status.conditions ~= nil then + for i, condition in ipairs(obj.status.conditions) do + + -- Up To Date + if condition.reason == "UpToDate" and condition.status == "True" then + hs.status = "Healthy" + hs.message = condition.message + return hs + end + + -- Update Failed + if condition.reason == "UpdateFailed" then + hs.status = "Degraded" + hs.message = condition.message + return hs + end + + -- Dependency Not Found + if condition.reason == "DependencyNotFound" then + hs.status = "Degraded" + hs.message = condition.message + return hs + end + + -- Dependency Not Ready + if condition.reason == "DependencyNotReady" then + hs.status = "Suspended" + hs.message = condition.message + return hs + end + end + end +end +return hs \ No newline at end of file diff --git a/pkg/resource_customizations/storage.cnrm.cloud.google.com/StorageBucket/health_test.yaml b/pkg/resource_customizations/storage.cnrm.cloud.google.com/StorageBucket/health_test.yaml new file mode 100644 index 0000000..8354cfb --- /dev/null +++ b/pkg/resource_customizations/storage.cnrm.cloud.google.com/StorageBucket/health_test.yaml @@ -0,0 +1,21 @@ +tests: +- healthStatus: + status: Degraded + message: "Dependency not found" + inputPath: testdata/dependency_not_found.yaml +- healthStatus: + status: Suspended + message: "Dependency not ready" + inputPath: testdata/dependency_not_ready.yaml +- healthStatus: + status: Healthy + message: "The resource is up to date" + inputPath: testdata/up_to_date.yaml +- healthStatus: + status: Degraded + message: "Update failed" + inputPath: testdata/update_failed.yaml +- healthStatus: + status: Progressing + message: "Update in progress" + inputPath: testdata/update_in_progress.yaml diff --git 
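Reviewer note: the five-case health_test.yaml above repeats for every Config Connector kind in this patch. One expectation worth calling out: in the update_in_progress case the asserted message "Update in progress" comes from the hardcoded default at the top of health.lua, not from condition.message, because reason "Updating" matches none of the handled branches. A sketch of that fall-through (assumed standalone Lua, outside Argo CD):

```lua
-- The initial hs is what update_in_progress.yaml asserts: reason "Updating"
-- is not one of the reasons health.lua handles, so hs is returned unchanged.
local hs = { status = "Progressing", message = "Update in progress" }
local handled = { UpToDate = true, UpdateFailed = true,
                  DependencyNotFound = true, DependencyNotReady = true }
local condition = { reason = "Updating", status = "False",
                    message = "Update in progress" }
assert(handled[condition.reason] == nil)  -- falls through to the default
print(hs.status, hs.message)              --> Progressing   Update in progress
```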
a/pkg/resource_customizations/storage.cnrm.cloud.google.com/StorageBucket/testdata/dependency_not_found.yaml b/pkg/resource_customizations/storage.cnrm.cloud.google.com/StorageBucket/testdata/dependency_not_found.yaml new file mode 100644 index 0000000..a0e2c1f --- /dev/null +++ b/pkg/resource_customizations/storage.cnrm.cloud.google.com/StorageBucket/testdata/dependency_not_found.yaml @@ -0,0 +1,9 @@ +apiVersion: storage.cnrm.cloud.google.com/v1beta1 +kind: StorageBucket +status: + conditions: + - lastTransitionTime: '2022-07-01T12:56:21Z' + message: Dependency not found + reason: DependencyNotFound + status: 'False' + type: Ready diff --git a/pkg/resource_customizations/storage.cnrm.cloud.google.com/StorageBucket/testdata/dependency_not_ready.yaml b/pkg/resource_customizations/storage.cnrm.cloud.google.com/StorageBucket/testdata/dependency_not_ready.yaml new file mode 100644 index 0000000..c478ae0 --- /dev/null +++ b/pkg/resource_customizations/storage.cnrm.cloud.google.com/StorageBucket/testdata/dependency_not_ready.yaml @@ -0,0 +1,9 @@ +apiVersion: storage.cnrm.cloud.google.com/v1beta1 +kind: StorageBucket +status: + conditions: + - lastTransitionTime: '2022-07-01T12:56:21Z' + message: Dependency not ready + reason: DependencyNotReady + status: 'False' + type: Ready diff --git a/pkg/resource_customizations/storage.cnrm.cloud.google.com/StorageBucket/testdata/up_to_date.yaml b/pkg/resource_customizations/storage.cnrm.cloud.google.com/StorageBucket/testdata/up_to_date.yaml new file mode 100644 index 0000000..514048e --- /dev/null +++ b/pkg/resource_customizations/storage.cnrm.cloud.google.com/StorageBucket/testdata/up_to_date.yaml @@ -0,0 +1,9 @@ +apiVersion: storage.cnrm.cloud.google.com/v1beta1 +kind: StorageBucket +status: + conditions: + - lastTransitionTime: '2022-05-09T08:49:18Z' + message: The resource is up to date + reason: UpToDate + status: 'True' + type: Ready diff --git a/pkg/resource_customizations/storage.cnrm.cloud.google.com/StorageBucket/testdata/update_failed.yaml b/pkg/resource_customizations/storage.cnrm.cloud.google.com/StorageBucket/testdata/update_failed.yaml new file mode 100644 index 0000000..3662389 --- /dev/null +++ b/pkg/resource_customizations/storage.cnrm.cloud.google.com/StorageBucket/testdata/update_failed.yaml @@ -0,0 +1,9 @@ +apiVersion: storage.cnrm.cloud.google.com/v1beta1 +kind: StorageBucket +status: + conditions: + - lastTransitionTime: '2022-07-01T12:56:21Z' + message: Update failed + reason: UpdateFailed + status: 'False' + type: Ready diff --git a/pkg/resource_customizations/storage.cnrm.cloud.google.com/StorageBucket/testdata/update_in_progress.yaml b/pkg/resource_customizations/storage.cnrm.cloud.google.com/StorageBucket/testdata/update_in_progress.yaml new file mode 100644 index 0000000..58f68bb --- /dev/null +++ b/pkg/resource_customizations/storage.cnrm.cloud.google.com/StorageBucket/testdata/update_in_progress.yaml @@ -0,0 +1,9 @@ +apiVersion: storage.cnrm.cloud.google.com/v1beta1 +kind: StorageBucket +status: + conditions: + - lastTransitionTime: '2022-07-01T12:56:21Z' + message: Update in progress + reason: Updating + status: 'False' + type: Ready diff --git a/pkg/resource_customizations/storage.cnrm.cloud.google.com/StorageBucketAccessControl/health.lua b/pkg/resource_customizations/storage.cnrm.cloud.google.com/StorageBucketAccessControl/health.lua new file mode 100644 index 0000000..585b5e2 --- /dev/null +++ b/pkg/resource_customizations/storage.cnrm.cloud.google.com/StorageBucketAccessControl/health.lua @@ -0,0 +1,39 @@ +local hs = { 
+ status = "Progressing", + message = "Update in progress" +} +if obj.status ~= nil then + if obj.status.conditions ~= nil then + for i, condition in ipairs(obj.status.conditions) do + + -- Up To Date + if condition.reason == "UpToDate" and condition.status == "True" then + hs.status = "Healthy" + hs.message = condition.message + return hs + end + + -- Update Failed + if condition.reason == "UpdateFailed" then + hs.status = "Degraded" + hs.message = condition.message + return hs + end + + -- Dependency Not Found + if condition.reason == "DependencyNotFound" then + hs.status = "Degraded" + hs.message = condition.message + return hs + end + + -- Dependency Not Ready + if condition.reason == "DependencyNotReady" then + hs.status = "Suspended" + hs.message = condition.message + return hs + end + end + end +end +return hs \ No newline at end of file diff --git a/pkg/resource_customizations/storage.cnrm.cloud.google.com/StorageBucketAccessControl/health_test.yaml b/pkg/resource_customizations/storage.cnrm.cloud.google.com/StorageBucketAccessControl/health_test.yaml new file mode 100644 index 0000000..8354cfb --- /dev/null +++ b/pkg/resource_customizations/storage.cnrm.cloud.google.com/StorageBucketAccessControl/health_test.yaml @@ -0,0 +1,21 @@ +tests: +- healthStatus: + status: Degraded + message: "Dependency not found" + inputPath: testdata/dependency_not_found.yaml +- healthStatus: + status: Suspended + message: "Dependency not ready" + inputPath: testdata/dependency_not_ready.yaml +- healthStatus: + status: Healthy + message: "The resource is up to date" + inputPath: testdata/up_to_date.yaml +- healthStatus: + status: Degraded + message: "Update failed" + inputPath: testdata/update_failed.yaml +- healthStatus: + status: Progressing + message: "Update in progress" + inputPath: testdata/update_in_progress.yaml diff --git a/pkg/resource_customizations/storage.cnrm.cloud.google.com/StorageBucketAccessControl/testdata/dependency_not_found.yaml b/pkg/resource_customizations/storage.cnrm.cloud.google.com/StorageBucketAccessControl/testdata/dependency_not_found.yaml new file mode 100644 index 0000000..52404ca --- /dev/null +++ b/pkg/resource_customizations/storage.cnrm.cloud.google.com/StorageBucketAccessControl/testdata/dependency_not_found.yaml @@ -0,0 +1,9 @@ +apiVersion: storage.cnrm.cloud.google.com/v1beta1 +kind: StorageBucketAccessControl +status: + conditions: + - lastTransitionTime: '2022-07-01T12:56:21Z' + message: Dependency not found + reason: DependencyNotFound + status: 'False' + type: Ready diff --git a/pkg/resource_customizations/storage.cnrm.cloud.google.com/StorageBucketAccessControl/testdata/dependency_not_ready.yaml b/pkg/resource_customizations/storage.cnrm.cloud.google.com/StorageBucketAccessControl/testdata/dependency_not_ready.yaml new file mode 100644 index 0000000..c094801 --- /dev/null +++ b/pkg/resource_customizations/storage.cnrm.cloud.google.com/StorageBucketAccessControl/testdata/dependency_not_ready.yaml @@ -0,0 +1,9 @@ +apiVersion: storage.cnrm.cloud.google.com/v1beta1 +kind: StorageBucketAccessControl +status: + conditions: + - lastTransitionTime: '2022-07-01T12:56:21Z' + message: Dependency not ready + reason: DependencyNotReady + status: 'False' + type: Ready diff --git a/pkg/resource_customizations/storage.cnrm.cloud.google.com/StorageBucketAccessControl/testdata/up_to_date.yaml b/pkg/resource_customizations/storage.cnrm.cloud.google.com/StorageBucketAccessControl/testdata/up_to_date.yaml new file mode 100644 index 0000000..b7f45aa --- /dev/null +++ 
b/pkg/resource_customizations/storage.cnrm.cloud.google.com/StorageBucketAccessControl/testdata/up_to_date.yaml @@ -0,0 +1,9 @@ +apiVersion: storage.cnrm.cloud.google.com/v1beta1 +kind: StorageBucketAccessControl +status: + conditions: + - lastTransitionTime: '2022-05-09T08:49:18Z' + message: The resource is up to date + reason: UpToDate + status: 'True' + type: Ready diff --git a/pkg/resource_customizations/storage.cnrm.cloud.google.com/StorageBucketAccessControl/testdata/update_failed.yaml b/pkg/resource_customizations/storage.cnrm.cloud.google.com/StorageBucketAccessControl/testdata/update_failed.yaml new file mode 100644 index 0000000..190b92c --- /dev/null +++ b/pkg/resource_customizations/storage.cnrm.cloud.google.com/StorageBucketAccessControl/testdata/update_failed.yaml @@ -0,0 +1,9 @@ +apiVersion: storage.cnrm.cloud.google.com/v1beta1 +kind: StorageBucketAccessControl +status: + conditions: + - lastTransitionTime: '2022-07-01T12:56:21Z' + message: Update failed + reason: UpdateFailed + status: 'False' + type: Ready diff --git a/pkg/resource_customizations/storage.cnrm.cloud.google.com/StorageBucketAccessControl/testdata/update_in_progress.yaml b/pkg/resource_customizations/storage.cnrm.cloud.google.com/StorageBucketAccessControl/testdata/update_in_progress.yaml new file mode 100644 index 0000000..36a7692 --- /dev/null +++ b/pkg/resource_customizations/storage.cnrm.cloud.google.com/StorageBucketAccessControl/testdata/update_in_progress.yaml @@ -0,0 +1,9 @@ +apiVersion: storage.cnrm.cloud.google.com/v1beta1 +kind: StorageBucketAccessControl +status: + conditions: + - lastTransitionTime: '2022-07-01T12:56:21Z' + message: Update in progress + reason: Updating + status: 'False' + type: Ready diff --git a/pkg/resource_customizations/storage.cnrm.cloud.google.com/StorageDefaultObjectAccessControl/health.lua b/pkg/resource_customizations/storage.cnrm.cloud.google.com/StorageDefaultObjectAccessControl/health.lua new file mode 100644 index 0000000..585b5e2 --- /dev/null +++ b/pkg/resource_customizations/storage.cnrm.cloud.google.com/StorageDefaultObjectAccessControl/health.lua @@ -0,0 +1,39 @@ +local hs = { + status = "Progressing", + message = "Update in progress" +} +if obj.status ~= nil then + if obj.status.conditions ~= nil then + for i, condition in ipairs(obj.status.conditions) do + + -- Up To Date + if condition.reason == "UpToDate" and condition.status == "True" then + hs.status = "Healthy" + hs.message = condition.message + return hs + end + + -- Update Failed + if condition.reason == "UpdateFailed" then + hs.status = "Degraded" + hs.message = condition.message + return hs + end + + -- Dependency Not Found + if condition.reason == "DependencyNotFound" then + hs.status = "Degraded" + hs.message = condition.message + return hs + end + + -- Dependency Not Ready + if condition.reason == "DependencyNotReady" then + hs.status = "Suspended" + hs.message = condition.message + return hs + end + end + end +end +return hs \ No newline at end of file diff --git a/pkg/resource_customizations/storage.cnrm.cloud.google.com/StorageDefaultObjectAccessControl/health_test.yaml b/pkg/resource_customizations/storage.cnrm.cloud.google.com/StorageDefaultObjectAccessControl/health_test.yaml new file mode 100644 index 0000000..8354cfb --- /dev/null +++ b/pkg/resource_customizations/storage.cnrm.cloud.google.com/StorageDefaultObjectAccessControl/health_test.yaml @@ -0,0 +1,21 @@ +tests: +- healthStatus: + status: Degraded + message: "Dependency not found" + inputPath: testdata/dependency_not_found.yaml +- 
healthStatus: + status: Suspended + message: "Dependency not ready" + inputPath: testdata/dependency_not_ready.yaml +- healthStatus: + status: Healthy + message: "The resource is up to date" + inputPath: testdata/up_to_date.yaml +- healthStatus: + status: Degraded + message: "Update failed" + inputPath: testdata/update_failed.yaml +- healthStatus: + status: Progressing + message: "Update in progress" + inputPath: testdata/update_in_progress.yaml diff --git a/pkg/resource_customizations/storage.cnrm.cloud.google.com/StorageDefaultObjectAccessControl/testdata/dependency_not_found.yaml b/pkg/resource_customizations/storage.cnrm.cloud.google.com/StorageDefaultObjectAccessControl/testdata/dependency_not_found.yaml new file mode 100644 index 0000000..283d13d --- /dev/null +++ b/pkg/resource_customizations/storage.cnrm.cloud.google.com/StorageDefaultObjectAccessControl/testdata/dependency_not_found.yaml @@ -0,0 +1,9 @@ +apiVersion: storage.cnrm.cloud.google.com/v1beta1 +kind: StorageDefaultObjectAccessControl +status: + conditions: + - lastTransitionTime: '2022-07-01T12:56:21Z' + message: Dependency not found + reason: DependencyNotFound + status: 'False' + type: Ready diff --git a/pkg/resource_customizations/storage.cnrm.cloud.google.com/StorageDefaultObjectAccessControl/testdata/dependency_not_ready.yaml b/pkg/resource_customizations/storage.cnrm.cloud.google.com/StorageDefaultObjectAccessControl/testdata/dependency_not_ready.yaml new file mode 100644 index 0000000..224c2fc --- /dev/null +++ b/pkg/resource_customizations/storage.cnrm.cloud.google.com/StorageDefaultObjectAccessControl/testdata/dependency_not_ready.yaml @@ -0,0 +1,9 @@ +apiVersion: storage.cnrm.cloud.google.com/v1beta1 +kind: StorageDefaultObjectAccessControl +status: + conditions: + - lastTransitionTime: '2022-07-01T12:56:21Z' + message: Dependency not ready + reason: DependencyNotReady + status: 'False' + type: Ready diff --git a/pkg/resource_customizations/storage.cnrm.cloud.google.com/StorageDefaultObjectAccessControl/testdata/up_to_date.yaml b/pkg/resource_customizations/storage.cnrm.cloud.google.com/StorageDefaultObjectAccessControl/testdata/up_to_date.yaml new file mode 100644 index 0000000..1f1f998 --- /dev/null +++ b/pkg/resource_customizations/storage.cnrm.cloud.google.com/StorageDefaultObjectAccessControl/testdata/up_to_date.yaml @@ -0,0 +1,9 @@ +apiVersion: storage.cnrm.cloud.google.com/v1beta1 +kind: StorageDefaultObjectAccessControl +status: + conditions: + - lastTransitionTime: '2022-05-09T08:49:18Z' + message: The resource is up to date + reason: UpToDate + status: 'True' + type: Ready diff --git a/pkg/resource_customizations/storage.cnrm.cloud.google.com/StorageDefaultObjectAccessControl/testdata/update_failed.yaml b/pkg/resource_customizations/storage.cnrm.cloud.google.com/StorageDefaultObjectAccessControl/testdata/update_failed.yaml new file mode 100644 index 0000000..493253f --- /dev/null +++ b/pkg/resource_customizations/storage.cnrm.cloud.google.com/StorageDefaultObjectAccessControl/testdata/update_failed.yaml @@ -0,0 +1,9 @@ +apiVersion: storage.cnrm.cloud.google.com/v1beta1 +kind: StorageDefaultObjectAccessControl +status: + conditions: + - lastTransitionTime: '2022-07-01T12:56:21Z' + message: Update failed + reason: UpdateFailed + status: 'False' + type: Ready diff --git a/pkg/resource_customizations/storage.cnrm.cloud.google.com/StorageDefaultObjectAccessControl/testdata/update_in_progress.yaml 
b/pkg/resource_customizations/storage.cnrm.cloud.google.com/StorageDefaultObjectAccessControl/testdata/update_in_progress.yaml new file mode 100644 index 0000000..f001e9c --- /dev/null +++ b/pkg/resource_customizations/storage.cnrm.cloud.google.com/StorageDefaultObjectAccessControl/testdata/update_in_progress.yaml @@ -0,0 +1,9 @@ +apiVersion: storage.cnrm.cloud.google.com/v1beta1 +kind: StorageDefaultObjectAccessControl +status: + conditions: + - lastTransitionTime: '2022-07-01T12:56:21Z' + message: Update in progress + reason: Updating + status: 'False' + type: Ready diff --git a/pkg/resource_customizations/tower.ansible.com/AnsibleJob/health.lua b/pkg/resource_customizations/tower.ansible.com/AnsibleJob/health.lua new file mode 100644 index 0000000..1e4a514 --- /dev/null +++ b/pkg/resource_customizations/tower.ansible.com/AnsibleJob/health.lua @@ -0,0 +1,25 @@ +hs = {} +if obj.status ~= nil then + if obj.status.ansibleJobResult ~= nil then + jobstatus = obj.status.ansibleJobResult.status + if jobstatus == "successful" then + hs.status = "Healthy" + hs.message = jobstatus .. " job - " .. obj.status.ansibleJobResult.url + return hs + end + if jobstatus == "failed" or jobstatus == "error" or jobstatus == "canceled" then + hs.status = "Degraded" + hs.message = jobstatus .. " job - " .. obj.status.ansibleJobResult.url + return hs + end + if jobstatus == "new" or jobstatus == "pending" or jobstatus == "waiting" or jobstatus == "running" then + hs.status = "Progressing" + hs.message = jobstatus .. " job - " .. obj.status.ansibleJobResult.url + return hs + end + end +end + +hs.status = "Progressing" +hs.message = "Waiting for AnsibleJob" +return hs diff --git a/pkg/resource_customizations/tower.ansible.com/AnsibleJob/health_test.yaml b/pkg/resource_customizations/tower.ansible.com/AnsibleJob/health_test.yaml new file mode 100644 index 0000000..bb4143a --- /dev/null +++ b/pkg/resource_customizations/tower.ansible.com/AnsibleJob/health_test.yaml @@ -0,0 +1,37 @@ +tests: +- healthStatus: + status: Progressing + message: Waiting for AnsibleJob + inputPath: testdata/progressing_noStatus.yaml +- healthStatus: + status: Progressing + message: 'new job - https://argocd.test.ansiblejob.custom.health.com/#/jobs/playbook/1' + inputPath: testdata/progressing_new.yaml +- healthStatus: + status: Progressing + message: 'pending job - https://argocd.test.ansiblejob.custom.health.com/#/jobs/playbook/1' + inputPath: testdata/progressing_pending.yaml +- healthStatus: + status: Progressing + message: 'running job - https://argocd.test.ansiblejob.custom.health.com/#/jobs/playbook/1' + inputPath: testdata/progressing_running.yaml +- healthStatus: + status: Progressing + message: 'waiting job - https://argocd.test.ansiblejob.custom.health.com/#/jobs/playbook/1' + inputPath: testdata/progressing_waiting.yaml +- healthStatus: + status: Degraded + message: 'canceled job - https://argocd.test.ansiblejob.custom.health.com/#/jobs/playbook/1' + inputPath: testdata/degraded_canceled.yaml +- healthStatus: + status: Degraded + message: 'error job - https://argocd.test.ansiblejob.custom.health.com/#/jobs/playbook/1' + inputPath: testdata/degraded_error.yaml +- healthStatus: + status: Degraded + message: 'failed job - https://argocd.test.ansiblejob.custom.health.com/#/jobs/playbook/1' + inputPath: testdata/degraded_failed.yaml +- healthStatus: + status: Healthy + message: 'successful job - https://argocd.test.ansiblejob.custom.health.com/#/jobs/playbook/1' + inputPath: testdata/healthy.yaml diff --git 
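Reviewer note: the AnsibleJob health.lua buckets ansibleJobResult.status into three sets (successful; failed/error/canceled; new/pending/waiting/running) and builds every message as "<status> job - <url>", which is exactly the shape each expectation in the health_test.yaml above encodes; a resource with no status at all falls through to Progressing/"Waiting for AnsibleJob". A quick check of the message construction (sketch, fixture values from testdata/healthy.yaml):

```lua
-- The script concatenates the job status and Tower URL into the message.
local result = {
  status = "successful",
  url = "https://argocd.test.ansiblejob.custom.health.com/#/jobs/playbook/1",
}
local message = result.status .. " job - " .. result.url
print(message)
--> successful job - https://argocd.test.ansiblejob.custom.health.com/#/jobs/playbook/1
```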
a/pkg/resource_customizations/tower.ansible.com/AnsibleJob/testdata/degraded_canceled.yaml b/pkg/resource_customizations/tower.ansible.com/AnsibleJob/testdata/degraded_canceled.yaml new file mode 100644 index 0000000..55fa0cd --- /dev/null +++ b/pkg/resource_customizations/tower.ansible.com/AnsibleJob/testdata/degraded_canceled.yaml @@ -0,0 +1,27 @@ +apiVersion: tower.ansible.com/v1alpha1 +kind: AnsibleJob +metadata: + annotations: + argocd.argoproj.io/hook: PreSync + creationTimestamp: "2023-06-27T20:22:22Z" + generateName: prehook-test- + generation: 1 + labels: + app.kubernetes.io/instance: ansible-hooks + tower_job_id: "1" + name: prehook-test-dfcff01-presync-1687897341 + namespace: argocd + resourceVersion: "6536518" + uid: 09fa0d39-a170-4c37-a3b0-6e140e029868 +spec: + job_template_name: Demo Job Template + tower_auth_secret: toweraccess +status: + ansibleJobResult: + changed: true + elapsed: "5.21" + failed: false + finished: "2023-06-27T20:22:40.116381Z" + started: "2023-06-27T20:22:34.906399Z" + status: canceled + url: https://argocd.test.ansiblejob.custom.health.com/#/jobs/playbook/1 diff --git a/pkg/resource_customizations/tower.ansible.com/AnsibleJob/testdata/degraded_error.yaml b/pkg/resource_customizations/tower.ansible.com/AnsibleJob/testdata/degraded_error.yaml new file mode 100644 index 0000000..0ebb059 --- /dev/null +++ b/pkg/resource_customizations/tower.ansible.com/AnsibleJob/testdata/degraded_error.yaml @@ -0,0 +1,27 @@ +apiVersion: tower.ansible.com/v1alpha1 +kind: AnsibleJob +metadata: + annotations: + argocd.argoproj.io/hook: PreSync + creationTimestamp: "2023-06-27T20:22:22Z" + generateName: prehook-test- + generation: 1 + labels: + app.kubernetes.io/instance: ansible-hooks + tower_job_id: "1" + name: prehook-test-dfcff01-presync-1687897341 + namespace: argocd + resourceVersion: "6536518" + uid: 09fa0d39-a170-4c37-a3b0-6e140e029868 +spec: + job_template_name: Demo Job Template + tower_auth_secret: toweraccess +status: + ansibleJobResult: + changed: true + elapsed: "5.21" + failed: true + finished: "2023-06-27T20:22:40.116381Z" + started: "2023-06-27T20:22:34.906399Z" + status: error + url: https://argocd.test.ansiblejob.custom.health.com/#/jobs/playbook/1 diff --git a/pkg/resource_customizations/tower.ansible.com/AnsibleJob/testdata/degraded_failed.yaml b/pkg/resource_customizations/tower.ansible.com/AnsibleJob/testdata/degraded_failed.yaml new file mode 100644 index 0000000..0400570 --- /dev/null +++ b/pkg/resource_customizations/tower.ansible.com/AnsibleJob/testdata/degraded_failed.yaml @@ -0,0 +1,27 @@ +apiVersion: tower.ansible.com/v1alpha1 +kind: AnsibleJob +metadata: + annotations: + argocd.argoproj.io/hook: PreSync + creationTimestamp: "2023-06-27T20:22:22Z" + generateName: prehook-test- + generation: 1 + labels: + app.kubernetes.io/instance: ansible-hooks + tower_job_id: "1" + name: prehook-test-dfcff01-presync-1687897341 + namespace: argocd + resourceVersion: "6536518" + uid: 09fa0d39-a170-4c37-a3b0-6e140e029868 +spec: + job_template_name: Demo Job Template + tower_auth_secret: toweraccess +status: + ansibleJobResult: + changed: true + elapsed: "5.21" + failed: true + finished: "2023-06-27T20:22:40.116381Z" + started: "2023-06-27T20:22:34.906399Z" + status: failed + url: https://argocd.test.ansiblejob.custom.health.com/#/jobs/playbook/1 diff --git a/pkg/resource_customizations/tower.ansible.com/AnsibleJob/testdata/healthy.yaml b/pkg/resource_customizations/tower.ansible.com/AnsibleJob/testdata/healthy.yaml new file mode 100644 index 0000000..395a1bd --- 
/dev/null +++ b/pkg/resource_customizations/tower.ansible.com/AnsibleJob/testdata/healthy.yaml @@ -0,0 +1,27 @@ +apiVersion: tower.ansible.com/v1alpha1 +kind: AnsibleJob +metadata: + annotations: + argocd.argoproj.io/hook: PreSync + creationTimestamp: "2023-06-27T20:22:22Z" + generateName: prehook-test- + generation: 1 + labels: + app.kubernetes.io/instance: ansible-hooks + tower_job_id: "1" + name: prehook-test-dfcff01-presync-1687897341 + namespace: argocd + resourceVersion: "6536518" + uid: 09fa0d39-a170-4c37-a3b0-6e140e029868 +spec: + job_template_name: Demo Job Template + tower_auth_secret: toweraccess +status: + ansibleJobResult: + changed: true + elapsed: "5.21" + failed: false + finished: "2023-06-27T20:22:40.116381Z" + started: "2023-06-27T20:22:34.906399Z" + status: successful + url: https://argocd.test.ansiblejob.custom.health.com/#/jobs/playbook/1 diff --git a/pkg/resource_customizations/tower.ansible.com/AnsibleJob/testdata/progressing_new.yaml b/pkg/resource_customizations/tower.ansible.com/AnsibleJob/testdata/progressing_new.yaml new file mode 100644 index 0000000..2e700d3 --- /dev/null +++ b/pkg/resource_customizations/tower.ansible.com/AnsibleJob/testdata/progressing_new.yaml @@ -0,0 +1,25 @@ +apiVersion: tower.ansible.com/v1alpha1 +kind: AnsibleJob +metadata: + annotations: + argocd.argoproj.io/hook: PreSync + creationTimestamp: "2023-06-27T20:22:22Z" + generateName: prehook-test- + generation: 1 + labels: + app.kubernetes.io/instance: ansible-hooks + tower_job_id: "1" + name: prehook-test-dfcff01-presync-1687897341 + namespace: argocd + resourceVersion: "6536518" + uid: 09fa0d39-a170-4c37-a3b0-6e140e029868 +spec: + job_template_name: Demo Job Template + tower_auth_secret: toweraccess +status: + ansibleJobResult: + changed: true + failed: false + started: "2023-06-27T20:22:34.906399Z" + status: new + url: https://argocd.test.ansiblejob.custom.health.com/#/jobs/playbook/1 diff --git a/pkg/resource_customizations/tower.ansible.com/AnsibleJob/testdata/progressing_noStatus.yaml b/pkg/resource_customizations/tower.ansible.com/AnsibleJob/testdata/progressing_noStatus.yaml new file mode 100644 index 0000000..a6e1701 --- /dev/null +++ b/pkg/resource_customizations/tower.ansible.com/AnsibleJob/testdata/progressing_noStatus.yaml @@ -0,0 +1,17 @@ +apiVersion: tower.ansible.com/v1alpha1 +kind: AnsibleJob +metadata: + annotations: + argocd.argoproj.io/hook: PreSync + creationTimestamp: "2023-06-27T20:22:22Z" + generateName: prehook-test- + generation: 1 + labels: + app.kubernetes.io/instance: ansible-hooks + name: prehook-test-dfcff01-presync-1687897341 + namespace: argocd + resourceVersion: "6536518" + uid: 09fa0d39-a170-4c37-a3b0-6e140e029868 +spec: + job_template_name: Demo Job Template + tower_auth_secret: toweraccess diff --git a/pkg/resource_customizations/tower.ansible.com/AnsibleJob/testdata/progressing_pending.yaml b/pkg/resource_customizations/tower.ansible.com/AnsibleJob/testdata/progressing_pending.yaml new file mode 100644 index 0000000..ffabaf4 --- /dev/null +++ b/pkg/resource_customizations/tower.ansible.com/AnsibleJob/testdata/progressing_pending.yaml @@ -0,0 +1,25 @@ +apiVersion: tower.ansible.com/v1alpha1 +kind: AnsibleJob +metadata: + annotations: + argocd.argoproj.io/hook: PreSync + creationTimestamp: "2023-06-27T20:22:22Z" + generateName: prehook-test- + generation: 1 + labels: + app.kubernetes.io/instance: ansible-hooks + tower_job_id: "1" + name: prehook-test-dfcff01-presync-1687897341 + namespace: argocd + resourceVersion: "6536518" + uid: 
09fa0d39-a170-4c37-a3b0-6e140e029868 +spec: + job_template_name: Demo Job Template + tower_auth_secret: toweraccess +status: + ansibleJobResult: + changed: true + failed: false + started: "2023-06-27T20:22:34.906399Z" + status: pending + url: https://argocd.test.ansiblejob.custom.health.com/#/jobs/playbook/1 diff --git a/pkg/resource_customizations/tower.ansible.com/AnsibleJob/testdata/progressing_running.yaml b/pkg/resource_customizations/tower.ansible.com/AnsibleJob/testdata/progressing_running.yaml new file mode 100644 index 0000000..6e369f0 --- /dev/null +++ b/pkg/resource_customizations/tower.ansible.com/AnsibleJob/testdata/progressing_running.yaml @@ -0,0 +1,25 @@ +apiVersion: tower.ansible.com/v1alpha1 +kind: AnsibleJob +metadata: + annotations: + argocd.argoproj.io/hook: PreSync + creationTimestamp: "2023-06-27T20:22:22Z" + generateName: prehook-test- + generation: 1 + labels: + app.kubernetes.io/instance: ansible-hooks + tower_job_id: "1" + name: prehook-test-dfcff01-presync-1687897341 + namespace: argocd + resourceVersion: "6536518" + uid: 09fa0d39-a170-4c37-a3b0-6e140e029868 +spec: + job_template_name: Demo Job Template + tower_auth_secret: toweraccess +status: + ansibleJobResult: + changed: true + failed: false + started: "2023-06-27T20:22:34.906399Z" + status: running + url: https://argocd.test.ansiblejob.custom.health.com/#/jobs/playbook/1 diff --git a/pkg/resource_customizations/tower.ansible.com/AnsibleJob/testdata/progressing_waiting.yaml b/pkg/resource_customizations/tower.ansible.com/AnsibleJob/testdata/progressing_waiting.yaml new file mode 100644 index 0000000..c6f192c --- /dev/null +++ b/pkg/resource_customizations/tower.ansible.com/AnsibleJob/testdata/progressing_waiting.yaml @@ -0,0 +1,25 @@ +apiVersion: tower.ansible.com/v1alpha1 +kind: AnsibleJob +metadata: + annotations: + argocd.argoproj.io/hook: PreSync + creationTimestamp: "2023-06-27T20:22:22Z" + generateName: prehook-test- + generation: 1 + labels: + app.kubernetes.io/instance: ansible-hooks + tower_job_id: "1" + name: prehook-test-dfcff01-presync-1687897341 + namespace: argocd + resourceVersion: "6536518" + uid: 09fa0d39-a170-4c37-a3b0-6e140e029868 +spec: + job_template_name: Demo Job Template + tower_auth_secret: toweraccess +status: + ansibleJobResult: + changed: true + failed: false + started: "2023-06-27T20:22:34.906399Z" + status: waiting + url: https://argocd.test.ansiblejob.custom.health.com/#/jobs/playbook/1 diff --git a/pkg/resource_customizations/trident.netapp.io/TridentBackendConfig/health.lua b/pkg/resource_customizations/trident.netapp.io/TridentBackendConfig/health.lua new file mode 100644 index 0000000..614915a --- /dev/null +++ b/pkg/resource_customizations/trident.netapp.io/TridentBackendConfig/health.lua @@ -0,0 +1,16 @@ +local hs = {} +if obj.status ~= nil then + if obj.status.phase == "Bound" and obj.status.lastOperationStatus == "Success" then + hs.status = "Healthy" + hs.message = obj.status.message + return hs + end + if obj.status.lastOperationStatus == "Failed" then + hs.status = "Degraded" + hs.message = obj.status.message + return hs + end +end +hs.status = "Progressing" +hs.message = "Waiting for backend creation" +return hs diff --git a/pkg/resource_customizations/trident.netapp.io/TridentBackendConfig/health_test.yaml b/pkg/resource_customizations/trident.netapp.io/TridentBackendConfig/health_test.yaml new file mode 100644 index 0000000..d7cbc7f --- /dev/null +++ b/pkg/resource_customizations/trident.netapp.io/TridentBackendConfig/health_test.yaml @@ -0,0 +1,13 @@ +tests: +- 
healthStatus: + status: Degraded + message: "Failed to create backend: backend tbc-ontap-nas already exists" + inputPath: testdata/degraded.yaml +- healthStatus: + status: Progressing + message: "Waiting for backend creation" + inputPath: testdata/progressing.yaml +- healthStatus: + status: Healthy + message: "Backend 'tbc-ontap-nas' created" + inputPath: testdata/healthy.yaml diff --git a/pkg/resource_customizations/trident.netapp.io/TridentBackendConfig/testdata/degraded.yaml b/pkg/resource_customizations/trident.netapp.io/TridentBackendConfig/testdata/degraded.yaml new file mode 100644 index 0000000..cfc367d --- /dev/null +++ b/pkg/resource_customizations/trident.netapp.io/TridentBackendConfig/testdata/degraded.yaml @@ -0,0 +1,95 @@ +apiVersion: trident.netapp.io/v1 +kind: TridentBackendConfig +metadata: + annotations: + creationTimestamp: '2021-06-24T11:36:17Z' + finalizers: + - trident.netapp.io + generation: 1 + labels: + app.kubernetes.io/instance: trident-config + managedFields: + - apiVersion: trident.netapp.io/v1 + fieldsType: FieldsV1 + fieldsV1: + 'f:metadata': + 'f:annotations': + .: {} + 'f:kubectl.kubernetes.io/last-applied-configuration': {} + 'f:labels': + .: {} + 'f:app.kubernetes.io/instance': {} + 'f:spec': + .: {} + 'f:backendName': {} + 'f:credentials': + .: {} + 'f:name': {} + 'f:dataLIF': {} + 'f:defaults': + .: {} + 'f:exportPolicy': {} + 'f:snapshotPolicy': {} + 'f:snapshotReserve': {} + 'f:spaceReserve': {} + 'f:deletionPolicy': {} + 'f:limitVolumeSize': {} + 'f:managementLIF': {} + 'f:nfsMountOptions': {} + 'f:storageDriverName': {} + 'f:storagePrefix': {} + 'f:svm': {} + 'f:version': {} + manager: argocd-application-controller + operation: Update + time: '2021-06-24T11:36:17Z' + - apiVersion: trident.netapp.io/v1 + fieldsType: FieldsV1 + fieldsV1: + 'f:metadata': + 'f:finalizers': + .: {} + 'v:"trident.netapp.io"': {} + 'f:status': + .: {} + 'f:backendInfo': + .: {} + 'f:backendName': {} + 'f:backendUUID': {} + 'f:deletionPolicy': {} + 'f:lastOperationStatus': {} + 'f:message': {} + 'f:phase': {} + manager: trident_orchestrator + operation: Update + time: '2021-06-24T11:36:17Z' + name: backend-tbc-ontap-nas + namespace: trident + resourceVersion: '3430134' + uid: a5714401-d4f4-480d-a317-d85c630afe55 +spec: + backendName: tbc-ontap-nas + credentials: + name: backend-ontap-nas-secret-smv10 + dataLIF: 1.1.1.1 + defaults: + exportPolicy: ep_000 + snapshotPolicy: default + snapshotReserve: '10' + spaceReserve: volume + deletionPolicy: retain + limitVolumeSize: 100Gi + managementLIF: 1.1.1.1 + nfsMountOptions: nfsvers=4 + storageDriverName: ontap-nas + storagePrefix: trident-play01 + svm: svm-play01 + version: 1 +status: + backendInfo: + backendName: '' + backendUUID: '' + deletionPolicy: retain + lastOperationStatus: Failed + message: 'Failed to create backend: backend tbc-ontap-nas already exists' + phase: '' diff --git a/pkg/resource_customizations/trident.netapp.io/TridentBackendConfig/testdata/healthy.yaml b/pkg/resource_customizations/trident.netapp.io/TridentBackendConfig/testdata/healthy.yaml new file mode 100644 index 0000000..2f45144 --- /dev/null +++ b/pkg/resource_customizations/trident.netapp.io/TridentBackendConfig/testdata/healthy.yaml @@ -0,0 +1,94 @@ +apiVersion: trident.netapp.io/v1 +kind: TridentBackendConfig +metadata: + creationTimestamp: '2021-06-24T11:30:12Z' + finalizers: + - trident.netapp.io + generation: 1 + labels: + app.kubernetes.io/instance: trident-config + managedFields: + - apiVersion: trident.netapp.io/v1 + fieldsType: FieldsV1 + 
fieldsV1: + 'f:metadata': + 'f:annotations': + .: {} + 'f:kubectl.kubernetes.io/last-applied-configuration': {} + 'f:labels': + .: {} + 'f:app.kubernetes.io/instance': {} + 'f:spec': + .: {} + 'f:backendName': {} + 'f:credentials': + .: {} + 'f:name': {} + 'f:dataLIF': {} + 'f:defaults': + .: {} + 'f:exportPolicy': {} + 'f:snapshotPolicy': {} + 'f:snapshotReserve': {} + 'f:spaceReserve': {} + 'f:deletionPolicy': {} + 'f:limitVolumeSize': {} + 'f:managementLIF': {} + 'f:nfsMountOptions': {} + 'f:storageDriverName': {} + 'f:storagePrefix': {} + 'f:svm': {} + 'f:version': {} + manager: argocd-application-controller + operation: Update + time: '2021-06-24T11:30:12Z' + - apiVersion: trident.netapp.io/v1 + fieldsType: FieldsV1 + fieldsV1: + 'f:metadata': + 'f:finalizers': + .: {} + 'v:"trident.netapp.io"': {} + 'f:status': + .: {} + 'f:backendInfo': + .: {} + 'f:backendName': {} + 'f:backendUUID': {} + 'f:deletionPolicy': {} + 'f:lastOperationStatus': {} + 'f:message': {} + 'f:phase': {} + manager: trident_orchestrator + operation: Update + time: '2021-06-24T11:30:12Z' + name: backend-tbc-ontap-nas + namespace: trident + resourceVersion: '3429076' + uid: 3de31983-aff9-4adb-a31c-1541d2cd86e6 +spec: + backendName: tbc-ontap-nas + credentials: + name: backend-ontap-nas-secret + dataLIF: 1.1.1.1 + defaults: + exportPolicy: ep_000 + snapshotPolicy: default + snapshotReserve: '10' + spaceReserve: volume + deletionPolicy: retain + limitVolumeSize: 100Gi + managementLIF: 1.1.1.1 + nfsMountOptions: nfsvers=4 + storageDriverName: ontap-nas + storagePrefix: trident-play01 + svm: svm-trident-play + version: 1 +status: + backendInfo: + backendName: tbc-ontap-nas + backendUUID: 00000000-0000-0000-0000-000000000000 + deletionPolicy: retain + lastOperationStatus: Success + message: Backend 'tbc-ontap-nas' created + phase: Bound diff --git a/pkg/resource_customizations/trident.netapp.io/TridentBackendConfig/testdata/progressing.yaml b/pkg/resource_customizations/trident.netapp.io/TridentBackendConfig/testdata/progressing.yaml new file mode 100644 index 0000000..bd8c60b --- /dev/null +++ b/pkg/resource_customizations/trident.netapp.io/TridentBackendConfig/testdata/progressing.yaml @@ -0,0 +1,91 @@ +apiVersion: trident.netapp.io/v1 +kind: TridentBackendConfig +metadata: + creationTimestamp: '2021-06-24T11:30:12Z' + finalizers: + - trident.netapp.io + generation: 1 + labels: + app.kubernetes.io/instance: trident-config + managedFields: + - apiVersion: trident.netapp.io/v1 + fieldsType: FieldsV1 + fieldsV1: + 'f:metadata': + 'f:annotations': + .: {} + 'f:kubectl.kubernetes.io/last-applied-configuration': {} + 'f:labels': + .: {} + 'f:app.kubernetes.io/instance': {} + 'f:spec': + .: {} + 'f:backendName': {} + 'f:credentials': + .: {} + 'f:name': {} + 'f:dataLIF': {} + 'f:defaults': + .: {} + 'f:exportPolicy': {} + 'f:snapshotPolicy': {} + 'f:snapshotReserve': {} + 'f:spaceReserve': {} + 'f:deletionPolicy': {} + 'f:limitVolumeSize': {} + 'f:managementLIF': {} + 'f:nfsMountOptions': {} + 'f:storageDriverName': {} + 'f:storagePrefix': {} + 'f:svm': {} + 'f:version': {} + manager: argocd-application-controller + operation: Update + time: '2021-06-24T11:30:12Z' + - apiVersion: trident.netapp.io/v1 + fieldsType: FieldsV1 + fieldsV1: + 'f:metadata': + 'f:finalizers': + .: {} + 'v:"trident.netapp.io"': {} + 'f:status': + .: {} + 'f:backendInfo': + .: {} + 'f:backendName': {} + 'f:backendUUID': {} + 'f:deletionPolicy': {} + 'f:lastOperationStatus': {} + 'f:message': {} + 'f:phase': {} + manager: trident_orchestrator + 
operation: Update + time: '2021-06-24T11:30:12Z' + name: backend-tbc-ontap-nas + namespace: trident + resourceVersion: '3429076' + uid: 3de31983-aff9-4adb-a31c-1541d2cd86e6 +spec: + backendName: tbc-ontap-nas + credentials: + name: backend-ontap-nas-secret + dataLIF: 1.1.1.1 + defaults: + exportPolicy: ep_000 + snapshotPolicy: default + snapshotReserve: '10' + spaceReserve: volume + deletionPolicy: retain + limitVolumeSize: 100Gi + managementLIF: 1.1.1.1 + nfsMountOptions: nfsvers=4 + storageDriverName: ontap-nas + storagePrefix: trident-play01 + svm: svm-trident-play + version: 1 +status: + deletionPolicy: retain + lastOperationStatus: Progressing + message: Creating 'tbc-ontap-nas' backend + phase: Bound diff --git a/pkg/resource_customizations/trident.netapp.io/TridentOrchestrator/health.lua b/pkg/resource_customizations/trident.netapp.io/TridentOrchestrator/health.lua new file mode 100644 index 0000000..18c3b89 --- /dev/null +++ b/pkg/resource_customizations/trident.netapp.io/TridentOrchestrator/health.lua @@ -0,0 +1,16 @@ +local hs = {} +if obj.status ~= nil then + if obj.status.status == "Installed" then + hs.status = "Healthy" + hs.message = obj.status.message + return hs + end + if obj.status.status == "Failed" or obj.status.status == "Error" then + hs.status = "Degraded" + hs.message = obj.status.message + return hs + end +end +hs.status = "Progressing" +hs.message = "Waiting for trident installation" +return hs diff --git a/pkg/resource_customizations/trident.netapp.io/TridentOrchestrator/health_test.yaml b/pkg/resource_customizations/trident.netapp.io/TridentOrchestrator/health_test.yaml new file mode 100644 index 0000000..86ee88f --- /dev/null +++ b/pkg/resource_customizations/trident.netapp.io/TridentOrchestrator/health_test.yaml @@ -0,0 +1,13 @@ +tests: +- healthStatus: + status: Degraded + message: "Trident is bound to another CR 'trident'" + inputPath: testdata/degraded.yaml +- healthStatus: + status: Progressing + message: "Waiting for trident installation" + inputPath: testdata/progressing.yaml +- healthStatus: + status: Healthy + message: "Trident installed" + inputPath: testdata/healthy.yaml diff --git a/pkg/resource_customizations/trident.netapp.io/TridentOrchestrator/testdata/degraded.yaml b/pkg/resource_customizations/trident.netapp.io/TridentOrchestrator/testdata/degraded.yaml new file mode 100644 index 0000000..deb2058 --- /dev/null +++ b/pkg/resource_customizations/trident.netapp.io/TridentOrchestrator/testdata/degraded.yaml @@ -0,0 +1,91 @@ +apiVersion: trident.netapp.io/v1 +kind: TridentOrchestrator +metadata: + creationTimestamp: '2021-06-24T11:27:54Z' + generation: 1 + labels: + app.kubernetes.io/instance: trident-operator + managedFields: + - apiVersion: trident.netapp.io/v1 + fieldsType: FieldsV1 + fieldsV1: + 'f:metadata': + 'f:annotations': + .: {} + 'f:kubectl.kubernetes.io/last-applied-configuration': {} + 'f:labels': + .: {} + 'f:app.kubernetes.io/instance': {} + 'f:spec': + .: {} + 'f:autosupportImage': {} + 'f:debug': {} + 'f:logFormat': {} + 'f:namespace': {} + 'f:tridentImage': {} + manager: argocd-application-controller + operation: Update + time: '2021-06-24T11:27:54Z' + - apiVersion: trident.netapp.io/v1 + fieldsType: FieldsV1 + fieldsV1: + 'f:status': + .: {} + 'f:currentInstallationParams': + .: {} + 'f:IPv6': {} + 'f:autosupportHostname': {} + 'f:autosupportImage': {} + 'f:autosupportProxy': {} + 'f:autosupportSerialNumber': {} + 'f:debug': {} + 'f:enableNodePrep': {} + 'f:imagePullSecrets': {} + 'f:imageRegistry': {} + 'f:k8sTimeout': {} + 
'f:kubeletDir': {} + 'f:logFormat': {} + 'f:silenceAutosupport': {} + 'f:tridentImage': {} + 'f:message': {} + 'f:namespace': {} + 'f:status': {} + 'f:version': {} + manager: trident-operator + operation: Update + time: '2021-06-24T11:27:55Z' + name: trident + resourceVersion: '3428714' + uid: 4353204f-59f7-41be-a853-dcf114011222 +spec: + IPv6: false + autosupportImage: 'netapp/trident-autosupport:21.01' + autosupportProxy: null + debug: false + enableNodePrep: false + k8sTimeout: 0 + kubeletDir: null + logFormat: text + namespace: trident + silenceAutosupport: false + tridentImage: 'netapp/trident:21.04.0' +status: + currentInstallationParams: + IPv6: '' + autosupportHostname: '' + autosupportImage: '' + autosupportProxy: '' + autosupportSerialNumber: '' + debug: '' + enableNodePrep: '' + imagePullSecrets: null + imageRegistry: '' + k8sTimeout: '' + kubeletDir: '' + logFormat: '' + silenceAutosupport: '' + tridentImage: '' + message: "Trident is bound to another CR 'trident'" + namespace: trident + status: Error + version: '' diff --git a/pkg/resource_customizations/trident.netapp.io/TridentOrchestrator/testdata/healthy.yaml b/pkg/resource_customizations/trident.netapp.io/TridentOrchestrator/testdata/healthy.yaml new file mode 100644 index 0000000..211c7a2 --- /dev/null +++ b/pkg/resource_customizations/trident.netapp.io/TridentOrchestrator/testdata/healthy.yaml @@ -0,0 +1,91 @@ +apiVersion: trident.netapp.io/v1 +kind: TridentOrchestrator +metadata: + creationTimestamp: '2021-06-24T10:51:34Z' + generation: 1 + labels: + app.kubernetes.io/instance: trident-operator + managedFields: + - apiVersion: trident.netapp.io/v1 + fieldsType: FieldsV1 + fieldsV1: + 'f:metadata': + 'f:annotations': + .: {} + 'f:kubectl.kubernetes.io/last-applied-configuration': {} + 'f:labels': + .: {} + 'f:app.kubernetes.io/instance': {} + 'f:spec': + .: {} + 'f:autosupportImage': {} + 'f:debug': {} + 'f:logFormat': {} + 'f:namespace': {} + 'f:tridentImage': {} + manager: argocd-application-controller + operation: Update + time: '2021-06-24T10:51:34Z' + - apiVersion: trident.netapp.io/v1 + fieldsType: FieldsV1 + fieldsV1: + 'f:status': + .: {} + 'f:currentInstallationParams': + .: {} + 'f:IPv6': {} + 'f:autosupportHostname': {} + 'f:autosupportImage': {} + 'f:autosupportProxy': {} + 'f:autosupportSerialNumber': {} + 'f:debug': {} + 'f:enableNodePrep': {} + 'f:imagePullSecrets': {} + 'f:imageRegistry': {} + 'f:k8sTimeout': {} + 'f:kubeletDir': {} + 'f:logFormat': {} + 'f:silenceAutosupport': {} + 'f:tridentImage': {} + 'f:message': {} + 'f:namespace': {} + 'f:status': {} + 'f:version': {} + manager: trident-operator + operation: Update + time: '2021-06-24T10:51:35Z' + name: trident + resourceVersion: '3424514' + uid: eb768637-6b11-4e70-8646-43c2117bc202 +spec: + IPv6: false + autosupportImage: 'netapp/trident-autosupport:21.01' + autosupportProxy: null + debug: false + enableNodePrep: false + k8sTimeout: 0 + kubeletDir: null + logFormat: text + namespace: trident + silenceAutosupport: false + tridentImage: 'netapp/trident:21.04.0' +status: + currentInstallationParams: + IPv6: 'false' + autosupportHostname: '' + autosupportImage: 'netapp/trident-autosupport:21.01' + autosupportProxy: '' + autosupportSerialNumber: '' + debug: 'false' + enableNodePrep: 'false' + imagePullSecrets: [] + imageRegistry: '' + k8sTimeout: '30' + kubeletDir: /var/lib/kubelet + logFormat: text + silenceAutosupport: 'false' + tridentImage: 'netapp/trident:21.04.0' + message: Trident installed + namespace: trident + status: Installed + version: 
v21.04.0 diff --git a/pkg/resource_customizations/trident.netapp.io/TridentOrchestrator/testdata/progressing.yaml b/pkg/resource_customizations/trident.netapp.io/TridentOrchestrator/testdata/progressing.yaml new file mode 100644 index 0000000..6647b8d --- /dev/null +++ b/pkg/resource_customizations/trident.netapp.io/TridentOrchestrator/testdata/progressing.yaml @@ -0,0 +1,91 @@ +apiVersion: trident.netapp.io/v1 +kind: TridentOrchestrator +metadata: + creationTimestamp: '2021-06-24T11:27:54Z' + generation: 1 + labels: + app.kubernetes.io/instance: trident-operator + managedFields: + - apiVersion: trident.netapp.io/v1 + fieldsType: FieldsV1 + fieldsV1: + 'f:metadata': + 'f:annotations': + .: {} + 'f:kubectl.kubernetes.io/last-applied-configuration': {} + 'f:labels': + .: {} + 'f:app.kubernetes.io/instance': {} + 'f:spec': + .: {} + 'f:autosupportImage': {} + 'f:debug': {} + 'f:logFormat': {} + 'f:namespace': {} + 'f:tridentImage': {} + manager: argocd-application-controller + operation: Update + time: '2021-06-24T11:27:54Z' + - apiVersion: trident.netapp.io/v1 + fieldsType: FieldsV1 + fieldsV1: + 'f:status': + .: {} + 'f:currentInstallationParams': + .: {} + 'f:IPv6': {} + 'f:autosupportHostname': {} + 'f:autosupportImage': {} + 'f:autosupportProxy': {} + 'f:autosupportSerialNumber': {} + 'f:debug': {} + 'f:enableNodePrep': {} + 'f:imagePullSecrets': {} + 'f:imageRegistry': {} + 'f:k8sTimeout': {} + 'f:kubeletDir': {} + 'f:logFormat': {} + 'f:silenceAutosupport': {} + 'f:tridentImage': {} + 'f:message': {} + 'f:namespace': {} + 'f:status': {} + 'f:version': {} + manager: trident-operator + operation: Update + time: '2021-06-24T11:27:55Z' + name: trident + resourceVersion: '3428714' + uid: 4353204f-59f7-41be-a853-dcf114011222 +spec: + IPv6: false + autosupportImage: 'netapp/trident-autosupport:21.01' + autosupportProxy: null + debug: false + enableNodePrep: false + k8sTimeout: 0 + kubeletDir: null + logFormat: text + namespace: trident + silenceAutosupport: false + tridentImage: 'netapp/trident:21.04.0' +status: + currentInstallationParams: + IPv6: '' + autosupportHostname: '' + autosupportImage: '' + autosupportProxy: '' + autosupportSerialNumber: '' + debug: '' + enableNodePrep: '' + imagePullSecrets: null + imageRegistry: '' + k8sTimeout: '' + kubeletDir: '' + logFormat: '' + silenceAutosupport: '' + tridentImage: '' + message: Installing Trident + namespace: trident + status: Installing + version: '' diff --git a/pkg/resource_customizations/work.karmada.io/ClusterResourceBinding/health.lua b/pkg/resource_customizations/work.karmada.io/ClusterResourceBinding/health.lua new file mode 100644 index 0000000..5e94523 --- /dev/null +++ b/pkg/resource_customizations/work.karmada.io/ClusterResourceBinding/health.lua @@ -0,0 +1,37 @@ +local health_status = {} +if obj.status == nil then + health_status.status = "Progressing" + health_status.message = "Current resource status is insufficient" + return health_status +end + +if obj.spec.clusters == nil or #obj.spec.clusters == 0 then + health_status.status = "Progressing" + health_status.message = "Current resource status is insufficient" + return health_status +end + +if obj.status.aggregatedStatus == nil or #obj.spec.clusters ~= #obj.status.aggregatedStatus then + health_status.status = "Progressing" + health_status.message = "Current resource status is insufficient" + return health_status +end + +for i, status in ipairs(obj.status.aggregatedStatus) do + if status.health == "Unhealthy" then + health_status.status = "Degraded" + health_status.message 
= "Current resource status is unhealthy" + return health_status + end + + if status.health == "Unknown" then + if status.applied ~= true then + health_status.status = "Degraded" + health_status.message = "Current resource status is unhealthy" + return health_status + end + end +end + +health_status.status = "Healthy" +return health_status diff --git a/pkg/resource_customizations/work.karmada.io/ClusterResourceBinding/health_test.yaml b/pkg/resource_customizations/work.karmada.io/ClusterResourceBinding/health_test.yaml new file mode 100644 index 0000000..3ebee0e --- /dev/null +++ b/pkg/resource_customizations/work.karmada.io/ClusterResourceBinding/health_test.yaml @@ -0,0 +1,31 @@ +tests: + - healthStatus: + status: Progressing + message: "Current resource status is insufficient" + inputPath: testdata/progressing.yaml + - healthStatus: + status: Progressing + message: "Current resource status is insufficient" + inputPath: testdata/progressing_aggregatedStatus.yaml + - healthStatus: + status: Progressing + message: "Current resource status is insufficient" + inputPath: testdata/progressing_cluster.yaml + - healthStatus: + status: Degraded + message: "Current resource status is unhealthy" + inputPath: testdata/degraded_unapplied.yaml + - healthStatus: + status: Degraded + message: "Current resource status is unhealthy" + inputPath: testdata/degraded_unknown.yaml + - healthStatus: + status: Degraded + message: "Current resource status is unhealthy" + inputPath: testdata/degraded_unhealth.yaml + - healthStatus: + status: Healthy + inputPath: testdata/health.yaml + - healthStatus: + status: Healthy + inputPath: testdata/health_unknown.yaml diff --git a/pkg/resource_customizations/work.karmada.io/ClusterResourceBinding/testdata/degraded_unapplied.yaml b/pkg/resource_customizations/work.karmada.io/ClusterResourceBinding/testdata/degraded_unapplied.yaml new file mode 100644 index 0000000..94d7370 --- /dev/null +++ b/pkg/resource_customizations/work.karmada.io/ClusterResourceBinding/testdata/degraded_unapplied.yaml @@ -0,0 +1,46 @@ +apiVersion: work.karmada.io/v1alpha2 +kind: ClusterResourceBinding +metadata: + finalizers: + - karmada.io/binding-controller + generation: 5 + labels: + clusterpropagationpolicy.karmada.io/name: service-testk4j5t + name: test-service + namespace: default + ownerReferences: + - apiVersion: v1 + blockOwnerDeletion: true + controller: true + kind: Service + name: test + uid: 039b0d1a-05cb-40b4-b43a-438b0de386af + resourceVersion: "4106772" + uid: 3932ee50-4c2b-4e77-9bfb-45eeb4ec220f +spec: + clusters: + - name: member1 + resource: + apiVersion: v1 + kind: Service + name: service-test + namespace: default + resourceVersion: "3943220" + uid: 9c2b39b9-4607-4795-87db-1a54680939d0 +status: + aggregatedStatus: + - applied: false + clusterName: member1 + health: Unhealthy + conditions: + - lastTransitionTime: "2022-11-03T10:56:30Z" + message: All works have been successfully applied + reason: FullyAppliedSuccess + status: "True" + type: FullyApplied + - lastTransitionTime: "2022-11-03T10:56:30Z" + message: Binding has been scheduled + reason: BindingScheduled + status: "True" + type: Scheduled + schedulerObservedGeneration: 2 diff --git a/pkg/resource_customizations/work.karmada.io/ClusterResourceBinding/testdata/degraded_unhealth.yaml b/pkg/resource_customizations/work.karmada.io/ClusterResourceBinding/testdata/degraded_unhealth.yaml new file mode 100644 index 0000000..6c39b88 --- /dev/null +++ 
b/pkg/resource_customizations/work.karmada.io/ClusterResourceBinding/testdata/degraded_unhealth.yaml @@ -0,0 +1,46 @@ +apiVersion: work.karmada.io/v1alpha2 +kind: ClusterResourceBinding +metadata: + finalizers: + - karmada.io/binding-controller + generation: 5 + labels: + clusterpropagationpolicy.karmada.io/name: service-testk4j5t + name: test-service + namespace: default + ownerReferences: + - apiVersion: v1 + blockOwnerDeletion: true + controller: true + kind: Service + name: test + uid: 039b0d1a-05cb-40b4-b43a-438b0de386af + resourceVersion: "4106772" + uid: 3932ee50-4c2b-4e77-9bfb-45eeb4ec220f +spec: + clusters: + - name: member1 + resource: + apiVersion: v1 + kind: Service + name: service-test + namespace: default + resourceVersion: "3943220" + uid: 9c2b39b9-4607-4795-87db-1a54680939d0 +status: + aggregatedStatus: + - applied: true + clusterName: member1 + health: Unhealthy + conditions: + - lastTransitionTime: "2022-11-03T10:56:30Z" + message: All works have been successfully applied + reason: FullyAppliedSuccess + status: "True" + type: FullyApplied + - lastTransitionTime: "2022-11-03T10:56:30Z" + message: Binding has been scheduled + reason: BindingScheduled + status: "True" + type: Scheduled + schedulerObservedGeneration: 2 diff --git a/pkg/resource_customizations/work.karmada.io/ClusterResourceBinding/testdata/degraded_unknown.yaml b/pkg/resource_customizations/work.karmada.io/ClusterResourceBinding/testdata/degraded_unknown.yaml new file mode 100644 index 0000000..b87e39e --- /dev/null +++ b/pkg/resource_customizations/work.karmada.io/ClusterResourceBinding/testdata/degraded_unknown.yaml @@ -0,0 +1,46 @@ +apiVersion: work.karmada.io/v1alpha2 +kind: ClusterResourceBinding +metadata: + finalizers: + - karmada.io/binding-controller + generation: 5 + labels: + clusterpropagationpolicy.karmada.io/name: service-testk4j5t + name: test-service + namespace: default + ownerReferences: + - apiVersion: v1 + blockOwnerDeletion: true + controller: true + kind: Service + name: test + uid: 039b0d1a-05cb-40b4-b43a-438b0de386af + resourceVersion: "4106772" + uid: 3932ee50-4c2b-4e77-9bfb-45eeb4ec220f +spec: + clusters: + - name: member1 + resource: + apiVersion: v1 + kind: Service + name: service-test + namespace: default + resourceVersion: "3943220" + uid: 9c2b39b9-4607-4795-87db-1a54680939d0 +status: + aggregatedStatus: + - applied: false + clusterName: member1 + health: Unknown + conditions: + - lastTransitionTime: "2022-11-03T10:56:30Z" + message: All works have been successfully applied + reason: FullyAppliedSuccess + status: "True" + type: FullyApplied + - lastTransitionTime: "2022-11-03T10:56:30Z" + message: Binding has been scheduled + reason: BindingScheduled + status: "True" + type: Scheduled + schedulerObservedGeneration: 2 diff --git a/pkg/resource_customizations/work.karmada.io/ClusterResourceBinding/testdata/health.yaml b/pkg/resource_customizations/work.karmada.io/ClusterResourceBinding/testdata/health.yaml new file mode 100644 index 0000000..9e8a87d --- /dev/null +++ b/pkg/resource_customizations/work.karmada.io/ClusterResourceBinding/testdata/health.yaml @@ -0,0 +1,83 @@ +apiVersion: work.karmada.io/v1alpha2 +kind: ClusterResourceBinding +metadata: + finalizers: + - karmada.io/binding-controller + generation: 5 + labels: + clusterpropagationpolicy.karmada.io/name: service-testk4j5t + name: test-service + namespace: default + ownerReferences: + - apiVersion: v1 + blockOwnerDeletion: true + controller: true + kind: Service + name: test + uid: 039b0d1a-05cb-40b4-b43a-438b0de386af + 
resourceVersion: "4106772" + uid: 3932ee50-4c2b-4e77-9bfb-45eeb4ec220f +spec: + clusters: + - name: member1 + - name: member2 + - name: member3 + replicaRequirements: + nodeClaim: + tolerations: + - effect: NoExecute + key: node.kubernetes.io/not-ready + operator: Exists + tolerationSeconds: 300 + - effect: NoExecute + key: node.kubernetes.io/unreachable + operator: Exists + tolerationSeconds: 300 + resourceRequest: + cpu: 250m + memory: 512Mi + replicas: 1 + resource: + apiVersion: apps/v1 + kind: Deployment + name: test1 + namespace: default + resourceVersion: "3663243" + uid: 58ccb955-4da6-4167-9b65-dddadcef569e +status: + aggregatedStatus: + - applied: true + clusterName: member1 + health: Healthy + status: + availableReplicas: 1 + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 + - applied: true + clusterName: member2 + health: Healthy + status: + replicas: 1 + unavailableReplicas: 1 + updatedReplicas: 1 + - applied: true + clusterName: member3 + health: Healthy + status: + availableReplicas: 1 + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 + conditions: + - lastTransitionTime: "2022-11-02T02:49:06Z" + message: All works have been successfully applied + reason: FullyAppliedSuccess + status: "True" + type: FullyApplied + - lastTransitionTime: "2022-10-28T09:56:31Z" + message: Binding has been scheduled + reason: BindingScheduled + status: "True" + type: Scheduled + schedulerObservedGeneration: 7 diff --git a/pkg/resource_customizations/work.karmada.io/ClusterResourceBinding/testdata/health_unknown.yaml b/pkg/resource_customizations/work.karmada.io/ClusterResourceBinding/testdata/health_unknown.yaml new file mode 100644 index 0000000..4a356a9 --- /dev/null +++ b/pkg/resource_customizations/work.karmada.io/ClusterResourceBinding/testdata/health_unknown.yaml @@ -0,0 +1,46 @@ +apiVersion: work.karmada.io/v1alpha2 +kind: ClusterResourceBinding +metadata: + finalizers: + - karmada.io/binding-controller + generation: 5 + labels: + clusterpropagationpolicy.karmada.io/name: service-testk4j5t + name: test-service + namespace: default + ownerReferences: + - apiVersion: v1 + blockOwnerDeletion: true + controller: true + kind: Service + name: test + uid: 039b0d1a-05cb-40b4-b43a-438b0de386af + resourceVersion: "4106772" + uid: 3932ee50-4c2b-4e77-9bfb-45eeb4ec220f +spec: + clusters: + - name: member1 + resource: + apiVersion: v1 + kind: Service + name: service-test + namespace: default + resourceVersion: "3943220" + uid: 9c2b39b9-4607-4795-87db-1a54680939d0 +status: + aggregatedStatus: + - applied: true + clusterName: member1 + health: Unknown + conditions: + - lastTransitionTime: "2022-11-03T10:56:30Z" + message: All works have been successfully applied + reason: FullyAppliedSuccess + status: "True" + type: FullyApplied + - lastTransitionTime: "2022-11-03T10:56:30Z" + message: Binding has been scheduled + reason: BindingScheduled + status: "True" + type: Scheduled + schedulerObservedGeneration: 2 diff --git a/pkg/resource_customizations/work.karmada.io/ClusterResourceBinding/testdata/progressing.yaml b/pkg/resource_customizations/work.karmada.io/ClusterResourceBinding/testdata/progressing.yaml new file mode 100644 index 0000000..8d4c5f2 --- /dev/null +++ b/pkg/resource_customizations/work.karmada.io/ClusterResourceBinding/testdata/progressing.yaml @@ -0,0 +1,41 @@ +apiVersion: work.karmada.io/v1alpha2 +kind: ClusterResourceBinding +metadata: + finalizers: + - karmada.io/binding-controller + generation: 5 + labels: + clusterpropagationpolicy.karmada.io/name: service-testk4j5t + name: 
test-service + namespace: default + ownerReferences: + - apiVersion: v1 + blockOwnerDeletion: true + controller: true + kind: Service + name: test + uid: 039b0d1a-05cb-40b4-b43a-438b0de386af + resourceVersion: "4106772" + uid: 3932ee50-4c2b-4e77-9bfb-45eeb4ec220f +spec: + resource: + apiVersion: v1 + kind: Service + name: test + namespace: default + resourceVersion: "2605059" + uid: 039b0d1a-05cb-40b4-b43a-438b0de386af +status: + conditions: + - lastTransitionTime: "2022-10-27T07:19:50Z" + message: '0/3 clusters are available: 3 cluster(s) didn''t match the placement + cluster affinity constraint.' + reason: BindingFailedScheduling + status: "False" + type: Scheduled + - lastTransitionTime: "2022-10-27T07:19:52Z" + message: Failed to apply all works, see status.aggregatedStatus for details + reason: FullyAppliedFailed + status: "False" + type: FullyApplied + schedulerObservedGeneration: 4 diff --git a/pkg/resource_customizations/work.karmada.io/ClusterResourceBinding/testdata/progressing_aggregatedStatus.yaml b/pkg/resource_customizations/work.karmada.io/ClusterResourceBinding/testdata/progressing_aggregatedStatus.yaml new file mode 100644 index 0000000..5e7e851 --- /dev/null +++ b/pkg/resource_customizations/work.karmada.io/ClusterResourceBinding/testdata/progressing_aggregatedStatus.yaml @@ -0,0 +1,46 @@ +apiVersion: work.karmada.io/v1alpha2 +kind: ClusterResourceBinding +metadata: + finalizers: + - karmada.io/binding-controller + generation: 5 + labels: + clusterpropagationpolicy.karmada.io/name: service-testk4j5t + name: test-service + namespace: default + ownerReferences: + - apiVersion: v1 + blockOwnerDeletion: true + controller: true + kind: Service + name: test + uid: 039b0d1a-05cb-40b4-b43a-438b0de386af + resourceVersion: "4106772" + uid: 3932ee50-4c2b-4e77-9bfb-45eeb4ec220f +spec: + clusters: + - name: member1 + - name: member2 + - name: member3 +status: + aggregatedStatus: + - applied: true + clusterName: member1 + health: Healthy + status: + availableReplicas: 1 + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 + conditions: + - ansibleResult: + changed: 1 + completion: 2020-06-08T13:41:20.133525 + failures: 0 + ok: 56 + skipped: 82 + lastTransitionTime: "2020-06-04T17:47:31Z" + message: Reconciling + reason: Running + status: "True" + type: Running diff --git a/pkg/resource_customizations/work.karmada.io/ClusterResourceBinding/testdata/progressing_cluster.yaml b/pkg/resource_customizations/work.karmada.io/ClusterResourceBinding/testdata/progressing_cluster.yaml new file mode 100644 index 0000000..08d63e3 --- /dev/null +++ b/pkg/resource_customizations/work.karmada.io/ClusterResourceBinding/testdata/progressing_cluster.yaml @@ -0,0 +1,34 @@ +apiVersion: work.karmada.io/v1alpha2 +kind: ClusterResourceBinding +metadata: + finalizers: + - karmada.io/binding-controller + generation: 5 + labels: + clusterpropagationpolicy.karmada.io/name: service-testk4j5t + name: test-service + namespace: default + ownerReferences: + - apiVersion: v1 + blockOwnerDeletion: true + controller: true + kind: Service + name: test + uid: 039b0d1a-05cb-40b4-b43a-438b0de386af + resourceVersion: "4106772" + uid: 3932ee50-4c2b-4e77-9bfb-45eeb4ec220f +spec: + clusters: [] +status: + conditions: + - ansibleResult: + changed: 1 + completion: 2020-06-08T13:41:20.133525 + failures: 0 + ok: 56 + skipped: 82 + lastTransitionTime: "2020-06-04T17:47:31Z" + message: Reconciling + reason: Running + status: "True" + type: Running diff --git a/pkg/resource_customizations/work.karmada.io/ResourceBinding/health.lua 
b/pkg/resource_customizations/work.karmada.io/ResourceBinding/health.lua new file mode 100644 index 0000000..5e94523 --- /dev/null +++ b/pkg/resource_customizations/work.karmada.io/ResourceBinding/health.lua @@ -0,0 +1,37 @@ +local health_status = {} +if obj.status == nil then + health_status.status = "Progressing" + health_status.message = "Current resource status is insufficient" + return health_status +end + +if obj.spec.clusters == nil or #obj.spec.clusters == 0 then + health_status.status = "Progressing" + health_status.message = "Current resource status is insufficient" + return health_status +end + +if obj.status.aggregatedStatus == nil or #obj.spec.clusters ~= #obj.status.aggregatedStatus then + health_status.status = "Progressing" + health_status.message = "Current resource status is insufficient" + return health_status +end + +for i, status in ipairs(obj.status.aggregatedStatus) do + if status.health == "Unhealthy" then + health_status.status = "Degraded" + health_status.message = "Current resource status is unhealthy" + return health_status + end + + if status.health == "Unknown" then + if status.applied ~= true then + health_status.status = "Degraded" + health_status.message = "Current resource status is unhealthy" + return health_status + end + end +end + +health_status.status = "Healthy" +return health_status diff --git a/pkg/resource_customizations/work.karmada.io/ResourceBinding/health_test.yaml b/pkg/resource_customizations/work.karmada.io/ResourceBinding/health_test.yaml new file mode 100644 index 0000000..3ebee0e --- /dev/null +++ b/pkg/resource_customizations/work.karmada.io/ResourceBinding/health_test.yaml @@ -0,0 +1,31 @@ +tests: + - healthStatus: + status: Progressing + message: "Current resource status is insufficient" + inputPath: testdata/progressing.yaml + - healthStatus: + status: Progressing + message: "Current resource status is insufficient" + inputPath: testdata/progressing_aggregatedStatus.yaml + - healthStatus: + status: Progressing + message: "Current resource status is insufficient" + inputPath: testdata/progressing_cluster.yaml + - healthStatus: + status: Degraded + message: "Current resource status is unhealthy" + inputPath: testdata/degraded_unapplied.yaml + - healthStatus: + status: Degraded + message: "Current resource status is unhealthy" + inputPath: testdata/degraded_unknown.yaml + - healthStatus: + status: Degraded + message: "Current resource status is unhealthy" + inputPath: testdata/degraded_unhealth.yaml + - healthStatus: + status: Healthy + inputPath: testdata/health.yaml + - healthStatus: + status: Healthy + inputPath: testdata/health_unknown.yaml diff --git a/pkg/resource_customizations/work.karmada.io/ResourceBinding/testdata/degraded_unapplied.yaml b/pkg/resource_customizations/work.karmada.io/ResourceBinding/testdata/degraded_unapplied.yaml new file mode 100644 index 0000000..f36b1d3 --- /dev/null +++ b/pkg/resource_customizations/work.karmada.io/ResourceBinding/testdata/degraded_unapplied.yaml @@ -0,0 +1,50 @@ +apiVersion: work.karmada.io/v1alpha2 +kind: ResourceBinding +metadata: + finalizers: + - karmada.io/binding-controller + generation: 2 + labels: + propagationpolicy.karmada.io/name: service-service-testn7qfx + propagationpolicy.karmada.io/namespace: default + name: service-test-service + namespace: default + ownerReferences: + - apiVersion: v1 + blockOwnerDeletion: true + controller: true + kind: Service + name: service-test + uid: 9c2b39b9-4607-4795-87db-1a54680939d0 + resourceVersion: "3943290" + uid: 
1459071b-e460-4038-8a5d-d2a4803ad6f1 +spec: + clusters: + - name: member1 + - name: member2 + resource: + apiVersion: v1 + kind: Service + name: service-test + namespace: default + resourceVersion: "3943220" + uid: 9c2b39b9-4607-4795-87db-1a54680939d0 +status: + aggregatedStatus: + - applied: false + clusterName: member1 + health: Unhealthy + - clusterName: member2 + health: Unhealthy + conditions: + - lastTransitionTime: "2022-11-03T10:56:30Z" + message: All works have been successfully applied + reason: FullyAppliedSuccess + status: "True" + type: FullyApplied + - lastTransitionTime: "2022-11-03T10:56:30Z" + message: Binding has been scheduled + reason: BindingScheduled + status: "True" + type: Scheduled + schedulerObservedGeneration: 2 diff --git a/pkg/resource_customizations/work.karmada.io/ResourceBinding/testdata/degraded_unhealth.yaml b/pkg/resource_customizations/work.karmada.io/ResourceBinding/testdata/degraded_unhealth.yaml new file mode 100644 index 0000000..c410059 --- /dev/null +++ b/pkg/resource_customizations/work.karmada.io/ResourceBinding/testdata/degraded_unhealth.yaml @@ -0,0 +1,47 @@ +apiVersion: work.karmada.io/v1alpha2 +kind: ResourceBinding +metadata: + finalizers: + - karmada.io/binding-controller + generation: 2 + labels: + propagationpolicy.karmada.io/name: service-service-testn7qfx + propagationpolicy.karmada.io/namespace: default + name: service-test-service + namespace: default + ownerReferences: + - apiVersion: v1 + blockOwnerDeletion: true + controller: true + kind: Service + name: service-test + uid: 9c2b39b9-4607-4795-87db-1a54680939d0 + resourceVersion: "3943290" + uid: 1459071b-e460-4038-8a5d-d2a4803ad6f1 +spec: + clusters: + - name: member1 + resource: + apiVersion: v1 + kind: Service + name: service-test + namespace: default + resourceVersion: "3943220" + uid: 9c2b39b9-4607-4795-87db-1a54680939d0 +status: + aggregatedStatus: + - applied: true + clusterName: member1 + health: Unhealthy + conditions: + - lastTransitionTime: "2022-11-03T10:56:30Z" + message: All works have been successfully applied + reason: FullyAppliedSuccess + status: "True" + type: FullyApplied + - lastTransitionTime: "2022-11-03T10:56:30Z" + message: Binding has been scheduled + reason: BindingScheduled + status: "True" + type: Scheduled + schedulerObservedGeneration: 2 diff --git a/pkg/resource_customizations/work.karmada.io/ResourceBinding/testdata/degraded_unknown.yaml b/pkg/resource_customizations/work.karmada.io/ResourceBinding/testdata/degraded_unknown.yaml new file mode 100644 index 0000000..6c447fd --- /dev/null +++ b/pkg/resource_customizations/work.karmada.io/ResourceBinding/testdata/degraded_unknown.yaml @@ -0,0 +1,47 @@ +apiVersion: work.karmada.io/v1alpha2 +kind: ResourceBinding +metadata: + finalizers: + - karmada.io/binding-controller + generation: 2 + labels: + propagationpolicy.karmada.io/name: service-service-testn7qfx + propagationpolicy.karmada.io/namespace: default + name: service-test-service + namespace: default + ownerReferences: + - apiVersion: v1 + blockOwnerDeletion: true + controller: true + kind: Service + name: service-test + uid: 9c2b39b9-4607-4795-87db-1a54680939d0 + resourceVersion: "3943290" + uid: 1459071b-e460-4038-8a5d-d2a4803ad6f1 +spec: + clusters: + - name: member1 + resource: + apiVersion: v1 + kind: Service + name: service-test + namespace: default + resourceVersion: "3943220" + uid: 9c2b39b9-4607-4795-87db-1a54680939d0 +status: + aggregatedStatus: + - applied: false + clusterName: member1 + health: Unknown + conditions: + - 
lastTransitionTime: "2022-11-03T10:56:30Z" + message: All works have been successfully applied + reason: FullyAppliedSuccess + status: "True" + type: FullyApplied + - lastTransitionTime: "2022-11-03T10:56:30Z" + message: Binding has been scheduled + reason: BindingScheduled + status: "True" + type: Scheduled + schedulerObservedGeneration: 2 diff --git a/pkg/resource_customizations/work.karmada.io/ResourceBinding/testdata/health.yaml b/pkg/resource_customizations/work.karmada.io/ResourceBinding/testdata/health.yaml new file mode 100644 index 0000000..428cf4f --- /dev/null +++ b/pkg/resource_customizations/work.karmada.io/ResourceBinding/testdata/health.yaml @@ -0,0 +1,84 @@ +apiVersion: work.karmada.io/v1alpha2 +kind: ResourceBinding +metadata: + finalizers: + - karmada.io/binding-controller + generation: 7 + labels: + propagationpolicy.karmada.io/name: test1-57dcd6489f + propagationpolicy.karmada.io/namespace: default + name: test1-deployment + namespace: default + ownerReferences: + - apiVersion: apps/v1 + blockOwnerDeletion: true + controller: true + kind: Deployment + name: test1 + uid: 58ccb955-4da6-4167-9b65-dddadcef569e + resourceVersion: "3903091" + uid: a46f5454-f8c5-4c9e-9119-3022aa5943b1 +spec: + clusters: + - name: member1 + - name: member2 + - name: member3 + replicaRequirements: + nodeClaim: + tolerations: + - effect: NoExecute + key: node.kubernetes.io/not-ready + operator: Exists + tolerationSeconds: 300 + - effect: NoExecute + key: node.kubernetes.io/unreachable + operator: Exists + tolerationSeconds: 300 + resourceRequest: + cpu: 250m + memory: 512Mi + replicas: 1 + resource: + apiVersion: apps/v1 + kind: Deployment + name: test1 + namespace: default + resourceVersion: "3663243" + uid: 58ccb955-4da6-4167-9b65-dddadcef569e +status: + aggregatedStatus: + - applied: true + clusterName: member1 + health: Healthy + status: + availableReplicas: 1 + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 + - applied: true + clusterName: member2 + health: Healthy + status: + replicas: 1 + unavailableReplicas: 1 + updatedReplicas: 1 + - applied: true + clusterName: member3 + health: Healthy + status: + availableReplicas: 1 + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 + conditions: + - lastTransitionTime: "2022-11-02T02:49:06Z" + message: All works have been successfully applied + reason: FullyAppliedSuccess + status: "True" + type: FullyApplied + - lastTransitionTime: "2022-10-28T09:56:31Z" + message: Binding has been scheduled + reason: BindingScheduled + status: "True" + type: Scheduled + schedulerObservedGeneration: 7 diff --git a/pkg/resource_customizations/work.karmada.io/ResourceBinding/testdata/health_unknown.yaml b/pkg/resource_customizations/work.karmada.io/ResourceBinding/testdata/health_unknown.yaml new file mode 100644 index 0000000..47d1843 --- /dev/null +++ b/pkg/resource_customizations/work.karmada.io/ResourceBinding/testdata/health_unknown.yaml @@ -0,0 +1,47 @@ +apiVersion: work.karmada.io/v1alpha2 +kind: ResourceBinding +metadata: + finalizers: + - karmada.io/binding-controller + generation: 2 + labels: + propagationpolicy.karmada.io/name: service-service-testn7qfx + propagationpolicy.karmada.io/namespace: default + name: service-test-service + namespace: default + ownerReferences: + - apiVersion: v1 + blockOwnerDeletion: true + controller: true + kind: Service + name: service-test + uid: 9c2b39b9-4607-4795-87db-1a54680939d0 + resourceVersion: "3943290" + uid: 1459071b-e460-4038-8a5d-d2a4803ad6f1 +spec: + clusters: + - name: member1 + resource: + apiVersion: 
v1 + kind: Service + name: service-test + namespace: default + resourceVersion: "3943220" + uid: 9c2b39b9-4607-4795-87db-1a54680939d0 +status: + aggregatedStatus: + - applied: true + clusterName: member1 + health: Unknown + conditions: + - lastTransitionTime: "2022-11-03T10:56:30Z" + message: All works have been successfully applied + reason: FullyAppliedSuccess + status: "True" + type: FullyApplied + - lastTransitionTime: "2022-11-03T10:56:30Z" + message: Binding has been scheduled + reason: BindingScheduled + status: "True" + type: Scheduled + schedulerObservedGeneration: 2 diff --git a/pkg/resource_customizations/work.karmada.io/ResourceBinding/testdata/progressing.yaml b/pkg/resource_customizations/work.karmada.io/ResourceBinding/testdata/progressing.yaml new file mode 100644 index 0000000..5c8d8a3 --- /dev/null +++ b/pkg/resource_customizations/work.karmada.io/ResourceBinding/testdata/progressing.yaml @@ -0,0 +1,42 @@ +apiVersion: work.karmada.io/v1alpha2 +kind: ResourceBinding +metadata: + finalizers: + - karmada.io/binding-controller + generation: 5 + labels: + propagationpolicy.karmada.io/name: service-testk4j5t + propagationpolicy.karmada.io/namespace: default + name: test-service + namespace: default + ownerReferences: + - apiVersion: v1 + blockOwnerDeletion: true + controller: true + kind: Service + name: test + uid: 039b0d1a-05cb-40b4-b43a-438b0de386af + resourceVersion: "4106772" + uid: 3932ee50-4c2b-4e77-9bfb-45eeb4ec220f +spec: + resource: + apiVersion: v1 + kind: Service + name: test + namespace: default + resourceVersion: "2605059" + uid: 039b0d1a-05cb-40b4-b43a-438b0de386af +status: + conditions: + - lastTransitionTime: "2022-10-27T07:19:50Z" + message: '0/3 clusters are available: 3 cluster(s) didn''t match the placement + cluster affinity constraint.' 
+ reason: BindingFailedScheduling + status: "False" + type: Scheduled + - lastTransitionTime: "2022-10-27T07:19:52Z" + message: Failed to apply all works, see status.aggregatedStatus for details + reason: FullyAppliedFailed + status: "False" + type: FullyApplied + schedulerObservedGeneration: 4 diff --git a/pkg/resource_customizations/work.karmada.io/ResourceBinding/testdata/progressing_aggregatedStatus.yaml b/pkg/resource_customizations/work.karmada.io/ResourceBinding/testdata/progressing_aggregatedStatus.yaml new file mode 100644 index 0000000..d4ba59c --- /dev/null +++ b/pkg/resource_customizations/work.karmada.io/ResourceBinding/testdata/progressing_aggregatedStatus.yaml @@ -0,0 +1,47 @@ +apiVersion: work.karmada.io/v1alpha2 +kind: ResourceBinding +metadata: + finalizers: + - karmada.io/binding-controller + generation: 5 + labels: + propagationpolicy.karmada.io/name: service-testk4j5t + propagationpolicy.karmada.io/namespace: default + name: test-service + namespace: default + ownerReferences: + - apiVersion: v1 + blockOwnerDeletion: true + controller: true + kind: Service + name: test + uid: 039b0d1a-05cb-40b4-b43a-438b0de386af + resourceVersion: "4106772" + uid: 3932ee50-4c2b-4e77-9bfb-45eeb4ec220f +spec: + clusters: + - name: member1 + - name: member2 + - name: member3 +status: + aggregatedStatus: + - applied: true + clusterName: member1 + health: Healthy + status: + availableReplicas: 1 + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 + conditions: + - ansibleResult: + changed: 1 + completion: 2020-06-08T13:41:20.133525 + failures: 0 + ok: 56 + skipped: 82 + lastTransitionTime: "2020-06-04T17:47:31Z" + message: Reconciling + reason: Running + status: "True" + type: Running diff --git a/pkg/resource_customizations/work.karmada.io/ResourceBinding/testdata/progressing_cluster.yaml b/pkg/resource_customizations/work.karmada.io/ResourceBinding/testdata/progressing_cluster.yaml new file mode 100644 index 0000000..0463820 --- /dev/null +++ b/pkg/resource_customizations/work.karmada.io/ResourceBinding/testdata/progressing_cluster.yaml @@ -0,0 +1,35 @@ +apiVersion: work.karmada.io/v1alpha2 +kind: ResourceBinding +metadata: + finalizers: + - karmada.io/binding-controller + generation: 5 + labels: + propagationpolicy.karmada.io/name: service-testk4j5t + propagationpolicy.karmada.io/namespace: default + name: test-service + namespace: default + ownerReferences: + - apiVersion: v1 + blockOwnerDeletion: true + controller: true + kind: Service + name: test + uid: 039b0d1a-05cb-40b4-b43a-438b0de386af + resourceVersion: "4106772" + uid: 3932ee50-4c2b-4e77-9bfb-45eeb4ec220f +spec: + clusters: [] +status: + conditions: + - ansibleResult: + changed: 1 + completion: 2020-06-08T13:41:20.133525 + failures: 0 + ok: 56 + skipped: 82 + lastTransitionTime: "2020-06-04T17:47:31Z" + message: Reconciling + reason: Running + status: "True" + type: Running diff --git a/pkg/resource_customizations/zookeeper.pravega.io/ZookeeperCluster/health.lua b/pkg/resource_customizations/zookeeper.pravega.io/ZookeeperCluster/health.lua new file mode 100644 index 0000000..9e597ae --- /dev/null +++ b/pkg/resource_customizations/zookeeper.pravega.io/ZookeeperCluster/health.lua @@ -0,0 +1,11 @@ +local health_status = {} +if obj.status ~= nil then + if obj.status.readyReplicas ~= 0 and obj.status.readyReplicas == obj.status.replicas then + health_status.status = "Healthy" + health_status.message = "All ZK Nodes have joined the ensemble" + return health_status + end +end +health_status.status = "Progressing" 
+health_status.message = "Waiting for ZK Nodes to join the ensemble" +return health_status \ No newline at end of file diff --git a/pkg/resource_customizations/zookeeper.pravega.io/ZookeeperCluster/health_test.yaml b/pkg/resource_customizations/zookeeper.pravega.io/ZookeeperCluster/health_test.yaml new file mode 100644 index 0000000..9ed9cfa --- /dev/null +++ b/pkg/resource_customizations/zookeeper.pravega.io/ZookeeperCluster/health_test.yaml @@ -0,0 +1,9 @@ +tests: +- healthStatus: + status: Progressing + message: "Waiting for ZK Nodes to join the ensemble" + inputPath: testdata/progressing.yaml +- healthStatus: + status: Healthy + message: "All ZK Nodes have joined the ensemble" + inputPath: testdata/healthy.yaml diff --git a/pkg/resource_customizations/zookeeper.pravega.io/ZookeeperCluster/testdata/healthy.yaml b/pkg/resource_customizations/zookeeper.pravega.io/ZookeeperCluster/testdata/healthy.yaml new file mode 100644 index 0000000..e771259 --- /dev/null +++ b/pkg/resource_customizations/zookeeper.pravega.io/ZookeeperCluster/testdata/healthy.yaml @@ -0,0 +1,21 @@ +apiVersion: zookeeper.pravega.io/v1beta1 +kind: ZookeeperCluster +metadata: + generation: 3 + labels: + argocd.argoproj.io/instance: test-cluster-zookeeper + name: test-cluster-zookeeper + namespace: zookeeper + resourceVersion: "265193" + selfLink: /apis/zookeeper.pravega.io/v1beta1/namespaces/zookeeper/zookeeperclusters/test-cluster-zookeeper + uid: eae42b94-b8ee-4fd7-9d92-30c9a30a4d5a +spec: {} +status: + externalClientEndpoint: N/A + internalClientEndpoint: 10.101.203.230:2181 + members: + ready: + - test-cluster-zookeeper-0 + unready: null + readyReplicas: 1 + replicas: 1 \ No newline at end of file diff --git a/pkg/resource_customizations/zookeeper.pravega.io/ZookeeperCluster/testdata/progressing.yaml b/pkg/resource_customizations/zookeeper.pravega.io/ZookeeperCluster/testdata/progressing.yaml new file mode 100644 index 0000000..6b734f2 --- /dev/null +++ b/pkg/resource_customizations/zookeeper.pravega.io/ZookeeperCluster/testdata/progressing.yaml @@ -0,0 +1,21 @@ +apiVersion: zookeeper.pravega.io/v1beta1 +kind: ZookeeperCluster +metadata: + generation: 3 + labels: + argocd.argoproj.io/instance: test-cluster-zookeeper + name: test-cluster-zookeeper + namespace: zookeeper + resourceVersion: "265193" + selfLink: /apis/zookeeper.pravega.io/v1beta1/namespaces/zookeeper/zookeeperclusters/test-cluster-zookeeper + uid: eae42b94-b8ee-4fd7-9d92-30c9a30a4d5a +spec: {} +status: + externalClientEndpoint: N/A + internalClientEndpoint: 10.101.203.230:2181 + members: + ready: + - test-cluster-zookeeper-0 + unready: null + readyReplicas: 0 + replicas: 1 \ No newline at end of file