From 85410d6bba4c73cf1cbdcea22c4d75539827b49b Mon Sep 17 00:00:00 2001 From: Fira Curie Date: Sat, 17 Aug 2024 18:25:31 +0200 Subject: [PATCH 01/16] Add new gitlab-runner-scaler; bootstrap configuration Signed-off-by: Fira Curie --- pkg/scalers/gitlab_runner_scaler.go | 136 ++++++++++++++++++++++++++++ pkg/scaling/scalers_builder.go | 2 + 2 files changed, 138 insertions(+) create mode 100644 pkg/scalers/gitlab_runner_scaler.go diff --git a/pkg/scalers/gitlab_runner_scaler.go b/pkg/scalers/gitlab_runner_scaler.go new file mode 100644 index 00000000000..e0f20aadf20 --- /dev/null +++ b/pkg/scalers/gitlab_runner_scaler.go @@ -0,0 +1,136 @@ +package scalers + +import ( + "context" + "errors" + "fmt" + "net/http" + "net/url" + + "github.com/go-logr/logr" + v2 "k8s.io/api/autoscaling/v2" + "k8s.io/metrics/pkg/apis/external_metrics" + + "github.com/kedacore/keda/v2/pkg/scalers/scalersconfig" + kedautil "github.com/kedacore/keda/v2/pkg/util" +) + +const ( + defaultTargetPipelineQueueLength = 1 + defaultGitlabAPIURL = "https://gitlab.com" +) + +type gitlabRunnerScaler struct { + metricType v2.MetricTargetType + metadata *gitlabRunnerMetadata + httpClient *http.Client + logger logr.Logger +} + +type gitlabRunnerMetadata struct { + gitlabAPIURL *url.URL + personalAccessToken string + projectID string + + targetWorkflowQueueLength int64 + triggerIndex int +} + +// NewGitLabRunnerScaler creates a new GitLab Runner Scaler +func NewGitLabRunnerScaler(config *scalersconfig.ScalerConfig) (Scaler, error) { + httpClient := kedautil.CreateHTTPClient(config.GlobalHTTPTimeout, false) + + metricType, err := GetMetricTargetType(config) + if err != nil { + return nil, fmt.Errorf("error getting scaler metric type: %w", err) + } + + meta, err := parseGitLabRunnerMetadata(config) + if err != nil { + return nil, fmt.Errorf("error parsing GitLab Runner metadata: %w", err) + } + + return &gitlabRunnerScaler{ + metricType: metricType, + metadata: meta, + httpClient: httpClient, + logger: 
InitializeLogger(config, "gitlab_runner_scaler"), + }, nil +} + +func parseGitLabRunnerMetadata(config *scalersconfig.ScalerConfig) (*gitlabRunnerMetadata, error) { + meta := &gitlabRunnerMetadata{} + meta.targetWorkflowQueueLength = defaultTargetWorkflowQueueLength + + // Get the GitLab API URL + gitlabAPIURLValue, err := getValueFromMetaOrEnv("gitlabAPIURL", config.TriggerMetadata, config.ResolvedEnv) + if err != nil || gitlabAPIURLValue == "" { + gitlabAPIURLValue = defaultGitlabAPIURL + } + + gitlabAPIURL, err := url.Parse(gitlabAPIURLValue) + if err != nil { + return nil, fmt.Errorf("parsing gitlabAPIURL: %w", err) + } + meta.gitlabAPIURL = gitlabAPIURL + + // Get the projectID + projectIDValue, err := getValueFromMetaOrEnv("projectID", config.TriggerMetadata, config.ResolvedEnv) + if err != nil || projectIDValue == "" { + return nil, err + } + meta.projectID = projectIDValue + + // Get the targetWorkflowQueueLength + targetWorkflowQueueLength, err := getInt64ValueFromMetaOrEnv("targetWorkflowQueueLength", config) + if err != nil || targetWorkflowQueueLength == 0 { + meta.targetWorkflowQueueLength = defaultTargetPipelineQueueLength + } + meta.targetWorkflowQueueLength = targetWorkflowQueueLength + + // Get the personalAccessToken + personalAccessToken, ok := config.AuthParams["personalAccessToken"] + if !ok || personalAccessToken == "" { + return nil, errors.New("no personalAccessToken provided") + } + + meta.personalAccessToken = personalAccessToken + meta.triggerIndex = config.TriggerIndex + + return meta, nil +} + +func (s *gitlabRunnerScaler) GetWorkflowQueueLength(context.Context) (int64, error) { + return 0, nil +} + +func (s *gitlabRunnerScaler) GetMetricsAndActivity(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, bool, error) { + queueLen, err := s.GetWorkflowQueueLength(ctx) + + if err != nil { + s.logger.Error(err, "error getting workflow queue length") + return []external_metrics.ExternalMetricValue{}, false, err + } 
+ + metric := GenerateMetricInMili(metricName, float64(queueLen)) + + return []external_metrics.ExternalMetricValue{metric}, queueLen >= s.metadata.targetWorkflowQueueLength, nil +} + +func (s *gitlabRunnerScaler) GetMetricSpecForScaling(_ context.Context) []v2.MetricSpec { + externalMetric := &v2.ExternalMetricSource{ + Metric: v2.MetricIdentifier{ + Name: GenerateMetricNameWithIndex(s.metadata.triggerIndex, kedautil.NormalizeString(fmt.Sprintf("gitlab-runner-%s", s.metadata.projectID))), + }, + Target: GetMetricTarget(s.metricType, s.metadata.targetWorkflowQueueLength), + } + metricSpec := v2.MetricSpec{External: externalMetric, Type: externalMetricType} + return []v2.MetricSpec{metricSpec} +} + +func (s *gitlabRunnerScaler) Close(_ context.Context) error { + if s.httpClient != nil { + s.httpClient.CloseIdleConnections() + } + return nil +} diff --git a/pkg/scaling/scalers_builder.go b/pkg/scaling/scalers_builder.go index 1f4549c7ffa..65ddb367881 100644 --- a/pkg/scaling/scalers_builder.go +++ b/pkg/scaling/scalers_builder.go @@ -183,6 +183,8 @@ func buildScaler(ctx context.Context, client client.Client, triggerType string, return scalers.NewStackdriverScaler(ctx, config) case "gcp-storage": return scalers.NewGcsScaler(config) + case "gitlab-runner": + return scalers.NewGitLabRunnerScaler(config) case "github-runner": return scalers.NewGitHubRunnerScaler(config) case "graphite": From b922d6223ee3aa8db6a522bd47c885d66b119dc4 Mon Sep 17 00:00:00 2001 From: Fira Curie Date: Sat, 17 Aug 2024 23:40:48 +0200 Subject: [PATCH 02/16] Query gitlab (with pagination) to find out the number of pipelines in "waiting for resource" status Signed-off-by: Fira Curie --- pkg/scalers/gitlab_runner_scaler.go | 136 +++++++++++++++++++++++----- 1 file changed, 111 insertions(+), 25 deletions(-) diff --git a/pkg/scalers/gitlab_runner_scaler.go b/pkg/scalers/gitlab_runner_scaler.go index e0f20aadf20..1026609c8e3 100644 --- a/pkg/scalers/gitlab_runner_scaler.go +++ 
b/pkg/scalers/gitlab_runner_scaler.go @@ -2,6 +2,7 @@ package scalers import ( "context" + "encoding/json" "errors" "fmt" "net/http" @@ -16,8 +17,18 @@ import ( ) const ( + // externalMetricType is the type of the external metric. defaultTargetPipelineQueueLength = 1 - defaultGitlabAPIURL = "https://gitlab.com" + // defaultGitlabAPIURL is the default GitLab API base URL. + defaultGitlabAPIURL = "https://gitlab.com" + + // pipelineWaitingForResourceStatus is the status of the pipelines that are waiting for resources. + pipelineWaitingForResourceStatus = "waiting_for_resource" + + // maxGitlabAPIPageCount is the maximum number of pages to query for pipelines. + maxGitlabAPIPageCount = 50 + // gitlabAPIPerPage is the number of pipelines to query per page. + gitlabAPIPerPage = "200" ) type gitlabRunnerScaler struct { @@ -32,7 +43,7 @@ type gitlabRunnerMetadata struct { personalAccessToken string projectID string - targetWorkflowQueueLength int64 + targetPipelineQueueLength int64 triggerIndex int } @@ -60,33 +71,22 @@ func NewGitLabRunnerScaler(config *scalersconfig.ScalerConfig) (Scaler, error) { func parseGitLabRunnerMetadata(config *scalersconfig.ScalerConfig) (*gitlabRunnerMetadata, error) { meta := &gitlabRunnerMetadata{} - meta.targetWorkflowQueueLength = defaultTargetWorkflowQueueLength - - // Get the GitLab API URL - gitlabAPIURLValue, err := getValueFromMetaOrEnv("gitlabAPIURL", config.TriggerMetadata, config.ResolvedEnv) - if err != nil || gitlabAPIURLValue == "" { - gitlabAPIURLValue = defaultGitlabAPIURL - } - - gitlabAPIURL, err := url.Parse(gitlabAPIURLValue) - if err != nil { - return nil, fmt.Errorf("parsing gitlabAPIURL: %w", err) - } - meta.gitlabAPIURL = gitlabAPIURL + meta.targetPipelineQueueLength = defaultTargetWorkflowQueueLength // Get the projectID projectIDValue, err := getValueFromMetaOrEnv("projectID", config.TriggerMetadata, config.ResolvedEnv) - if err != nil || projectIDValue == "" { + if err != nil { return nil, err } + meta.projectID = 
projectIDValue // Get the targetWorkflowQueueLength targetWorkflowQueueLength, err := getInt64ValueFromMetaOrEnv("targetWorkflowQueueLength", config) if err != nil || targetWorkflowQueueLength == 0 { - meta.targetWorkflowQueueLength = defaultTargetPipelineQueueLength + meta.targetPipelineQueueLength = defaultTargetPipelineQueueLength } - meta.targetWorkflowQueueLength = targetWorkflowQueueLength + meta.targetPipelineQueueLength = targetWorkflowQueueLength // Get the personalAccessToken personalAccessToken, ok := config.AuthParams["personalAccessToken"] @@ -95,17 +95,30 @@ func parseGitLabRunnerMetadata(config *scalersconfig.ScalerConfig) (*gitlabRunne } meta.personalAccessToken = personalAccessToken + + // Get the GitLab API URL + gitlabAPIURLValue, err := getValueFromMetaOrEnv("gitlabAPIURL", config.TriggerMetadata, config.ResolvedEnv) + if err != nil || gitlabAPIURLValue == "" { + gitlabAPIURLValue = defaultGitlabAPIURL + } + + gitlabAPIURL, err := url.Parse(gitlabAPIURLValue) + if err != nil { + return nil, fmt.Errorf("parsing gitlabAPIURL: %w", err) + } + + // Construct the GitLab API URL + uri := constructGitlabAPIPipelinesURL(*gitlabAPIURL, projectIDValue, pipelineWaitingForResourceStatus) + + meta.gitlabAPIURL = &uri + meta.triggerIndex = config.TriggerIndex return meta, nil } -func (s *gitlabRunnerScaler) GetWorkflowQueueLength(context.Context) (int64, error) { - return 0, nil -} - func (s *gitlabRunnerScaler) GetMetricsAndActivity(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, bool, error) { - queueLen, err := s.GetWorkflowQueueLength(ctx) + queueLen, err := s.getPipelineQueueLength(ctx) if err != nil { s.logger.Error(err, "error getting workflow queue length") @@ -114,7 +127,7 @@ func (s *gitlabRunnerScaler) GetMetricsAndActivity(ctx context.Context, metricNa metric := GenerateMetricInMili(metricName, float64(queueLen)) - return []external_metrics.ExternalMetricValue{metric}, queueLen >= 
s.metadata.targetWorkflowQueueLength, nil + return []external_metrics.ExternalMetricValue{metric}, queueLen >= s.metadata.targetPipelineQueueLength, nil } func (s *gitlabRunnerScaler) GetMetricSpecForScaling(_ context.Context) []v2.MetricSpec { @@ -122,7 +135,7 @@ func (s *gitlabRunnerScaler) GetMetricSpecForScaling(_ context.Context) []v2.Met Metric: v2.MetricIdentifier{ Name: GenerateMetricNameWithIndex(s.metadata.triggerIndex, kedautil.NormalizeString(fmt.Sprintf("gitlab-runner-%s", s.metadata.projectID))), }, - Target: GetMetricTarget(s.metricType, s.metadata.targetWorkflowQueueLength), + Target: GetMetricTarget(s.metricType, s.metadata.targetPipelineQueueLength), } metricSpec := v2.MetricSpec{External: externalMetric, Type: externalMetricType} return []v2.MetricSpec{metricSpec} @@ -134,3 +147,76 @@ func (s *gitlabRunnerScaler) Close(_ context.Context) error { } return nil } +func constructGitlabAPIPipelinesURL(baseURL url.URL, projectID string, status string) url.URL { + baseURL.Path = "/api/v4/projects/" + projectID + "/pipelines" + + qParams := baseURL.Query() + qParams.Set("status", status) + qParams.Set("per_page", gitlabAPIPerPage) + + baseURL.RawQuery = qParams.Encode() + + return baseURL +} + +// getPipelineCount returns the number of pipelines in the GitLab project (per the page set in url) +func (s *gitlabRunnerScaler) getPipelineCount(ctx context.Context, uri string) (int64, error) { + req, err := http.NewRequestWithContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return 0, fmt.Errorf("creating request: %w", err) + } + + req.Header.Set("Accept", "application/json") + req.Header.Set("Content-Type", "application/json") + req.Header.Set("PRIVATE-TOKEN", s.metadata.personalAccessToken) + + res, err := s.httpClient.Do(req) + if err != nil { + return 0, fmt.Errorf("doing request: %w", err) + } + defer res.Body.Close() + + if res.StatusCode != http.StatusOK { + return 0, fmt.Errorf("unexpected status code: %d", res.StatusCode) + } + + 
gitlabPipelines := make([]struct{}, 0) + if err := json.NewDecoder(res.Body).Decode(&gitlabPipelines); err != nil { + return 0, fmt.Errorf("decoding response: %w", err) + } + + return int64(len(gitlabPipelines)), nil +} + +// getPipelineQueueLength returns the number of pipelines in the +// GitLab project that are waiting for resources. +func (s *gitlabRunnerScaler) getPipelineQueueLength(ctx context.Context) (int64, error) { + var count int64 + + page := 1 + for ; page < maxGitlabAPIPageCount; page++ { + pagedURL := pagedURL(*s.metadata.gitlabAPIURL, fmt.Sprint(page)) + + gitlabPipelinesLen, err := s.getPipelineCount(ctx, pagedURL.String()) + if err != nil { + return 0, err + } + + if gitlabPipelinesLen == 0 { + break + } + + count += gitlabPipelinesLen + } + + return count, nil +} + +func pagedURL(uri url.URL, page string) url.URL { + qParams := uri.Query() + qParams.Set("page", fmt.Sprint(page)) + + uri.RawQuery = qParams.Encode() + + return uri +} From 10ee5bde3e8cc048f069c3687a8e7bf6a506080a Mon Sep 17 00:00:00 2001 From: SpiritZhou Date: Mon, 12 Aug 2024 15:22:20 +0800 Subject: [PATCH 03/16] Introduce ClusterCloudEventSource (#5816) Co-authored-by: Tom Kerkhove Signed-off-by: Fira Curie --- CHANGELOG.md | 2 +- .../v1alpha1/cloudeventsource_types.go | 54 ++- .../v1alpha1/cloudeventsource_webhook.go | 37 +- .../v1alpha1/zz_generated.deepcopy.go | 59 ++++ cmd/operator/main.go | 7 + cmd/webhooks/main.go | 4 + ...ting.keda.sh_clustercloudeventsources.yaml | 138 ++++++++ config/crd/kustomization.yaml | 1 + config/rbac/role.yaml | 7 + ...ting_v1alpha1_clustercloudeventsource.yaml | 15 + config/samples/kustomization.yaml | 1 + config/webhooks/validation_webhooks.yaml | 24 ++ .../eventing/cloudeventsource_controller.go | 116 +----- .../eventing/cloudeventsource_finalizer.go | 65 ---- .../clustercloudeventsource_controller.go | 108 ++++++ controllers/eventing/finalizer.go | 64 ++++ controllers/eventing/reconciler.go | 141 ++++++++ pkg/eventemitter/eventemitter.go | 113 
+++--- pkg/mock/mock_eventemitter/mock_interface.go | 4 +- pkg/status/status.go | 8 + .../cloudevent_source_test.go | 329 ++++++++++++------ 21 files changed, 964 insertions(+), 333 deletions(-) create mode 100644 config/crd/bases/eventing.keda.sh_clustercloudeventsources.yaml create mode 100644 config/samples/eventing_v1alpha1_clustercloudeventsource.yaml delete mode 100644 controllers/eventing/cloudeventsource_finalizer.go create mode 100644 controllers/eventing/clustercloudeventsource_controller.go create mode 100644 controllers/eventing/finalizer.go create mode 100644 controllers/eventing/reconciler.go diff --git a/CHANGELOG.md b/CHANGELOG.md index a2779c2bc79..283c07d1b96 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -56,7 +56,7 @@ To learn more about active deprecations, we recommend checking [GitHub Discussio ### New -- TODO ([#XXX](https://github.com/kedacore/keda/issues/XXX)) +- **CloudEventSource**: Introduce ClusterCloudEventSource ([#3533](https://github.com/kedacore/keda/issues/3533)) #### Experimental diff --git a/apis/eventing/v1alpha1/cloudeventsource_types.go b/apis/eventing/v1alpha1/cloudeventsource_types.go index 2bfb97b928a..c872fc6acd4 100644 --- a/apis/eventing/v1alpha1/cloudeventsource_types.go +++ b/apis/eventing/v1alpha1/cloudeventsource_types.go @@ -18,10 +18,19 @@ package v1alpha1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" v1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1" ) +// +kubebuilder:object:generate=false +type CloudEventSourceInterface interface { + client.Object + GenerateIdentifier() string + GetSpec() *CloudEventSourceSpec + GetStatus() *CloudEventSourceStatus +} + // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // CloudEventSource defines how a KEDA event will be sent to event sink @@ -45,6 +54,28 @@ type CloudEventSourceList struct { Items []CloudEventSource `json:"items"` } +// 
+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// +kubebuilder:resource:path=clustercloudeventsources,scope=Cluster +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="Active",type="string",JSONPath=".status.conditions[?(@.type==\"Active\")].status" +type ClusterCloudEventSource struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec CloudEventSourceSpec `json:"spec"` + Status CloudEventSourceStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ClusterCloudEventSourceList is a list of ClusterCloudEventSource resources +type ClusterCloudEventSourceList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + Items []ClusterCloudEventSource `json:"items"` +} + // CloudEventSourceSpec defines the spec of CloudEventSource type CloudEventSourceSpec struct { // +optional @@ -93,7 +124,15 @@ type EventSubscription struct { } func init() { - SchemeBuilder.Register(&CloudEventSource{}, &CloudEventSourceList{}) + SchemeBuilder.Register(&CloudEventSource{}, &CloudEventSourceList{}, &ClusterCloudEventSource{}, &ClusterCloudEventSourceList{}) +} + +func (ces *CloudEventSource) GetSpec() *CloudEventSourceSpec { + return &ces.Spec +} + +func (ces *CloudEventSource) GetStatus() *CloudEventSourceStatus { + return &ces.Status } // GenerateIdentifier returns identifier for the object in for "kind.namespace.name" @@ -101,6 +140,19 @@ func (ces *CloudEventSource) GenerateIdentifier() string { return v1alpha1.GenerateIdentifier("CloudEventSource", ces.Namespace, ces.Name) } +func (cces *ClusterCloudEventSource) GetSpec() *CloudEventSourceSpec { + return &cces.Spec +} + +func (cces *ClusterCloudEventSource) GetStatus() *CloudEventSourceStatus { + return &cces.Status +} + +// GenerateIdentifier returns identifier for the object in for "kind.cluster-scoped.name" +func (cces *ClusterCloudEventSource) GenerateIdentifier() string { + return 
v1alpha1.GenerateIdentifier("ClusterCloudEventSource", "cluster-scoped", cces.Name) +} + // GetCloudEventSourceInitializedConditions returns CloudEventSource Conditions initialized to the default -> Status: Unknown func GetCloudEventSourceInitializedConditions() *v1alpha1.Conditions { return &v1alpha1.Conditions{{Type: v1alpha1.ConditionActive, Status: metav1.ConditionUnknown}} diff --git a/apis/eventing/v1alpha1/cloudeventsource_webhook.go b/apis/eventing/v1alpha1/cloudeventsource_webhook.go index b520fc4f27f..af7e69d7fb4 100644 --- a/apis/eventing/v1alpha1/cloudeventsource_webhook.go +++ b/apis/eventing/v1alpha1/cloudeventsource_webhook.go @@ -37,6 +37,12 @@ func (ces *CloudEventSource) SetupWebhookWithManager(mgr ctrl.Manager) error { Complete() } +func (cces *ClusterCloudEventSource) SetupWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr). + For(cces). + Complete() +} + // +kubebuilder:webhook:path=/validate-eventing-keda-sh-v1alpha1-cloudeventsource,mutating=false,failurePolicy=ignore,sideEffects=None,groups=eventing.keda.sh,resources=cloudeventsources,verbs=create;update,versions=v1alpha1,name=vcloudeventsource.kb.io,admissionReviewVersions=v1 var _ webhook.Validator = &CloudEventSource{} @@ -64,6 +70,33 @@ func (ces *CloudEventSource) ValidateDelete() (admission.Warnings, error) { return nil, nil } +// +kubebuilder:webhook:path=/validate-eventing-keda-sh-v1alpha1-clustercloudeventsource,mutating=false,failurePolicy=ignore,sideEffects=None,groups=eventing.keda.sh,resources=clustercloudeventsources,verbs=create;update,versions=v1alpha1,name=vclustercloudeventsource.kb.io,admissionReviewVersions=v1 + +var _ webhook.Validator = &ClusterCloudEventSource{} + +// ValidateCreate implements webhook.Validator so a webhook will be registered for the type +func (cces *ClusterCloudEventSource) ValidateCreate() (admission.Warnings, error) { + val, _ := json.MarshalIndent(cces, "", " ") + cloudeventsourcelog.Info(fmt.Sprintf("validating 
clustercloudeventsource creation for %s", string(val))) + return validateSpec(&cces.Spec) +} + +func (cces *ClusterCloudEventSource) ValidateUpdate(old runtime.Object) (admission.Warnings, error) { + val, _ := json.MarshalIndent(cces, "", " ") + cloudeventsourcelog.V(1).Info(fmt.Sprintf("validating clustercloudeventsource update for %s", string(val))) + + oldCes := old.(*ClusterCloudEventSource) + if isCloudEventSourceRemovingFinalizer(cces.ObjectMeta, oldCes.ObjectMeta, cces.Spec, oldCes.Spec) { + cloudeventsourcelog.V(1).Info("finalizer removal, skipping validation") + return nil, nil + } + return validateSpec(&cces.Spec) +} + +func (cces *ClusterCloudEventSource) ValidateDelete() (admission.Warnings, error) { + return nil, nil +} + func isCloudEventSourceRemovingFinalizer(om metav1.ObjectMeta, oldOm metav1.ObjectMeta, spec CloudEventSourceSpec, oldSpec CloudEventSourceSpec) bool { cesSpec, _ := json.MarshalIndent(spec, "", " ") oldCesSpec, _ := json.MarshalIndent(oldSpec, "", " ") @@ -81,7 +114,7 @@ func validateSpec(spec *CloudEventSourceSpec) (admission.Warnings, error) { if spec.EventSubscription.ExcludedEventTypes != nil { for _, excludedEventType := range spec.EventSubscription.ExcludedEventTypes { if !slices.Contains(AllEventTypes, excludedEventType) { - return nil, fmt.Errorf("excludedEventType: %s in cloudeventsource spec is not supported", excludedEventType) + return nil, fmt.Errorf("excludedEventType: %s in cloudeventsource/clustercloudeventsource spec is not supported", excludedEventType) } } } @@ -89,7 +122,7 @@ func validateSpec(spec *CloudEventSourceSpec) (admission.Warnings, error) { if spec.EventSubscription.IncludedEventTypes != nil { for _, includedEventType := range spec.EventSubscription.IncludedEventTypes { if !slices.Contains(AllEventTypes, includedEventType) { - return nil, fmt.Errorf("includedEventType: %s in cloudeventsource spec is not supported", includedEventType) + return nil, fmt.Errorf("includedEventType: %s in 
cloudeventsource/clustercloudeventsource spec is not supported", includedEventType) } } } diff --git a/apis/eventing/v1alpha1/zz_generated.deepcopy.go b/apis/eventing/v1alpha1/zz_generated.deepcopy.go index f38b11df437..114764d49ba 100644 --- a/apis/eventing/v1alpha1/zz_generated.deepcopy.go +++ b/apis/eventing/v1alpha1/zz_generated.deepcopy.go @@ -156,6 +156,65 @@ func (in *CloudEventSourceStatus) DeepCopy() *CloudEventSourceStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterCloudEventSource) DeepCopyInto(out *ClusterCloudEventSource) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCloudEventSource. +func (in *ClusterCloudEventSource) DeepCopy() *ClusterCloudEventSource { + if in == nil { + return nil + } + out := new(ClusterCloudEventSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterCloudEventSource) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterCloudEventSourceList) DeepCopyInto(out *ClusterCloudEventSourceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterCloudEventSource, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCloudEventSourceList. 
+func (in *ClusterCloudEventSourceList) DeepCopy() *ClusterCloudEventSourceList { + if in == nil { + return nil + } + out := new(ClusterCloudEventSourceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterCloudEventSourceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Destination) DeepCopyInto(out *Destination) { *out = *in diff --git a/cmd/operator/main.go b/cmd/operator/main.go index c9172cb5971..2ea6ed90f90 100644 --- a/cmd/operator/main.go +++ b/cmd/operator/main.go @@ -266,6 +266,13 @@ func main() { setupLog.Error(err, "unable to create controller", "controller", "CloudEventSource") os.Exit(1) } + if err = (eventingcontrollers.NewClusterCloudEventSourceReconciler( + mgr.GetClient(), + eventEmitter, + )).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "ClusterCloudEventSource") + os.Exit(1) + } //+kubebuilder:scaffold:builder if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { diff --git a/cmd/webhooks/main.go b/cmd/webhooks/main.go index d9832bdd1b7..46a80a3955e 100644 --- a/cmd/webhooks/main.go +++ b/cmd/webhooks/main.go @@ -156,4 +156,8 @@ func setupWebhook(mgr manager.Manager) { setupLog.Error(err, "unable to create webhook", "webhook", "CloudEventSource") os.Exit(1) } + if err := (&eventingv1alpha1.ClusterCloudEventSource{}).SetupWebhookWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create webhook", "webhook", "ClusterCloudEventSource") + os.Exit(1) + } } diff --git a/config/crd/bases/eventing.keda.sh_clustercloudeventsources.yaml b/config/crd/bases/eventing.keda.sh_clustercloudeventsources.yaml new file mode 100644 index 00000000000..079a8976cbe --- /dev/null +++ 
b/config/crd/bases/eventing.keda.sh_clustercloudeventsources.yaml @@ -0,0 +1,138 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: clustercloudeventsources.eventing.keda.sh +spec: + group: eventing.keda.sh + names: + kind: ClusterCloudEventSource + listKind: ClusterCloudEventSourceList + plural: clustercloudeventsources + singular: clustercloudeventsource + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=="Active")].status + name: Active + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: CloudEventSourceSpec defines the spec of CloudEventSource + properties: + authenticationRef: + description: |- + AuthenticationRef points to the TriggerAuthentication or ClusterTriggerAuthentication object that + is used to authenticate the scaler with the environment + properties: + kind: + description: Kind of the resource being referred to. Defaults + to TriggerAuthentication. 
+ type: string + name: + type: string + required: + - name + type: object + clusterName: + type: string + destination: + description: Destination defines the various ways to emit events + properties: + azureEventGridTopic: + properties: + endpoint: + type: string + required: + - endpoint + type: object + http: + properties: + uri: + type: string + required: + - uri + type: object + type: object + eventSubscription: + description: EventSubscription defines filters for events + properties: + excludedEventTypes: + items: + description: CloudEventType contains the list of cloudevent + types + enum: + - keda.scaledobject.ready.v1 + - keda.scaledobject.failed.v1 + type: string + type: array + includedEventTypes: + items: + description: CloudEventType contains the list of cloudevent + types + enum: + - keda.scaledobject.ready.v1 + - keda.scaledobject.failed.v1 + type: string + type: array + type: object + required: + - destination + type: object + status: + description: CloudEventSourceStatus defines the observed state of CloudEventSource + properties: + conditions: + description: Conditions an array representation to store multiple + Conditions + items: + description: Condition to store the condition state + properties: + message: + description: A human readable message indicating details about + the transition. + type: string + reason: + description: The reason for the condition's last transition. + type: string + status: + description: Status of the condition, one of True, False, Unknown. 
+ type: string + type: + description: Type of condition + type: string + required: + - status + - type + type: object + type: array + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index 71cd2574fda..af8c7dae447 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -7,6 +7,7 @@ resources: - bases/keda.sh_triggerauthentications.yaml - bases/keda.sh_clustertriggerauthentications.yaml - bases/eventing.keda.sh_cloudeventsources.yaml +- bases/eventing.keda.sh_clustercloudeventsources.yaml # +kubebuilder:scaffold:crdkustomizeresource ## ScaledJob CRD needs to be patched because for some usecases (details in the patch file) diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 6ca6d10cfe0..fd9cf99b941 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -107,6 +107,13 @@ rules: - cloudeventsources/status verbs: - '*' +- apiGroups: + - eventing.keda.sh + resources: + - clustercloudeventsources + - clustercloudeventsources/status + verbs: + - '*' - apiGroups: - keda.sh resources: diff --git a/config/samples/eventing_v1alpha1_clustercloudeventsource.yaml b/config/samples/eventing_v1alpha1_clustercloudeventsource.yaml new file mode 100644 index 00000000000..afa6ff0aaa7 --- /dev/null +++ b/config/samples/eventing_v1alpha1_clustercloudeventsource.yaml @@ -0,0 +1,15 @@ +apiVersion: eventing.keda.sh/v1alpha1 +kind: ClusterCloudEventSource +metadata: + labels: + app.kubernetes.io/name: clustercloudeventsource + app.kubernetes.io/instance: clustercloudeventsource-sample + app.kubernetes.io/part-of: keda + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/created-by: keda + name: clustercloudeventsource-sample +spec: + clusterName: clustercluster-sample + destination: + http: + uri: http://foo.bar diff --git a/config/samples/kustomization.yaml b/config/samples/kustomization.yaml index 
94ec29e2c3a..fc2b1f3e427 100644 --- a/config/samples/kustomization.yaml +++ b/config/samples/kustomization.yaml @@ -1,6 +1,7 @@ ## Append samples you want in your CSV to this file as resources ## resources: - eventing_v1alpha1_cloudeventsource.yaml +- eventing_v1alpha1_clustercloudeventsource.yaml - keda_v1alpha1_clustertriggerauthentication.yaml - keda_v1alpha1_scaledobject.yaml - keda_v1alpha1_scaledjob.yaml diff --git a/config/webhooks/validation_webhooks.yaml b/config/webhooks/validation_webhooks.yaml index 14ff71baef8..e996568ac59 100644 --- a/config/webhooks/validation_webhooks.yaml +++ b/config/webhooks/validation_webhooks.yaml @@ -129,3 +129,27 @@ webhooks: - cloudeventsources sideEffects: None timeoutSeconds: 10 +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: keda-admission-webhooks + namespace: keda + path: /validate-eventing-keda-sh-v1alpha1-clustercloudeventsource + failurePolicy: Ignore + matchPolicy: Equivalent + name: vclustercloudeventsource.kb.io + namespaceSelector: {} + objectSelector: {} + rules: + - apiGroups: + - eventing.keda.sh + apiVersions: + - v1alpha1 + operations: + - CREATE + - UPDATE + resources: + - clustercloudeventsources + sideEffects: None + timeoutSeconds: 10 diff --git a/controllers/eventing/cloudeventsource_controller.go b/controllers/eventing/cloudeventsource_controller.go index 62c972076fd..5bb78f5e9ca 100644 --- a/controllers/eventing/cloudeventsource_controller.go +++ b/controllers/eventing/cloudeventsource_controller.go @@ -13,16 +13,14 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ - +// +//nolint:dupl package eventing import ( "context" "sync" - "github.com/go-logr/logr" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/client-go/tools/cache" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" @@ -32,7 +30,6 @@ import ( eventingv1alpha1 "github.com/kedacore/keda/v2/apis/eventing/v1alpha1" "github.com/kedacore/keda/v2/pkg/eventemitter" "github.com/kedacore/keda/v2/pkg/metricscollector" - kedastatus "github.com/kedacore/keda/v2/pkg/status" "github.com/kedacore/keda/v2/pkg/util" ) @@ -60,56 +57,11 @@ func NewCloudEventSourceReconciler(c client.Client, e eventemitter.EventHandler) // +kubebuilder:rbac:groups=eventing.keda.sh,resources=cloudeventsources;cloudeventsources/status,verbs="*" // Reconcile performs reconciliation on the identified EventSource resource based on the request information passed, returns the result and an error (if any). + func (r *CloudEventSourceReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { reqLogger := log.FromContext(ctx) - - // Fetch the EventSource instance cloudEventSource := &eventingv1alpha1.CloudEventSource{} - err := r.Client.Get(ctx, req.NamespacedName, cloudEventSource) - if err != nil { - if errors.IsNotFound(err) { - // Request eventSource not found, could have been deleted after reconcile request. - // Owned eventSource are automatically garbage collected. For additional cleanup logic use finalizers. - // Return and don't requeue - return ctrl.Result{}, nil - } - // Error reading the object - requeue the request. 
- reqLogger.Error(err, "failed to get EventSource") - return ctrl.Result{}, err - } - - reqLogger.Info("Reconciling EventSource") - - if !cloudEventSource.GetDeletionTimestamp().IsZero() { - return ctrl.Result{}, r.FinalizeEventSourceResource(ctx, reqLogger, cloudEventSource, req.NamespacedName.String()) - } - r.updatePromMetrics(cloudEventSource, req.NamespacedName.String()) - - // ensure finalizer is set on this CR - if err := r.EnsureEventSourceResourceFinalizer(ctx, reqLogger, cloudEventSource); err != nil { - return ctrl.Result{}, err - } - - // ensure Status Conditions are initialized - if !cloudEventSource.Status.Conditions.AreInitialized() { - conditions := eventingv1alpha1.GetCloudEventSourceInitializedConditions() - if err := kedastatus.SetStatusConditions(ctx, r.Client, reqLogger, cloudEventSource, conditions); err != nil { - return ctrl.Result{}, err - } - } - - eventSourceChanged, err := r.cloudEventSourceGenerationChanged(reqLogger, cloudEventSource) - if err != nil { - return ctrl.Result{}, err - } - - if eventSourceChanged { - if r.requestEventLoop(ctx, reqLogger, cloudEventSource) != nil { - return ctrl.Result{}, err - } - } - - return ctrl.Result{}, nil + return Reconcile(ctx, reqLogger, r, req, cloudEventSource) } // SetupWithManager sets up the controller with the Manager. 
@@ -120,61 +72,19 @@ func (r *CloudEventSourceReconciler) SetupWithManager(mgr ctrl.Manager) error { Complete(r) } -// requestEventLoop tries to start EventLoop handler for the respective EventSource -func (r *CloudEventSourceReconciler) requestEventLoop(ctx context.Context, logger logr.Logger, eventSource *eventingv1alpha1.CloudEventSource) error { - logger.V(1).Info("Notify eventHandler of an update in eventSource") - - key, err := cache.MetaNamespaceKeyFunc(eventSource) - if err != nil { - logger.Error(err, "error getting key for eventSource") - return err - } - - if err = r.eventEmitter.HandleCloudEventSource(ctx, eventSource); err != nil { - return err - } - - // store CloudEventSource's current Generation - r.cloudEventSourceGenerations.Store(key, eventSource.Generation) - - return nil +func (r *CloudEventSourceReconciler) GetClient() client.Client { + return r.Client } -// stopEventLoop stops EventLoop handler for the respective EventSource -func (r *CloudEventSourceReconciler) stopEventLoop(logger logr.Logger, eventSource *eventingv1alpha1.CloudEventSource) error { - key, err := cache.MetaNamespaceKeyFunc(eventSource) - if err != nil { - logger.Error(err, "error getting key for eventSource") - return err - } - - if err := r.eventEmitter.DeleteCloudEventSource(eventSource); err != nil { - return err - } - // delete CloudEventSource's current Generation - r.cloudEventSourceGenerations.Delete(key) - return nil +func (r *CloudEventSourceReconciler) GetEventEmitter() eventemitter.EventHandler { + return r.eventEmitter } -// eventSourceGenerationChanged returns true if CloudEventSource's Generation was changed, ie. 
EventSource.Spec was changed -func (r *CloudEventSourceReconciler) cloudEventSourceGenerationChanged(logger logr.Logger, eventSource *eventingv1alpha1.CloudEventSource) (bool, error) { - key, err := cache.MetaNamespaceKeyFunc(eventSource) - if err != nil { - logger.Error(err, "error getting key for eventSource") - return true, err - } - - value, loaded := r.cloudEventSourceGenerations.Load(key) - if loaded { - generation := value.(int64) - if generation == eventSource.Generation { - return false, nil - } - } - return true, nil +func (r *CloudEventSourceReconciler) GetCloudEventSourceGeneration() *sync.Map { + return r.cloudEventSourceGenerations } -func (r *CloudEventSourceReconciler) updatePromMetrics(eventSource *eventingv1alpha1.CloudEventSource, namespacedName string) { +func (r *CloudEventSourceReconciler) UpdatePromMetrics(eventSource eventingv1alpha1.CloudEventSourceInterface, namespacedName string) { r.eventSourcePromMetricsLock.Lock() defer r.eventSourcePromMetricsLock.Unlock() @@ -182,8 +92,8 @@ func (r *CloudEventSourceReconciler) updatePromMetrics(eventSource *eventingv1al metricscollector.DecrementCRDTotal(metricscollector.CloudEventSourceResource, ns) } - metricscollector.IncrementCRDTotal(metricscollector.CloudEventSourceResource, eventSource.Namespace) - r.eventSourcePromMetricsMap[namespacedName] = eventSource.Namespace + metricscollector.IncrementCRDTotal(metricscollector.CloudEventSourceResource, eventSource.GetNamespace()) + r.eventSourcePromMetricsMap[namespacedName] = eventSource.GetNamespace() } // UpdatePromMetricsOnDelete is idempotent, so it can be called multiple times without side-effects diff --git a/controllers/eventing/cloudeventsource_finalizer.go b/controllers/eventing/cloudeventsource_finalizer.go deleted file mode 100644 index 03da520188a..00000000000 --- a/controllers/eventing/cloudeventsource_finalizer.go +++ /dev/null @@ -1,65 +0,0 @@ -/* -Copyright 2023 The KEDA Authors - -Licensed under the Apache License, Version 2.0 (the 
"License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package eventing - -import ( - "context" - "fmt" - - "github.com/go-logr/logr" - - eventingv1alpha1 "github.com/kedacore/keda/v2/apis/eventing/v1alpha1" - "github.com/kedacore/keda/v2/controllers/keda/util" -) - -const ( - cloudEventSourceFinalizer = "finalizer.keda.sh" - cloudEventSourceResourceType = "cloudEventSource" -) - -func (r *CloudEventSourceReconciler) EnsureEventSourceResourceFinalizer(ctx context.Context, logger logr.Logger, cloudEventSource *eventingv1alpha1.CloudEventSource) error { - if !util.Contains(cloudEventSource.GetFinalizers(), cloudEventSourceFinalizer) { - logger.Info(fmt.Sprintf("Adding Finalizer to %s %s/%s", cloudEventSourceResourceType, cloudEventSource.Namespace, cloudEventSource.Name)) - cloudEventSource.SetFinalizers(append(cloudEventSource.GetFinalizers(), cloudEventSourceFinalizer)) - - // Update CR - err := r.Update(ctx, cloudEventSource) - if err != nil { - logger.Error(err, fmt.Sprintf("Failed to update %s with a finalizer", cloudEventSourceResourceType), "finalizer", cloudEventSourceFinalizer) - return err - } - } - return nil -} - -func (r *CloudEventSourceReconciler) FinalizeEventSourceResource(ctx context.Context, logger logr.Logger, cloudEventSource *eventingv1alpha1.CloudEventSource, namespacedName string) error { - if util.Contains(cloudEventSource.GetFinalizers(), cloudEventSourceFinalizer) { - if err := r.stopEventLoop(logger, cloudEventSource); err != nil { - return err - } - 
cloudEventSource.SetFinalizers(util.Remove(cloudEventSource.GetFinalizers(), cloudEventSourceFinalizer)) - if err := r.Update(ctx, cloudEventSource); err != nil { - logger.Error(err, fmt.Sprintf("Failed to update %s after removing a finalizer", cloudEventSourceResourceType), "finalizer", cloudEventSourceFinalizer) - return err - } - - r.UpdatePromMetricsOnDelete(namespacedName) - } - - logger.Info(fmt.Sprintf("Successfully finalized %s", cloudEventSourceResourceType)) - return nil -} diff --git a/controllers/eventing/clustercloudeventsource_controller.go b/controllers/eventing/clustercloudeventsource_controller.go new file mode 100644 index 00000000000..0ccb26f811a --- /dev/null +++ b/controllers/eventing/clustercloudeventsource_controller.go @@ -0,0 +1,108 @@ +/* +Copyright 2024 The KEDA Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +// +//nolint:dupl +package eventing + +import ( + "context" + "sync" + + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/predicate" + + eventingv1alpha1 "github.com/kedacore/keda/v2/apis/eventing/v1alpha1" + "github.com/kedacore/keda/v2/pkg/eventemitter" + "github.com/kedacore/keda/v2/pkg/metricscollector" + "github.com/kedacore/keda/v2/pkg/util" +) + +// ClusterCloudEventSourceReconciler reconciles a EventSource object +type ClusterCloudEventSourceReconciler struct { + client.Client + eventEmitter eventemitter.EventHandler + + clusterCloudEventSourceGenerations *sync.Map + eventSourcePromMetricsMap map[string]string + eventSourcePromMetricsLock *sync.Mutex +} + +// NewClusterCloudEventSourceReconciler creates a new ClusterCloudEventSourceReconciler +func NewClusterCloudEventSourceReconciler(c client.Client, e eventemitter.EventHandler) *ClusterCloudEventSourceReconciler { + return &ClusterCloudEventSourceReconciler{ + Client: c, + eventEmitter: e, + clusterCloudEventSourceGenerations: &sync.Map{}, + eventSourcePromMetricsMap: make(map[string]string), + eventSourcePromMetricsLock: &sync.Mutex{}, + } +} + +// +kubebuilder:rbac:groups=eventing.keda.sh,resources=clustercloudeventsources;clustercloudeventsources/status,verbs="*" + +// Reconcile performs reconciliation on the identified EventSource resource based on the request information passed, returns the result and an error (if any). +func (r *ClusterCloudEventSourceReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + reqLogger := log.FromContext(ctx) + cloudEventSource := &eventingv1alpha1.ClusterCloudEventSource{} + return Reconcile(ctx, reqLogger, r, req, cloudEventSource) +} + +// SetupWithManager sets up the controller with the Manager. 
+func (r *ClusterCloudEventSourceReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&eventingv1alpha1.ClusterCloudEventSource{}, builder.WithPredicates(predicate.GenerationChangedPredicate{})). + WithEventFilter(util.IgnoreOtherNamespaces()). + Complete(r) +} + +func (r *ClusterCloudEventSourceReconciler) GetClient() client.Client { + return r.Client +} + +func (r *ClusterCloudEventSourceReconciler) GetEventEmitter() eventemitter.EventHandler { + return r.eventEmitter +} + +func (r *ClusterCloudEventSourceReconciler) GetCloudEventSourceGeneration() *sync.Map { + return r.clusterCloudEventSourceGenerations +} + +func (r *ClusterCloudEventSourceReconciler) UpdatePromMetrics(eventSource eventingv1alpha1.CloudEventSourceInterface, namespacedName string) { + r.eventSourcePromMetricsLock.Lock() + defer r.eventSourcePromMetricsLock.Unlock() + + if ns, ok := r.eventSourcePromMetricsMap[namespacedName]; ok { + metricscollector.DecrementCRDTotal(metricscollector.CloudEventSourceResource, ns) + } + + metricscollector.IncrementCRDTotal(metricscollector.CloudEventSourceResource, eventSource.GetNamespace()) + r.eventSourcePromMetricsMap[namespacedName] = eventSource.GetNamespace() +} + +// UpdatePromMetricsOnDelete is idempotent, so it can be called multiple times without side-effects +func (r *ClusterCloudEventSourceReconciler) UpdatePromMetricsOnDelete(namespacedName string) { + r.eventSourcePromMetricsLock.Lock() + defer r.eventSourcePromMetricsLock.Unlock() + + if ns, ok := r.eventSourcePromMetricsMap[namespacedName]; ok { + metricscollector.DecrementCRDTotal(metricscollector.CloudEventSourceResource, ns) + } + + delete(r.eventSourcePromMetricsMap, namespacedName) +} diff --git a/controllers/eventing/finalizer.go b/controllers/eventing/finalizer.go new file mode 100644 index 00000000000..5c8a8f75297 --- /dev/null +++ b/controllers/eventing/finalizer.go @@ -0,0 +1,64 @@ +/* +Copyright 2024 The KEDA Authors + +Licensed under 
the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package eventing + +import ( + "context" + "fmt" + + "github.com/go-logr/logr" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/kedacore/keda/v2/controllers/keda/util" +) + +const ( + cloudEventSourceFinalizer = "finalizer.keda.sh" +) + +func EnsureCloudEventSourceResourceFinalizer(ctx context.Context, logger logr.Logger, r cloudEventSourceReconcilerInterface, cloudEventSourceResource client.Object) error { + if !util.Contains(cloudEventSourceResource.GetFinalizers(), cloudEventSourceFinalizer) { + logger.Info(fmt.Sprintf("Adding Finalizer for the %s", cloudEventSourceResource.GetName())) + cloudEventSourceResource.SetFinalizers(append(cloudEventSourceResource.GetFinalizers(), cloudEventSourceFinalizer)) + + // Update CR + err := r.GetClient().Update(ctx, cloudEventSourceResource) + if err != nil { + logger.Error(err, fmt.Sprintf("Failed to update %s with a finalizer", cloudEventSourceResource.GetName()), "finalizer", cloudEventSourceFinalizer) + return err + } + } + return nil +} + +func FinalizeCloudEventSourceResource(ctx context.Context, logger logr.Logger, r cloudEventSourceReconcilerInterface, cloudEventSourceResource client.Object, namespacedName string) error { + if util.Contains(cloudEventSourceResource.GetFinalizers(), cloudEventSourceFinalizer) { + if err := StopEventLoop(logger, r, cloudEventSourceResource); err != nil { + return err + } + 
cloudEventSourceResource.SetFinalizers(util.Remove(cloudEventSourceResource.GetFinalizers(), cloudEventSourceFinalizer)) + if err := r.GetClient().Update(ctx, cloudEventSourceResource); err != nil { + logger.Error(err, fmt.Sprintf("Failed to update %s after removing a finalizer", cloudEventSourceResource.GetName()), "finalizer", cloudEventSourceFinalizer) + return err + } + + r.UpdatePromMetricsOnDelete(namespacedName) + } + + logger.Info(fmt.Sprintf("Successfully finalized %s", cloudEventSourceResource.GetName())) + return nil +} diff --git a/controllers/eventing/reconciler.go b/controllers/eventing/reconciler.go new file mode 100644 index 00000000000..6461ee16a11 --- /dev/null +++ b/controllers/eventing/reconciler.go @@ -0,0 +1,141 @@ +/* +Copyright 2024 The KEDA Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package eventing + +import ( + "context" + "sync" + + "github.com/go-logr/logr" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/client-go/tools/cache" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + eventingv1alpha1 "github.com/kedacore/keda/v2/apis/eventing/v1alpha1" + "github.com/kedacore/keda/v2/pkg/eventemitter" + kedastatus "github.com/kedacore/keda/v2/pkg/status" +) + +type cloudEventSourceReconcilerInterface interface { + GetClient() client.Client + GetEventEmitter() eventemitter.EventHandler + GetCloudEventSourceGeneration() *sync.Map + UpdatePromMetrics(eventSource eventingv1alpha1.CloudEventSourceInterface, namespacedName string) + UpdatePromMetricsOnDelete(namespacedName string) +} + +func Reconcile(ctx context.Context, reqLogger logr.Logger, r cloudEventSourceReconcilerInterface, req ctrl.Request, cloudEventSource eventingv1alpha1.CloudEventSourceInterface) (ctrl.Result, error) { + err := r.GetClient().Get(ctx, req.NamespacedName, cloudEventSource) + if err != nil { + if errors.IsNotFound(err) { + // Request eventSource not found, could have been deleted after reconcile request. + // Owned eventSource are automatically garbage collected. For additional cleanup logic use finalizers. + // Return and don't requeue + return ctrl.Result{}, nil + } + // Error reading the object - requeue the request. 
+		reqLogger.Error(err, "failed to get EventSource")
+		return ctrl.Result{}, err
+	}
+
+	reqLogger.Info("Reconciling CloudEventSource")
+
+	if !cloudEventSource.GetDeletionTimestamp().IsZero() {
+		return ctrl.Result{}, FinalizeCloudEventSourceResource(ctx, reqLogger, r, cloudEventSource, req.NamespacedName.String())
+	}
+	r.UpdatePromMetrics(cloudEventSource, req.NamespacedName.String())
+
+	// ensure finalizer is set on this CR
+	if err := EnsureCloudEventSourceResourceFinalizer(ctx, reqLogger, r, cloudEventSource); err != nil {
+		return ctrl.Result{}, err
+	}
+
+	// ensure Status Conditions are initialized
+	if !cloudEventSource.GetStatus().Conditions.AreInitialized() {
+		conditions := eventingv1alpha1.GetCloudEventSourceInitializedConditions()
+		if err := kedastatus.SetStatusConditions(ctx, r.GetClient(), reqLogger, cloudEventSource, conditions); err != nil {
+			return ctrl.Result{}, err
+		}
+	}
+
+	eventSourceChanged, err := CloudEventSourceGenerationChanged(reqLogger, r, cloudEventSource)
+	if err != nil {
+		return ctrl.Result{}, err
+	}
+
+	if eventSourceChanged {
+		if err := RequestEventLoop(ctx, reqLogger, r, cloudEventSource); err != nil {
+			return ctrl.Result{}, err
+		}
+	}
+
+	return ctrl.Result{}, nil
+}
+
+// RequestEventLoop tries to start EventLoop handler for the respective EventSource
+func RequestEventLoop(ctx context.Context, logger logr.Logger, r cloudEventSourceReconcilerInterface, eventSourceI eventingv1alpha1.CloudEventSourceInterface) error {
+	logger.V(1).Info("Notify eventHandler of an update in eventSource")
+
+	key, err := cache.MetaNamespaceKeyFunc(eventSourceI)
+	if err != nil {
+		logger.Error(err, "error getting key for eventSource")
+		return err
+	}
+
+	if err = r.GetEventEmitter().HandleCloudEventSource(ctx, eventSourceI); err != nil {
+		return err
+	}
+
+	// store CloudEventSource's current Generation
+	r.GetCloudEventSourceGeneration().Store(key, eventSourceI.GetGeneration())
+	return nil
+}
+
+// StopEventLoop stops EventLoop handler for the 
respective EventSource +func StopEventLoop(logger logr.Logger, r cloudEventSourceReconcilerInterface, obj client.Object) error { + key, err := cache.MetaNamespaceKeyFunc(obj) + if err != nil { + logger.Error(err, "error getting key for eventSource") + return err + } + + if err := r.GetEventEmitter().DeleteCloudEventSource(obj.(eventingv1alpha1.CloudEventSourceInterface)); err != nil { + return err + } + // delete CloudEventSource's current Generation + r.GetCloudEventSourceGeneration().Delete(key) + return nil +} + +// eventSourceGenerationChanged returns true if CloudEventSource's Generation was changed, ie. EventSource.Spec was changed +func CloudEventSourceGenerationChanged(logger logr.Logger, r cloudEventSourceReconcilerInterface, eventSourceI eventingv1alpha1.CloudEventSourceInterface) (bool, error) { + key, err := cache.MetaNamespaceKeyFunc(eventSourceI) + if err != nil { + logger.Error(err, "error getting key for eventSource") + return true, err + } + + value, loaded := r.GetCloudEventSourceGeneration().Load(key) + if loaded { + generation := value.(int64) + if generation == eventSourceI.GetGeneration() { + return false, nil + } + } + return true, nil +} diff --git a/pkg/eventemitter/eventemitter.go b/pkg/eventemitter/eventemitter.go index a46480ff537..91d57f3ca4d 100644 --- a/pkg/eventemitter/eventemitter.go +++ b/pkg/eventemitter/eventemitter.go @@ -71,8 +71,8 @@ type EventEmitter struct { // EventHandler defines the behavior for EventEmitter clients type EventHandler interface { - DeleteCloudEventSource(cloudEventSource *eventingv1alpha1.CloudEventSource) error - HandleCloudEventSource(ctx context.Context, cloudEventSource *eventingv1alpha1.CloudEventSource) error + DeleteCloudEventSource(cloudEventSource eventingv1alpha1.CloudEventSourceInterface) error + HandleCloudEventSource(ctx context.Context, cloudEventSource eventingv1alpha1.CloudEventSourceInterface) error Emit(object runtime.Object, namesapce string, eventType string, cloudeventType 
eventingv1alpha1.CloudEventType, reason string, message string) } @@ -112,20 +112,20 @@ func NewEventEmitter(client client.Client, recorder record.EventRecorder, cluste } } -func initializeLogger(cloudEventSource *eventingv1alpha1.CloudEventSource, cloudEventSourceEmitterName string) logr.Logger { - return logf.Log.WithName(cloudEventSourceEmitterName).WithValues("type", cloudEventSource.Kind, "namespace", cloudEventSource.Namespace, "name", cloudEventSource.Name) +func initializeLogger(cloudEventSourceI eventingv1alpha1.CloudEventSourceInterface, cloudEventSourceEmitterName string) logr.Logger { + return logf.Log.WithName(cloudEventSourceEmitterName).WithValues("type", cloudEventSourceI.GetObjectKind(), "namespace", cloudEventSourceI.GetNamespace(), "name", cloudEventSourceI.GetName()) } // HandleCloudEventSource will create CloudEventSource handlers that defined in spec and start an event loop once handlers // are created successfully. -func (e *EventEmitter) HandleCloudEventSource(ctx context.Context, cloudEventSource *eventingv1alpha1.CloudEventSource) error { - e.createEventHandlers(ctx, cloudEventSource) +func (e *EventEmitter) HandleCloudEventSource(ctx context.Context, cloudEventSourceI eventingv1alpha1.CloudEventSourceInterface) error { + e.createEventHandlers(ctx, cloudEventSourceI) - if !e.checkIfEventHandlersExist(cloudEventSource) { - return fmt.Errorf("no CloudEventSource handler is created for %s/%s", cloudEventSource.Namespace, cloudEventSource.Name) + if !e.checkIfEventHandlersExist(cloudEventSourceI) { + return fmt.Errorf("no CloudEventSource handler is created for %s/%s", cloudEventSourceI.GetNamespace(), cloudEventSourceI.GetName()) } - key := cloudEventSource.GenerateIdentifier() + key := cloudEventSourceI.GenerateIdentifier() cancelCtx, cancel := context.WithCancel(ctx) // cancel the outdated EventLoop for the same CloudEventSource (if exists) @@ -137,7 +137,7 @@ func (e *EventEmitter) HandleCloudEventSource(ctx context.Context, cloudEventSou 
 		}
 		e.eventLoopContexts.Store(key, cancel)
 	} else {
-		if updateErr := e.setCloudEventSourceStatusActive(ctx, cloudEventSource); updateErr != nil {
+		if updateErr := e.setCloudEventSourceStatusActive(ctx, cloudEventSourceI); updateErr != nil {
 			e.log.Error(updateErr, "Failed to update CloudEventSource status")
 			return updateErr
 		}
@@ -148,14 +148,15 @@ func (e *EventEmitter) HandleCloudEventSou
 	// passing deep copy of CloudEventSource to the eventLoop go routines, it's a precaution to not have global objects shared between threads
 	e.log.V(1).Info("Start CloudEventSource loop.")
-	go e.startEventLoop(cancelCtx, cloudEventSource.DeepCopy(), eventingMutex)
+	go e.startEventLoop(cancelCtx, cloudEventSourceI.DeepCopyObject().(eventingv1alpha1.CloudEventSourceInterface), eventingMutex)
 
 	return nil
 }
 
 // DeleteCloudEventSource will stop the event loop and clean event handlers in cache.
-func (e *EventEmitter) DeleteCloudEventSource(cloudEventSource *eventingv1alpha1.CloudEventSource) error {
+func (e *EventEmitter) DeleteCloudEventSource(cloudEventSource eventingv1alpha1.CloudEventSourceInterface) error {
 	key := cloudEventSource.GenerateIdentifier()
 	result, ok := e.eventLoopContexts.Load(key)
+	e.log.V(1).Info("Deleting CloudEventSource", "key", key)
 	if ok {
 		cancel, ok := result.(context.CancelFunc)
 		if ok {
@@ -164,7 +165,7 @@ func (e *EventEmitter) DeleteCloudEventSource(cloudEventSource *eventingv1alpha1
 		e.eventLoopContexts.Delete(key)
 		e.clearEventHandlersCache(cloudEventSource)
 	} else {
-		e.log.V(1).Info("CloudEventSource was not found in controller cache", "key", key)
+		e.log.V(1).Info("CloudEventSource was not found in controller cache", "key", key)
 	}
 
 	return nil
@@ -172,32 +173,33 @@
 // createEventHandlers will create different handler as defined in CloudEventSource, and store them in cache 
for repeated // use in the loop. -func (e *EventEmitter) createEventHandlers(ctx context.Context, cloudEventSource *eventingv1alpha1.CloudEventSource) { +func (e *EventEmitter) createEventHandlers(ctx context.Context, cloudEventSourceI eventingv1alpha1.CloudEventSourceInterface) { e.eventHandlersCacheLock.Lock() e.eventFilterCacheLock.Lock() defer e.eventHandlersCacheLock.Unlock() defer e.eventFilterCacheLock.Unlock() - key := cloudEventSource.GenerateIdentifier() + key := cloudEventSourceI.GenerateIdentifier() + spec := cloudEventSourceI.GetSpec() - clusterName := cloudEventSource.Spec.ClusterName + clusterName := spec.ClusterName if clusterName == "" { clusterName = e.clusterName } // Resolve auth related - authParams, podIdentity, err := resolver.ResolveAuthRefAndPodIdentity(ctx, e.client, e.log, cloudEventSource.Spec.AuthenticationRef, nil, cloudEventSource.Namespace, e.secretsLister) + authParams, podIdentity, err := resolver.ResolveAuthRefAndPodIdentity(ctx, e.client, e.log, spec.AuthenticationRef, nil, cloudEventSourceI.GetNamespace(), e.secretsLister) if err != nil { - e.log.Error(err, "error resolving auth params", "cloudEventSource", cloudEventSource) + e.log.Error(err, "error resolving auth params", "cloudEventSource", cloudEventSourceI) return } // Create EventFilter from CloudEventSource - e.eventFilterCache[key] = NewEventFilter(cloudEventSource.Spec.EventSubscription.IncludedEventTypes, cloudEventSource.Spec.EventSubscription.ExcludedEventTypes) + e.eventFilterCache[key] = NewEventFilter(spec.EventSubscription.IncludedEventTypes, spec.EventSubscription.ExcludedEventTypes) // Create different event destinations here - if cloudEventSource.Spec.Destination.HTTP != nil { - eventHandler, err := NewCloudEventHTTPHandler(ctx, clusterName, cloudEventSource.Spec.Destination.HTTP.URI, initializeLogger(cloudEventSource, "cloudevent_http")) + if spec.Destination.HTTP != nil { + eventHandler, err := NewCloudEventHTTPHandler(ctx, clusterName, 
spec.Destination.HTTP.URI, initializeLogger(cloudEventSourceI, "cloudevent_http")) if err != nil { e.log.Error(err, "create CloudEvent HTTP handler failed") return @@ -211,8 +213,8 @@ func (e *EventEmitter) createEventHandlers(ctx context.Context, cloudEventSource return } - if cloudEventSource.Spec.Destination.AzureEventGridTopic != nil { - eventHandler, err := NewAzureEventGridTopicHandler(ctx, clusterName, cloudEventSource.Spec.Destination.AzureEventGridTopic, authParams, podIdentity, initializeLogger(cloudEventSource, "azure_event_grid_topic")) + if spec.Destination.AzureEventGridTopic != nil { + eventHandler, err := NewAzureEventGridTopicHandler(ctx, clusterName, spec.Destination.AzureEventGridTopic, authParams, podIdentity, initializeLogger(cloudEventSourceI, "azure_event_grid_topic")) if err != nil { e.log.Error(err, "create Azure Event Grid handler failed") return @@ -226,40 +228,41 @@ func (e *EventEmitter) createEventHandlers(ctx context.Context, cloudEventSource return } - e.log.Info("No destionation is defined in CloudEventSource", "CloudEventSource", cloudEventSource.Name) + e.log.Info("No destionation is defined in CloudEventSource", "CloudEventSource", cloudEventSourceI.GetName()) } // clearEventHandlersCache will clear all event handlers that created by the passing CloudEventSource -func (e *EventEmitter) clearEventHandlersCache(cloudEventSource *eventingv1alpha1.CloudEventSource) { +func (e *EventEmitter) clearEventHandlersCache(cloudEventSource eventingv1alpha1.CloudEventSourceInterface) { e.eventHandlersCacheLock.Lock() defer e.eventHandlersCacheLock.Unlock() e.eventFilterCacheLock.Lock() defer e.eventFilterCacheLock.Unlock() + spec := cloudEventSource.GetSpec() key := cloudEventSource.GenerateIdentifier() delete(e.eventFilterCache, key) // Clear different event destination here. 
- if cloudEventSource.Spec.Destination.HTTP != nil { + if spec.Destination.HTTP != nil { eventHandlerKey := newEventHandlerKey(key, cloudEventHandlerTypeHTTP) if eventHandler, found := e.eventHandlersCache[eventHandlerKey]; found { eventHandler.CloseHandler() - delete(e.eventHandlersCache, key) + delete(e.eventHandlersCache, eventHandlerKey) } } - if cloudEventSource.Spec.Destination.AzureEventGridTopic != nil { + if spec.Destination.AzureEventGridTopic != nil { eventHandlerKey := newEventHandlerKey(key, cloudEventHandlerTypeAzureEventGridTopic) if eventHandler, found := e.eventHandlersCache[eventHandlerKey]; found { eventHandler.CloseHandler() - delete(e.eventHandlersCache, key) + delete(e.eventHandlersCache, eventHandlerKey) } } } // checkIfEventHandlersExist will check if the event handlers that were created by passing CloudEventSource exist -func (e *EventEmitter) checkIfEventHandlersExist(cloudEventSource *eventingv1alpha1.CloudEventSource) bool { +func (e *EventEmitter) checkIfEventHandlersExist(cloudEventSource eventingv1alpha1.CloudEventSourceInterface) bool { e.eventHandlersCacheLock.RLock() defer e.eventHandlersCacheLock.RUnlock() @@ -273,40 +276,41 @@ func (e *EventEmitter) checkIfEventHandlersExist(cloudEventSource *eventingv1alp return false } -func (e *EventEmitter) startEventLoop(ctx context.Context, cloudEventSource *eventingv1alpha1.CloudEventSource, cloudEventSourceMutex sync.Locker) { +func (e *EventEmitter) startEventLoop(ctx context.Context, cloudEventSourceI eventingv1alpha1.CloudEventSourceInterface, cloudEventSourceMutex sync.Locker) { + e.log.V(1).Info("Start CloudEventSource loop.", "name", cloudEventSourceI.GetName()) for { select { case eventData := <-e.cloudEventProcessingChan: - e.log.V(1).Info("Consuming events from CloudEventSource.") + e.log.V(1).Info("Consuming events from CloudEventSource.", "name", cloudEventSourceI.GetName()) e.emitEventByHandler(eventData) - e.checkEventHandlers(ctx, cloudEventSource, cloudEventSourceMutex) - 
metricscollector.RecordCloudEventQueueStatus(cloudEventSource.Namespace, len(e.cloudEventProcessingChan)) + e.checkEventHandlers(ctx, cloudEventSourceI, cloudEventSourceMutex) + metricscollector.RecordCloudEventQueueStatus(cloudEventSourceI.GetNamespace(), len(e.cloudEventProcessingChan)) case <-ctx.Done(): e.log.V(1).Info("CloudEventSource loop has stopped.") - metricscollector.RecordCloudEventQueueStatus(cloudEventSource.Namespace, len(e.cloudEventProcessingChan)) + metricscollector.RecordCloudEventQueueStatus(cloudEventSourceI.GetNamespace(), len(e.cloudEventProcessingChan)) return } } } // checkEventHandlers will check each eventhandler active status -func (e *EventEmitter) checkEventHandlers(ctx context.Context, cloudEventSource *eventingv1alpha1.CloudEventSource, cloudEventSourceMutex sync.Locker) { +func (e *EventEmitter) checkEventHandlers(ctx context.Context, cloudEventSourceI eventingv1alpha1.CloudEventSourceInterface, cloudEventSourceMutex sync.Locker) { e.log.V(1).Info("Checking event handlers status.") cloudEventSourceMutex.Lock() defer cloudEventSourceMutex.Unlock() // Get the latest object - err := e.client.Get(ctx, types.NamespacedName{Name: cloudEventSource.Name, Namespace: cloudEventSource.Namespace}, cloudEventSource) + err := e.client.Get(ctx, types.NamespacedName{Name: cloudEventSourceI.GetName(), Namespace: cloudEventSourceI.GetNamespace()}, cloudEventSourceI) if err != nil { - e.log.Error(err, "error getting cloudEventSource", "cloudEventSource", cloudEventSource) + e.log.Error(err, "error getting cloudEventSource", "cloudEventSource", cloudEventSourceI) return } - keyPrefix := cloudEventSource.GenerateIdentifier() + keyPrefix := cloudEventSourceI.GenerateIdentifier() needUpdate := false - cloudEventSourceStatus := cloudEventSource.Status.DeepCopy() + cloudEventSourceStatus := cloudEventSourceI.GetStatus().DeepCopy() for k, v := range e.eventHandlersCache { - e.log.V(1).Info("Checking event handler status.", "handler", k, "status", 
cloudEventSource.Status.Conditions.GetActiveCondition().Status) + e.log.V(1).Info("Checking event handler status.", "handler", k, "status", cloudEventSourceI.GetStatus().Conditions.GetActiveCondition().Status) if strings.Contains(k, keyPrefix) { - if v.GetActiveStatus() != cloudEventSource.Status.Conditions.GetActiveCondition().Status { + if v.GetActiveStatus() != cloudEventSourceI.GetStatus().Conditions.GetActiveCondition().Status { needUpdate = true cloudEventSourceStatus.Conditions.SetActiveCondition( metav1.ConditionFalse, @@ -316,9 +320,8 @@ func (e *EventEmitter) checkEventHandlers(ctx context.Context, cloudEventSource } } } - if needUpdate { - if updateErr := e.updateCloudEventSourceStatus(ctx, cloudEventSource, cloudEventSourceStatus); updateErr != nil { + if updateErr := e.updateCloudEventSourceStatus(ctx, cloudEventSourceI, cloudEventSourceStatus); updateErr != nil { e.log.Error(updateErr, "Failed to update CloudEventSource status") } } @@ -387,7 +390,6 @@ func (e *EventEmitter) emitEventByHandler(eventData eventdata.EventData) { return } } - eventData.HandlerKey = key if handler.GetActiveStatus() == metav1.ConditionTrue { go handler.EmitEvent(eventData, e.emitErrorHandle) @@ -425,33 +427,36 @@ func (e *EventEmitter) emitErrorHandle(eventData eventdata.EventData, err error) e.enqueueEventData(requeueData) } -func (e *EventEmitter) setCloudEventSourceStatusActive(ctx context.Context, cloudEventSource *eventingv1alpha1.CloudEventSource) error { - cloudEventSourceStatus := cloudEventSource.Status.DeepCopy() +func (e *EventEmitter) setCloudEventSourceStatusActive(ctx context.Context, cloudEventSourceI eventingv1alpha1.CloudEventSourceInterface) error { + cloudEventSourceStatus := cloudEventSourceI.GetStatus() cloudEventSourceStatus.Conditions.SetActiveCondition( metav1.ConditionTrue, eventingv1alpha1.CloudEventSourceConditionActiveReason, eventingv1alpha1.CloudEventSourceConditionActiveMessage, ) - return e.updateCloudEventSourceStatus(ctx, cloudEventSource, 
cloudEventSourceStatus) + return e.updateCloudEventSourceStatus(ctx, cloudEventSourceI, cloudEventSourceStatus) } -func (e *EventEmitter) updateCloudEventSourceStatus(ctx context.Context, cloudEventSource *eventingv1alpha1.CloudEventSource, cloudEventSourceStatus *eventingv1alpha1.CloudEventSourceStatus) error { - e.log.V(1).Info("Updating CloudEventSource status", "CloudEventSource", cloudEventSource.Name) +func (e *EventEmitter) updateCloudEventSourceStatus(ctx context.Context, cloudEventSourceI eventingv1alpha1.CloudEventSourceInterface, cloudEventSourceStatus *eventingv1alpha1.CloudEventSourceStatus) error { + e.log.V(1).Info("Updating CloudEventSource status", "CloudEventSource", cloudEventSourceI.GetName()) transform := func(runtimeObj client.Object, target interface{}) error { - status, ok := target.(*eventingv1alpha1.CloudEventSourceStatus) + status, ok := target.(eventingv1alpha1.CloudEventSourceStatus) if !ok { return fmt.Errorf("transform target is not eventingv1alpha1.CloudEventSourceStatus type %v", target) } switch obj := runtimeObj.(type) { case *eventingv1alpha1.CloudEventSource: - e.log.V(1).Info("New CloudEventSource status", "status", *status) - obj.Status = *status + e.log.V(1).Info("New CloudEventSource status", "status", status) + obj.Status = status + case *eventingv1alpha1.ClusterCloudEventSource: + e.log.V(1).Info("New ClusterCloudEventSource status", "status", status) + obj.Status = status default: } return nil } - if err := kedastatus.TransformObject(ctx, e.client, e.log, cloudEventSource, cloudEventSourceStatus, transform); err != nil { + if err := kedastatus.TransformObject(ctx, e.client, e.log, cloudEventSourceI, *cloudEventSourceStatus, transform); err != nil { e.log.Error(err, "Failed to update CloudEventSourceStatus") return err } diff --git a/pkg/mock/mock_eventemitter/mock_interface.go b/pkg/mock/mock_eventemitter/mock_interface.go index 81494706708..39dc6f7d3d2 100644 --- a/pkg/mock/mock_eventemitter/mock_interface.go +++ 
b/pkg/mock/mock_eventemitter/mock_interface.go @@ -44,7 +44,7 @@ func (m *MockEventHandler) EXPECT() *MockEventHandlerMockRecorder { } // DeleteCloudEventSource mocks base method. -func (m *MockEventHandler) DeleteCloudEventSource(cloudEventSource *v1alpha1.CloudEventSource) error { +func (m *MockEventHandler) DeleteCloudEventSource(cloudEventSource v1alpha1.CloudEventSourceInterface) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "DeleteCloudEventSource", cloudEventSource) ret0, _ := ret[0].(error) @@ -70,7 +70,7 @@ func (mr *MockEventHandlerMockRecorder) Emit(object, namesapce, eventType, cloud } // HandleCloudEventSource mocks base method. -func (m *MockEventHandler) HandleCloudEventSource(ctx context.Context, cloudEventSource *v1alpha1.CloudEventSource) error { +func (m *MockEventHandler) HandleCloudEventSource(ctx context.Context, cloudEventSource v1alpha1.CloudEventSourceInterface) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "HandleCloudEventSource", ctx, cloudEventSource) ret0, _ := ret[0].(error) diff --git a/pkg/status/status.go b/pkg/status/status.go index 852843afac9..8c42190d5f7 100755 --- a/pkg/status/status.go +++ b/pkg/status/status.go @@ -44,6 +44,8 @@ func SetStatusConditions(ctx context.Context, client runtimeclient.StatusClient, obj.Status.Conditions = *conditions case *eventingv1alpha1.CloudEventSource: obj.Status.Conditions = *conditions + case *eventingv1alpha1.ClusterCloudEventSource: + obj.Status.Conditions = *conditions default: } return nil @@ -184,6 +186,12 @@ func TransformObject(ctx context.Context, client runtimeclient.StatusClient, log logger.Error(err, "failed to patch CloudEventSource") return err } + case *eventingv1alpha1.ClusterCloudEventSource: + patch = runtimeclient.MergeFrom(obj.DeepCopy()) + if err := transform(obj, target); err != nil { + logger.Error(err, "failed to patch ClusterCloudEventSource") + return err + } default: err := fmt.Errorf("unknown scalable object type %v", obj) logger.Error(err, "failed to patch 
Objects") diff --git a/tests/internals/cloudevent_source/cloudevent_source_test.go b/tests/internals/cloudevent_source/cloudevent_source_test.go index a5fe794c209..b1001c7c1e3 100644 --- a/tests/internals/cloudevent_source/cloudevent_source_test.go +++ b/tests/internals/cloudevent_source/cloudevent_source_test.go @@ -25,82 +25,43 @@ const ( var _ = godotenv.Load("../../.env") var ( - namespace = fmt.Sprintf("%s-ns", testName) - scaledObjectName = fmt.Sprintf("%s-so", testName) - deploymentName = fmt.Sprintf("%s-d", testName) - clientName = fmt.Sprintf("%s-client", testName) - cloudeventSourceName = fmt.Sprintf("%s-ce", testName) - cloudeventSourceErrName = fmt.Sprintf("%s-ce-err", testName) - cloudeventSourceErrName2 = fmt.Sprintf("%s-ce-err2", testName) - cloudEventHTTPReceiverName = fmt.Sprintf("%s-cloudevent-http-receiver", testName) - cloudEventHTTPServiceName = fmt.Sprintf("%s-cloudevent-http-service", testName) - cloudEventHTTPServiceURL = fmt.Sprintf("http://%s.%s.svc.cluster.local:8899", cloudEventHTTPServiceName, namespace) - clusterName = "test-cluster" - expectedSubject = fmt.Sprintf("/%s/%s/scaledobject/%s", clusterName, namespace, scaledObjectName) - expectedSource = fmt.Sprintf("/%s/keda/keda", clusterName) - lastCloudEventTime = time.Now() + namespace = fmt.Sprintf("%s-ns", testName) + scaledObjectName = fmt.Sprintf("%s-so", testName) + deploymentName = fmt.Sprintf("%s-d", testName) + clientName = fmt.Sprintf("%s-client", testName) + cloudeventSourceName = fmt.Sprintf("%s-ce", testName) + cloudeventSourceErrName = fmt.Sprintf("%s-ce-err", testName) + cloudeventSourceErrName2 = fmt.Sprintf("%s-ce-err2", testName) + clusterCloudeventSourceName = fmt.Sprintf("%s-cce", testName) + clusterCloudeventSourceErrName = fmt.Sprintf("%s-cce-err", testName) + clusterCloudeventSourceErrName2 = fmt.Sprintf("%s-cce-err2", testName) + cloudEventHTTPReceiverName = fmt.Sprintf("%s-cloudevent-http-receiver", testName) + cloudEventHTTPServiceName = 
fmt.Sprintf("%s-cloudevent-http-service", testName) + cloudEventHTTPServiceURL = fmt.Sprintf("http://%s.%s.svc.cluster.local:8899", cloudEventHTTPServiceName, namespace) + clusterName = "test-cluster" + expectedSubject = fmt.Sprintf("/%s/%s/scaledobject/%s", clusterName, namespace, scaledObjectName) + expectedSource = fmt.Sprintf("/%s/keda/keda", clusterName) + lastCloudEventTime = time.Now() ) type templateData struct { - TestNamespace string - ScaledObject string - DeploymentName string - ClientName string - CloudEventSourceName string - CloudeventSourceErrName string - CloudeventSourceErrName2 string - CloudEventHTTPReceiverName string - CloudEventHTTPServiceName string - CloudEventHTTPServiceURL string - ClusterName string + TestNamespace string + ScaledObject string + DeploymentName string + ClientName string + CloudEventSourceName string + CloudeventSourceErrName string + CloudeventSourceErrName2 string + ClusterCloudEventSourceName string + ClusterCloudeventSourceErrName string + ClusterCloudeventSourceErrName2 string + CloudEventHTTPReceiverName string + CloudEventHTTPServiceName string + CloudEventHTTPServiceURL string + ClusterName string } const ( - cloudEventSourceTemplate = ` - apiVersion: eventing.keda.sh/v1alpha1 - kind: CloudEventSource - metadata: - name: {{.CloudEventSourceName}} - namespace: {{.TestNamespace}} - spec: - clusterName: {{.ClusterName}} - destination: - http: - uri: {{.CloudEventHTTPServiceURL}} - ` - - cloudEventSourceWithExcludeTemplate = ` - apiVersion: eventing.keda.sh/v1alpha1 - kind: CloudEventSource - metadata: - name: {{.CloudEventSourceName}} - namespace: {{.TestNamespace}} - spec: - clusterName: {{.ClusterName}} - destination: - http: - uri: {{.CloudEventHTTPServiceURL}} - eventSubscription: - excludedEventTypes: - - keda.scaledobject.failed.v1 - ` - - cloudEventSourceWithIncludeTemplate = ` - apiVersion: eventing.keda.sh/v1alpha1 - kind: CloudEventSource - metadata: - name: {{.CloudEventSourceName}} - namespace: 
{{.TestNamespace}} - spec: - clusterName: {{.ClusterName}} - destination: - http: - uri: {{.CloudEventHTTPServiceURL}} - eventSubscription: - includedEventTypes: - - keda.scaledobject.failed.v1 - ` - cloudEventHTTPServiceTemplate = ` apiVersion: v1 kind: Service @@ -179,6 +140,51 @@ spec: - -c - "exec tail -f /dev/null"` + cloudEventSourceTemplate = ` + apiVersion: eventing.keda.sh/v1alpha1 + kind: CloudEventSource + metadata: + name: {{.CloudEventSourceName}} + namespace: {{.TestNamespace}} + spec: + clusterName: {{.ClusterName}} + destination: + http: + uri: {{.CloudEventHTTPServiceURL}} + ` + + cloudEventSourceWithExcludeTemplate = ` + apiVersion: eventing.keda.sh/v1alpha1 + kind: CloudEventSource + metadata: + name: {{.CloudEventSourceName}} + namespace: {{.TestNamespace}} + spec: + clusterName: {{.ClusterName}} + destination: + http: + uri: {{.CloudEventHTTPServiceURL}} + eventSubscription: + excludedEventTypes: + - keda.scaledobject.failed.v1 + ` + + cloudEventSourceWithIncludeTemplate = ` + apiVersion: eventing.keda.sh/v1alpha1 + kind: CloudEventSource + metadata: + name: {{.CloudEventSourceName}} + namespace: {{.TestNamespace}} + spec: + clusterName: {{.ClusterName}} + destination: + http: + uri: {{.CloudEventHTTPServiceURL}} + eventSubscription: + includedEventTypes: + - keda.scaledobject.failed.v1 + ` + cloudEventSourceWithErrTypeTemplate = ` apiVersion: eventing.keda.sh/v1alpha1 kind: CloudEventSource @@ -262,6 +268,80 @@ spec: end: 5 * * * * desiredReplicas: '4' ` + + clusterCloudEventSourceTemplate = ` + apiVersion: eventing.keda.sh/v1alpha1 + kind: ClusterCloudEventSource + metadata: + name: {{.ClusterCloudEventSourceName}} + spec: + clusterName: {{.ClusterName}} + destination: + http: + uri: {{.CloudEventHTTPServiceURL}} + ` + + clusterCloudEventSourceWithExcludeTemplate = ` + apiVersion: eventing.keda.sh/v1alpha1 + kind: ClusterCloudEventSource + metadata: + name: {{.ClusterCloudEventSourceName}} + spec: + clusterName: {{.ClusterName}} + 
destination: + http: + uri: {{.CloudEventHTTPServiceURL}} + eventSubscription: + excludedEventTypes: + - keda.scaledobject.failed.v1 + ` + + clusterCloudEventSourceWithIncludeTemplate = ` + apiVersion: eventing.keda.sh/v1alpha1 + kind: ClusterCloudEventSource + metadata: + name: {{.ClusterCloudEventSourceName}} + spec: + clusterName: {{.ClusterName}} + destination: + http: + uri: {{.CloudEventHTTPServiceURL}} + eventSubscription: + includedEventTypes: + - keda.scaledobject.failed.v1 + ` + + clusterCloudEventSourceWithErrTypeTemplate = ` + apiVersion: eventing.keda.sh/v1alpha1 + kind: ClusterCloudEventSource + metadata: + name: {{.ClusterCloudeventSourceErrName}} + spec: + clusterName: {{.ClusterName}} + destination: + http: + uri: {{.CloudEventHTTPServiceURL}} + eventSubscription: + includedEventTypes: + - keda.scaledobject.failed.v2 + ` + + clusterCloudEventSourceWithErrTypeTemplate2 = ` + apiVersion: eventing.keda.sh/v1alpha1 + kind: ClusterCloudEventSource + metadata: + name: {{.ClusterCloudeventSourceErrName2}} + spec: + clusterName: {{.ClusterName}} + destination: + http: + uri: {{.CloudEventHTTPServiceURL}} + eventSubscription: + includedEventTypes: + - keda.scaledobject.failed.v1 + excludedEventTypes: + - keda.scaledobject.failed.v1 + ` ) func TestScaledObjectGeneral(t *testing.T) { @@ -274,18 +354,31 @@ func TestScaledObjectGeneral(t *testing.T) { assert.True(t, WaitForAllPodRunningInNamespace(t, kc, namespace, 5, 20), "all pods should be running") - testErrEventSourceEmitValue(t, kc, data) + testErrEventSourceEmitValue(t, kc, data, true) testEventSourceEmitValue(t, kc, data) - testErrEventSourceExcludeValue(t, kc, data) - testErrEventSourceIncludeValue(t, kc, data) - testErrEventSourceCreation(t, kc, data) + testErrEventSourceExcludeValue(t, kc, data, true) + testErrEventSourceIncludeValue(t, kc, data, true) + testErrEventSourceCreation(t, kc, data, true) + + testErrEventSourceEmitValue(t, kc, data, false) + testErrEventSourceExcludeValue(t, kc, data, 
false) + testErrEventSourceIncludeValue(t, kc, data, false) + testErrEventSourceCreation(t, kc, data, false) DeleteKubernetesResources(t, namespace, data, templates) } // tests error events emitted -func testErrEventSourceEmitValue(t *testing.T, _ *kubernetes.Clientset, data templateData) { +func testErrEventSourceEmitValue(t *testing.T, _ *kubernetes.Clientset, data templateData, isClusterScope bool) { + ceTemplate := "" + if isClusterScope { + ceTemplate = clusterCloudEventSourceTemplate + } else { + ceTemplate = cloudEventSourceTemplate + } + t.Log("--- test emitting eventsource about scaledobject err---") + KubectlApplyWithTemplate(t, data, "cloudEventSourceTemplate", ceTemplate) KubectlApplyWithTemplate(t, data, "scaledObjectErrTemplate", scaledObjectErrTemplate) // wait 15 seconds to ensure event propagation @@ -331,6 +424,8 @@ func testErrEventSourceEmitValue(t *testing.T, _ *kubernetes.Clientset, data tem } } assert.NotEmpty(t, foundEvents) + KubectlDeleteWithTemplate(t, data, "cloudEventSourceTemplate", ceTemplate) + t.Log("--- testErrEventSourceEmitValuetestErrEventSourceEmitValuer---", "cloud event time", lastCloudEventTime) } func testEventSourceEmitValue(t *testing.T, _ *kubernetes.Clientset, data templateData) { @@ -377,11 +472,17 @@ func testEventSourceEmitValue(t *testing.T, _ *kubernetes.Clientset, data templa } // tests error events not emitted by -func testErrEventSourceExcludeValue(t *testing.T, _ *kubernetes.Clientset, data templateData) { - t.Log("--- test emitting eventsource about scaledobject err with exclude filter---") +func testErrEventSourceExcludeValue(t *testing.T, _ *kubernetes.Clientset, data templateData, isClusterScope bool) { + t.Log("--- test emitting eventsource about scaledobject err with exclude filter---", "cloud event time", lastCloudEventTime) + + ceTemplate := "" + if isClusterScope { + ceTemplate = clusterCloudEventSourceWithExcludeTemplate + } else { + ceTemplate = cloudEventSourceWithExcludeTemplate + } - 
KubectlDeleteWithTemplate(t, data, "cloudEventSourceTemplate", cloudEventSourceTemplate) - KubectlApplyWithTemplate(t, data, "cloudEventSourceWithExcludeTemplate", cloudEventSourceWithExcludeTemplate) + KubectlApplyWithTemplate(t, data, "cloudEventSourceWithExcludeTemplate", ceTemplate) KubectlApplyWithTemplate(t, data, "scaledObjectErrTemplate", scaledObjectErrTemplate) // wait 15 seconds to ensure event propagation @@ -408,16 +509,21 @@ func testErrEventSourceExcludeValue(t *testing.T, _ *kubernetes.Clientset, data }, "get filtered event") } - KubectlDeleteWithTemplate(t, data, "cloudEventSourceWithExcludeTemplate", cloudEventSourceWithExcludeTemplate) - KubectlApplyWithTemplate(t, data, "cloudEventSourceTemplate", cloudEventSourceTemplate) + KubectlDeleteWithTemplate(t, data, "cloudEventSourceWithExcludeTemplate", ceTemplate) } // tests error events in include filter -func testErrEventSourceIncludeValue(t *testing.T, _ *kubernetes.Clientset, data templateData) { +func testErrEventSourceIncludeValue(t *testing.T, _ *kubernetes.Clientset, data templateData, isClusterScope bool) { t.Log("--- test emitting eventsource about scaledobject err with include filter---") - KubectlDeleteWithTemplate(t, data, "cloudEventSourceTemplate", cloudEventSourceTemplate) - KubectlApplyWithTemplate(t, data, "cloudEventSourceWithIncludeTemplate", cloudEventSourceWithIncludeTemplate) + ceTemplate := "" + if isClusterScope { + ceTemplate = clusterCloudEventSourceWithIncludeTemplate + } else { + ceTemplate = cloudEventSourceWithIncludeTemplate + } + + KubectlApplyWithTemplate(t, data, "cloudEventSourceWithIncludeTemplate", ceTemplate) KubectlApplyWithTemplate(t, data, "scaledObjectErrTemplate", scaledObjectErrTemplate) // wait 15 seconds to ensure event propagation @@ -442,43 +548,56 @@ func testErrEventSourceIncludeValue(t *testing.T, _ *kubernetes.Clientset, data } } assert.NotEmpty(t, foundEvents) - KubectlDeleteWithTemplate(t, data, "cloudEventSourceWithIncludeTemplate", 
cloudEventSourceWithIncludeTemplate) - KubectlApplyWithTemplate(t, data, "cloudEventSourceTemplate", cloudEventSourceTemplate) + KubectlDeleteWithTemplate(t, data, "cloudEventSourceWithIncludeTemplate", ceTemplate) } // tests error event type when creation -func testErrEventSourceCreation(t *testing.T, _ *kubernetes.Clientset, data templateData) { +func testErrEventSourceCreation(t *testing.T, _ *kubernetes.Clientset, data templateData, isClusterScope bool) { t.Log("--- test emitting eventsource about scaledobject err with include filter---") - KubectlDeleteWithTemplate(t, data, "cloudEventSourceTemplate", cloudEventSourceTemplate) + ceErrTemplate := "" + ceErrTemplate2 := "" + if isClusterScope { + ceErrTemplate = clusterCloudEventSourceWithErrTypeTemplate + ceErrTemplate2 = clusterCloudEventSourceWithErrTypeTemplate2 + } else { + ceErrTemplate = cloudEventSourceWithErrTypeTemplate + ceErrTemplate2 = cloudEventSourceWithErrTypeTemplate2 + } + + // KubectlDeleteWithTemplate(t, data, "cloudEventSourceTemplate", cloudEventSourceTemplate) - err := KubectlApplyWithErrors(t, data, "cloudEventSourceWithErrTypeTemplate", cloudEventSourceWithErrTypeTemplate) - assert.ErrorContains(t, err, `The CloudEventSource "eventsource-test-ce-err" is invalid:`) + err := KubectlApplyWithErrors(t, data, "cloudEventSourceWithErrTypeTemplate", ceErrTemplate) + if isClusterScope { + assert.ErrorContains(t, err, `The ClusterCloudEventSource "eventsource-test-cce-err" is invalid:`) + } else { + assert.ErrorContains(t, err, `The CloudEventSource "eventsource-test-ce-err" is invalid:`) + } - err = KubectlApplyWithErrors(t, data, "cloudEventSourceWithErrTypeTemplate2", cloudEventSourceWithErrTypeTemplate2) + err = KubectlApplyWithErrors(t, data, "cloudEventSourceWithErrTypeTemplate2", ceErrTemplate2) assert.ErrorContains(t, err, `setting included types and excluded types at the same time is not supported`) - - KubectlApplyWithTemplate(t, data, "cloudEventSourceTemplate", 
cloudEventSourceTemplate) } // help function to load template data func getTemplateData() (templateData, []Template) { return templateData{ - TestNamespace: namespace, - ScaledObject: scaledObjectName, - DeploymentName: deploymentName, - ClientName: clientName, - CloudEventSourceName: cloudeventSourceName, - CloudeventSourceErrName: cloudeventSourceErrName, - CloudeventSourceErrName2: cloudeventSourceErrName2, - CloudEventHTTPReceiverName: cloudEventHTTPReceiverName, - CloudEventHTTPServiceName: cloudEventHTTPServiceName, - CloudEventHTTPServiceURL: cloudEventHTTPServiceURL, - ClusterName: clusterName, + TestNamespace: namespace, + ScaledObject: scaledObjectName, + DeploymentName: deploymentName, + ClientName: clientName, + CloudEventSourceName: cloudeventSourceName, + CloudeventSourceErrName: cloudeventSourceErrName, + CloudeventSourceErrName2: cloudeventSourceErrName2, + ClusterCloudEventSourceName: clusterCloudeventSourceName, + ClusterCloudeventSourceErrName: clusterCloudeventSourceErrName, + ClusterCloudeventSourceErrName2: clusterCloudeventSourceErrName2, + CloudEventHTTPReceiverName: cloudEventHTTPReceiverName, + CloudEventHTTPServiceName: cloudEventHTTPServiceName, + CloudEventHTTPServiceURL: cloudEventHTTPServiceURL, + ClusterName: clusterName, }, []Template{ {Name: "cloudEventHTTPReceiverTemplate", Config: cloudEventHTTPReceiverTemplate}, {Name: "cloudEventHTTPServiceTemplate", Config: cloudEventHTTPServiceTemplate}, {Name: "clientTemplate", Config: clientTemplate}, - {Name: "cloudEventSourceTemplate", Config: cloudEventSourceTemplate}, } } From 3975b8c0d85f8e79d65d70939b3478b25cae2dcd Mon Sep 17 00:00:00 2001 From: Jorge Turrado Ferrero Date: Mon, 12 Aug 2024 12:28:26 +0200 Subject: [PATCH 04/16] chore: Prepare v2.15.1 (#6051) * chore: Prepare v2.15.1 Signed-off-by: Jorge Turrado * fix changelog Signed-off-by: Jorge Turrado --------- Signed-off-by: Jorge Turrado Signed-off-by: Fira Curie --- .github/ISSUE_TEMPLATE/3_bug_report.yml | 1 + CHANGELOG.md | 16 
++++++++++++++-- 2 files changed, 15 insertions(+), 2 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/3_bug_report.yml b/.github/ISSUE_TEMPLATE/3_bug_report.yml index 3a43f5fd905..2b226b28984 100644 --- a/.github/ISSUE_TEMPLATE/3_bug_report.yml +++ b/.github/ISSUE_TEMPLATE/3_bug_report.yml @@ -57,6 +57,7 @@ body: label: KEDA Version description: What version of KEDA that are you running? options: + - "2.15.1" - "2.15.0" - "2.14.1" - "2.14.0" diff --git a/CHANGELOG.md b/CHANGELOG.md index 283c07d1b96..4e7d28889e2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -16,6 +16,7 @@ To learn more about active deprecations, we recommend checking [GitHub Discussio ## History - [Unreleased](#unreleased) +- [v2.15.1](#v2151) - [v2.15.0](#v2150) - [v2.14.1](#v2141) - [v2.14.0](#v2140) @@ -70,8 +71,7 @@ Here is an overview of all new **experimental** features: ### Fixes -- **General**: Hashicorp Vault PKI doesn't fail with due to KeyPair mismatch ([#6028](https://github.com/kedacore/keda/issues/6028)) -- **JetStream**: Consumer leader change is correctly detected ([#6042](https://github.com/kedacore/keda/issues/6042)) +- TODO ([#XXX](https://github.com/kedacore/keda/issues/XXX)) ### Deprecations @@ -85,6 +85,18 @@ New deprecation(s): - TODO ([#XXX](https://github.com/kedacore/keda/issues/XXX)) +### Other + +- TODO ([#XXX](https://github.com/kedacore/keda/issues/XXX)) + +## v2.15.1 + +### Fixes + +- **General**: Hashicorp Vault PKI doesn't fail with due to KeyPair mismatch ([#6028](https://github.com/kedacore/keda/issues/6028)) +- **JetStream**: Consumer leader change is correctly detected ([#6042](https://github.com/kedacore/keda/issues/6042)) + + ### Other - **General**: Bump deps and k8s deps to 0.29.7 ([#6035](https://github.com/kedacore/keda/pull/6035)) From baef8e85a30484702ab06062e2cb0ed71db5bb27 Mon Sep 17 00:00:00 2001 From: Jorge Turrado Ferrero Date: Tue, 13 Aug 2024 15:53:01 +0200 Subject: [PATCH 05/16] chore: Enable Az Pipeline (wi) e2e (#6070) * chore: Enable Az 
Pipeline e2e Signed-off-by: Jorge Turrado * update manifests Signed-off-by: Jorge Turrado --------- Signed-off-by: Jorge Turrado Signed-off-by: Fira Curie --- .../crd/bases/eventing.keda.sh_clustercloudeventsources.yaml | 2 +- .../azure_pipelines_aad_wi/azure_pipelines_aad_wi_test.go | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/config/crd/bases/eventing.keda.sh_clustercloudeventsources.yaml b/config/crd/bases/eventing.keda.sh_clustercloudeventsources.yaml index 079a8976cbe..55c806370dd 100644 --- a/config/crd/bases/eventing.keda.sh_clustercloudeventsources.yaml +++ b/config/crd/bases/eventing.keda.sh_clustercloudeventsources.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.14.0 + controller-gen.kubebuilder.io/version: v0.15.0 name: clustercloudeventsources.eventing.keda.sh spec: group: eventing.keda.sh diff --git a/tests/scalers/azure/azure_pipelines_aad_wi/azure_pipelines_aad_wi_test.go b/tests/scalers/azure/azure_pipelines_aad_wi/azure_pipelines_aad_wi_test.go index 026e4f74944..f1db3b68871 100644 --- a/tests/scalers/azure/azure_pipelines_aad_wi/azure_pipelines_aad_wi_test.go +++ b/tests/scalers/azure/azure_pipelines_aad_wi/azure_pipelines_aad_wi_test.go @@ -165,8 +165,7 @@ spec: ` ) -// TODO: Enable the test again when the infra is fixed -func DisabledTestScaler(t *testing.T) { +func TestScaler(t *testing.T) { // setup t.Log("--- setting up ---") require.NotEmpty(t, organizationURL, "AZURE_DEVOPS_ORGANIZATION_URL env variable is required for azure pipelines test") From b1867656090e42540ed5c2b4da56ba5646b08afe Mon Sep 17 00:00:00 2001 From: Fira Curie Date: Sat, 24 Aug 2024 23:25:31 +0200 Subject: [PATCH 06/16] change config to declarative format Signed-off-by: Fira Curie --- pkg/scalers/gitlab_runner_scaler.go | 65 ++++++----------------------- 1 file changed, 13 insertions(+), 52 deletions(-) diff --git 
a/pkg/scalers/gitlab_runner_scaler.go b/pkg/scalers/gitlab_runner_scaler.go index 1026609c8e3..6665dce558c 100644 --- a/pkg/scalers/gitlab_runner_scaler.go +++ b/pkg/scalers/gitlab_runner_scaler.go @@ -3,7 +3,6 @@ package scalers import ( "context" "encoding/json" - "errors" "fmt" "net/http" "net/url" @@ -17,11 +16,6 @@ import ( ) const ( - // externalMetricType is the type of the external metric. - defaultTargetPipelineQueueLength = 1 - // defaultGitlabAPIURL is the default GitLab API base URL. - defaultGitlabAPIURL = "https://gitlab.com" - // pipelineWaitingForResourceStatus is the status of the pipelines that are waiting for resources. pipelineWaitingForResourceStatus = "waiting_for_resource" @@ -39,11 +33,11 @@ type gitlabRunnerScaler struct { } type gitlabRunnerMetadata struct { - gitlabAPIURL *url.URL - personalAccessToken string - projectID string + gitlabAPIURL *url.URL `keda:"name=gitlabAPIURL, order=triggerMetadata, default=https://gitlab.com, optional"` + personalAccessToken string `keda:"name=personalAccessToken, order=authParams"` + projectID string `keda:"name=projectID, order=triggerMetadata"` - targetPipelineQueueLength int64 + targetPipelineQueueLength int64 `keda:"name=targetPipelineQueueLength, order=triggerMetadata, default=1"` triggerIndex int } @@ -61,6 +55,10 @@ func NewGitLabRunnerScaler(config *scalersconfig.ScalerConfig) (Scaler, error) { return nil, fmt.Errorf("error parsing GitLab Runner metadata: %w", err) } + uri := constructGitlabAPIPipelinesURL(*meta.gitlabAPIURL, meta.projectID, pipelineWaitingForResourceStatus) + + meta.gitlabAPIURL = &uri + return &gitlabRunnerScaler{ metricType: metricType, metadata: meta, @@ -70,51 +68,14 @@ func NewGitLabRunnerScaler(config *scalersconfig.ScalerConfig) (Scaler, error) { } func parseGitLabRunnerMetadata(config *scalersconfig.ScalerConfig) (*gitlabRunnerMetadata, error) { - meta := &gitlabRunnerMetadata{} - meta.targetPipelineQueueLength = defaultTargetWorkflowQueueLength - - // Get the projectID 
- projectIDValue, err := getValueFromMetaOrEnv("projectID", config.TriggerMetadata, config.ResolvedEnv) - if err != nil { - return nil, err - } - - meta.projectID = projectIDValue - - // Get the targetWorkflowQueueLength - targetWorkflowQueueLength, err := getInt64ValueFromMetaOrEnv("targetWorkflowQueueLength", config) - if err != nil || targetWorkflowQueueLength == 0 { - meta.targetPipelineQueueLength = defaultTargetPipelineQueueLength - } - meta.targetPipelineQueueLength = targetWorkflowQueueLength - - // Get the personalAccessToken - personalAccessToken, ok := config.AuthParams["personalAccessToken"] - if !ok || personalAccessToken == "" { - return nil, errors.New("no personalAccessToken provided") - } - - meta.personalAccessToken = personalAccessToken - - // Get the GitLab API URL - gitlabAPIURLValue, err := getValueFromMetaOrEnv("gitlabAPIURL", config.TriggerMetadata, config.ResolvedEnv) - if err != nil || gitlabAPIURLValue == "" { - gitlabAPIURLValue = defaultGitlabAPIURL - } - - gitlabAPIURL, err := url.Parse(gitlabAPIURLValue) - if err != nil { - return nil, fmt.Errorf("parsing gitlabAPIURL: %w", err) - } - - // Construct the GitLab API URL - uri := constructGitlabAPIPipelinesURL(*gitlabAPIURL, projectIDValue, pipelineWaitingForResourceStatus) - - meta.gitlabAPIURL = &uri + meta := gitlabRunnerMetadata{} meta.triggerIndex = config.TriggerIndex + if err := config.TypedConfig(&meta); err != nil { + return nil, fmt.Errorf("error parsing gitlabRunner metadata: %w", err) + } - return meta, nil + return &meta, nil } func (s *gitlabRunnerScaler) GetMetricsAndActivity(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, bool, error) { From 21af44fd4e5ceedce4517777ab301b9cead6fa14 Mon Sep 17 00:00:00 2001 From: Jorge Turrado Ferrero Date: Mon, 19 Aug 2024 10:54:42 +0200 Subject: [PATCH 07/16] chore: Improve azure e2e coverage (#6090) * chore: Improve azure e2e coverage Signed-off-by: Jorge Turrado * add missing vars Signed-off-by: Jorge 
Turrado --------- Signed-off-by: Jorge Turrado Signed-off-by: Fira Curie --- .../azure_event_hub_blob_metadata_wi_test.go | 234 +++++++++++++++ .../azure_event_hub_dapr_wi_test.go | 272 ++++++++++++++++++ 2 files changed, 506 insertions(+) create mode 100644 tests/scalers/azure/azure_event_hub_blob_metadata_wi/azure_event_hub_blob_metadata_wi_test.go create mode 100644 tests/scalers/azure/azure_event_hub_dapr_wi/azure_event_hub_dapr_wi_test.go diff --git a/tests/scalers/azure/azure_event_hub_blob_metadata_wi/azure_event_hub_blob_metadata_wi_test.go b/tests/scalers/azure/azure_event_hub_blob_metadata_wi/azure_event_hub_blob_metadata_wi_test.go new file mode 100644 index 00000000000..fd0c965f4cd --- /dev/null +++ b/tests/scalers/azure/azure_event_hub_blob_metadata_wi/azure_event_hub_blob_metadata_wi_test.go @@ -0,0 +1,234 @@ +//go:build e2e +// +build e2e + +package azure_event_hub_blob_metadata_wi_test + +import ( + "context" + "encoding/base64" + "fmt" + "os" + "testing" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob" + "github.com/joho/godotenv" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "k8s.io/client-go/kubernetes" + + . 
"github.com/kedacore/keda/v2/tests/helper" + azurehelper "github.com/kedacore/keda/v2/tests/scalers/azure/helper" +) + +// Load environment variables from .env file +var _ = godotenv.Load("../../../.env") + +const ( + testName = "azure-event-hub-blob-metadata-wi" + eventhubConsumerGroup = "$Default" +) + +var ( + storageConnectionString = os.Getenv("TF_AZURE_STORAGE_CONNECTION_STRING") + checkpointContainerName = fmt.Sprintf("blob-checkpoint-%d", GetRandomNumber()) + testNamespace = fmt.Sprintf("%s-ns", testName) + secretName = fmt.Sprintf("%s-secret", testName) + deploymentName = fmt.Sprintf("%s-deployment", testName) + triggerAuthName = fmt.Sprintf("%s-ta", testName) + scaledObjectName = fmt.Sprintf("%s-so", testName) + accountName = "" +) + +type templateData struct { + TestNamespace string + SecretName string + EventHubConnection string + StorageConnection string + DeploymentName string + TriggerAuthName string + ScaledObjectName string + CheckpointContainerName string + ConsumerGroup string + AccountName string + EventHubName string + EventHubNamespaceName string +} + +const ( + secretTemplate = ` +apiVersion: v1 +kind: Secret +metadata: + name: {{.SecretName}} + namespace: {{.TestNamespace}} +data: + connection: {{.EventHubConnection}} + storageConnection: {{.StorageConnection}} +` + + deploymentTemplate = ` +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{.DeploymentName}} + namespace: {{.TestNamespace}} + labels: + app: {{.DeploymentName}} +spec: + replicas: 1 + selector: + matchLabels: + app: {{.DeploymentName}} + template: + metadata: + labels: + app: {{.DeploymentName}} + spec: + containers: + - name: {{.DeploymentName}} + image: ghcr.io/kedacore/tests-azure-eventhub-dotnet + env: + - name: EVENTHUB_CONNECTION_STRING + valueFrom: + secretKeyRef: + name: {{.SecretName}} + key: connection + - name: STORAGE_CONNECTION_STRING + valueFrom: + secretKeyRef: + name: {{.SecretName}} + key: storageConnection + - name: CHECKPOINT_CONTAINER + value: 
{{.CheckpointContainerName}} + - name: EVENTHUB_CONSUMERGROUP + value: {{.ConsumerGroup}} +` + + triggerAuthTemplate = ` +apiVersion: keda.sh/v1alpha1 +kind: TriggerAuthentication +metadata: + name: {{.TriggerAuthName}} + namespace: {{.TestNamespace}} +spec: + podIdentity: + provider: azure-workload +` + + scaledObjectTemplate = ` +apiVersion: keda.sh/v1alpha1 +kind: ScaledObject +metadata: + name: {{.ScaledObjectName}} + namespace: {{.TestNamespace}} +spec: + scaleTargetRef: + name: {{.DeploymentName}} + pollingInterval: 5 + minReplicaCount: 0 + maxReplicaCount: 1 + cooldownPeriod: 10 + triggers: + - authenticationRef: + name: {{.TriggerAuthName}} + metadata: + activationUnprocessedEventThreshold: '10' + blobContainer: {{.CheckpointContainerName}} + checkpointStrategy: blobMetadata + consumerGroup: {{.ConsumerGroup}} + unprocessedEventThreshold: '64' + eventHubName: {{.EventHubName}} + eventHubNamespace: {{.EventHubNamespaceName}} + storageAccountName: {{.AccountName}} + type: azure-eventhub +` +) + +func TestScaler(t *testing.T) { + ctx := context.Background() + t.Log("--- setting up ---") + require.NotEmpty(t, storageConnectionString, "TF_AZURE_STORAGE_CONNECTION_STRING env variable is required for azure eventhub test") + + accountName = azurehelper.GetAccountFromStorageConnectionString(storageConnectionString) + + eventHubHelper := azurehelper.NewEventHubHelper(t) + eventHubHelper.CreateEventHub(ctx, t) + blobClient, err := azblob.NewClientFromConnectionString(storageConnectionString, nil) + assert.NoErrorf(t, err, "cannot create the queue client - %s", err) + _, err = blobClient.CreateContainer(ctx, checkpointContainerName, nil) + assert.NoErrorf(t, err, "cannot create the container - %s", err) + + // Create kubernetes resources + kc := GetKubernetesClient(t) + data, templates := getTemplateData(eventHubHelper) + + CreateKubernetesResources(t, kc, testNamespace, data, templates) + + // We need to wait till consumer creates the checkpoint + 
eventHubHelper.PublishEventHubdEvents(ctx, t, 1) + assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 1, 60, 1), + "replica count should be 1 after 1 minute") + time.Sleep(time.Duration(60) * time.Second) + KubectlApplyMultipleWithTemplate(t, data, []Template{{Name: "scaledObjectTemplate", Config: scaledObjectTemplate}}) + + assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 0, 60, 1), + "replica count should be 0 after 1 minute") + + // test scaling + testActivation(ctx, t, kc, eventHubHelper) + testScaleOut(ctx, t, kc, eventHubHelper) + testScaleIn(t, kc) + + // cleanup + DeleteKubernetesResources(t, testNamespace, data, templates) + eventHubHelper.DeleteEventHub(ctx, t) + _, err = blobClient.DeleteContainer(ctx, checkpointContainerName, nil) + assert.NoErrorf(t, err, "cannot delete the container - %s", err) +} + +func getTemplateData(eventHubHelper azurehelper.EventHubHelper) (templateData, []Template) { + base64EventhubConnection := base64.StdEncoding.EncodeToString([]byte(eventHubHelper.ConnectionString())) + base64StorageConnection := base64.StdEncoding.EncodeToString([]byte(storageConnectionString)) + + return templateData{ + TestNamespace: testNamespace, + SecretName: secretName, + EventHubConnection: base64EventhubConnection, + StorageConnection: base64StorageConnection, + CheckpointContainerName: checkpointContainerName, + DeploymentName: deploymentName, + ScaledObjectName: scaledObjectName, + TriggerAuthName: triggerAuthName, + ConsumerGroup: eventhubConsumerGroup, + AccountName: accountName, + EventHubName: eventHubHelper.EventHub(), + EventHubNamespaceName: eventHubHelper.EventHubNamespace(), + }, []Template{ + {Name: "secretTemplate", Config: secretTemplate}, + {Name: "deploymentTemplate", Config: deploymentTemplate}, + {Name: "triggerAuthTemplate", Config: triggerAuthTemplate}, + } +} + +func testActivation(ctx context.Context, t *testing.T, kc *kubernetes.Clientset, 
eventHubHelper azurehelper.EventHubHelper) { + t.Log("--- testing activation ---") + eventHubHelper.PublishEventHubdEvents(ctx, t, 8) + + AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, testNamespace, 0, 60) +} + +func testScaleOut(ctx context.Context, t *testing.T, kc *kubernetes.Clientset, eventHubHelper azurehelper.EventHubHelper) { + t.Log("--- testing scale out ---") + eventHubHelper.PublishEventHubdEvents(ctx, t, 8) + + assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 1, 60, 1), + "replica count should be 1 after 1 minute") +} + +func testScaleIn(t *testing.T, kc *kubernetes.Clientset) { + t.Log("--- testing scale in ---") + + assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 0, 60, 1), + "replica count should be 0 after 1 minute") +} diff --git a/tests/scalers/azure/azure_event_hub_dapr_wi/azure_event_hub_dapr_wi_test.go b/tests/scalers/azure/azure_event_hub_dapr_wi/azure_event_hub_dapr_wi_test.go new file mode 100644 index 00000000000..13870a40bc4 --- /dev/null +++ b/tests/scalers/azure/azure_event_hub_dapr_wi/azure_event_hub_dapr_wi_test.go @@ -0,0 +1,272 @@ +//go:build e2e +// +build e2e + +package azure_event_hub_dapr_wi_test + +import ( + "context" + "encoding/base64" + "fmt" + "os" + "strings" + "testing" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob" + "github.com/joho/godotenv" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "k8s.io/client-go/kubernetes" + + . 
"github.com/kedacore/keda/v2/tests/helper" + azurehelper "github.com/kedacore/keda/v2/tests/scalers/azure/helper" +) + +// Load environment variables from .env file +var _ = godotenv.Load("../../../.env") + +const ( + testName = "azure-event-hub-dapr" + eventhubConsumerGroup = "$Default" +) + +var ( + storageConnectionString = os.Getenv("TF_AZURE_STORAGE_CONNECTION_STRING") + storageAccountName = getValueFromConnectionString(storageConnectionString, "AccountName") + storageAccountKey = getValueFromConnectionString(storageConnectionString, "AccountKey") + checkpointContainerName = fmt.Sprintf("blob-checkpoint-%d", GetRandomNumber()) + testNamespace = fmt.Sprintf("%s-ns", testName) + secretName = fmt.Sprintf("%s-secret", testName) + deploymentName = fmt.Sprintf("%s-deployment", testName) + triggerAuthName = fmt.Sprintf("%s-ta", testName) + scaledObjectName = fmt.Sprintf("%s-so", testName) +) + +type templateData struct { + TestNamespace string + SecretName string + EventHubConnection string + StorageConnection string + Base64EventHubConnection string + Base64StorageConnection string + StorageAccountName string + StorageAccountKey string + DeploymentName string + TriggerAuthName string + ScaledObjectName string + CheckpointContainerName string + ConsumerGroup string + EventHubName string + EventHubNamespaceName string +} + +const ( + secretTemplate = ` +apiVersion: v1 +kind: Secret +metadata: + name: {{.SecretName}} + namespace: {{.TestNamespace}} +data: + connection: {{.Base64EventHubConnection}} + storageConnection: {{.Base64StorageConnection}} +stringData: + azure-eventhub-binding.yaml: | + apiVersion: dapr.io/v1alpha1 + kind: Component + metadata: + name: azure-eventhub-dapr + spec: + type: bindings.azure.eventhubs + version: v1 + metadata: + - name: connectionString + value: {{.EventHubConnection}} + - name: consumerGroup + value: {{.ConsumerGroup}} + - name: storageAccountName + value: {{.StorageAccountName}} + - name: storageAccountKey + value: 
{{.StorageAccountKey}} + - name: storageContainerName + value: {{.CheckpointContainerName}} +` + + deploymentTemplate = ` +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{.DeploymentName}} + namespace: {{.TestNamespace}} + labels: + app: {{.DeploymentName}} +spec: + replicas: 1 + selector: + matchLabels: + app: {{.DeploymentName}} + template: + metadata: + labels: + app: {{.DeploymentName}} + spec: + volumes: + - name: {{.SecretName}} + secret: + secretName: {{.SecretName}} + containers: + - name: dapr + image: daprio/daprd:1.9.5-mariner + imagePullPolicy: Always + command: ["./daprd", "-app-id", "azure-eventhub-dapr", "-app-port", "3000", "-components-path", "/components", "-log-level", "debug"] + volumeMounts: + - mountPath: "/components" + name: {{.SecretName}} + readOnly: true + - name: {{.DeploymentName}} + image: ghcr.io/kedacore/tests-azure-eventhub-dapr + env: + - name: APP_PORT + value: "3000" +` + + triggerAuthTemplate = ` +apiVersion: keda.sh/v1alpha1 +kind: TriggerAuthentication +metadata: + name: {{.TriggerAuthName}} + namespace: {{.TestNamespace}} +spec: + podIdentity: + provider: azure-workload +` + + scaledObjectTemplate = ` +apiVersion: keda.sh/v1alpha1 +kind: ScaledObject +metadata: + name: {{.ScaledObjectName}} + namespace: {{.TestNamespace}} +spec: + scaleTargetRef: + name: {{.DeploymentName}} + pollingInterval: 5 + minReplicaCount: 0 + maxReplicaCount: 1 + cooldownPeriod: 10 + triggers: + - authenticationRef: + name: {{.TriggerAuthName}} + metadata: + activationUnprocessedEventThreshold: '10' + blobContainer: {{.CheckpointContainerName}} + checkpointStrategy: dapr + consumerGroup: {{.ConsumerGroup}} + unprocessedEventThreshold: '64' + eventHubName: {{.EventHubName}} + eventHubNamespace: {{.EventHubNamespaceName}} + storageAccountName: {{.StorageAccountName}} + type: azure-eventhub +` +) + +func TestScaler(t *testing.T) { + ctx := context.Background() + t.Log("--- setting up ---") + require.NotEmpty(t, storageConnectionString, 
"TF_AZURE_STORAGE_CONNECTION_STRING env variable is required for azure eventhub test") + + eventHubHelper := azurehelper.NewEventHubHelper(t) + eventHubHelper.CreateEventHub(ctx, t) + blobClient, err := azblob.NewClientFromConnectionString(storageConnectionString, nil) + assert.NoErrorf(t, err, "cannot create the queue client - %s", err) + _, err = blobClient.CreateContainer(ctx, checkpointContainerName, nil) + assert.NoErrorf(t, err, "cannot create the container - %s", err) + + // Create kubernetes resources + kc := GetKubernetesClient(t) + data, templates := getTemplateData(eventHubHelper) + + CreateKubernetesResources(t, kc, testNamespace, data, templates) + + // We need to wait till consumer creates the checkpoint + eventHubHelper.PublishEventHubdEvents(ctx, t, 1) + assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 1, 60, 1), + "replica count should be 1 after 1 minute") + time.Sleep(time.Duration(60) * time.Second) + KubectlApplyMultipleWithTemplate(t, data, []Template{{Name: "scaledObjectTemplate", Config: scaledObjectTemplate}}) + + assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 0, 60, 1), + "replica count should be 0 after 1 minute") + + // test scaling + testActivation(ctx, t, kc, eventHubHelper) + testScaleOut(ctx, t, kc, eventHubHelper) + testScaleIn(t, kc) + + // cleanup + DeleteKubernetesResources(t, testNamespace, data, templates) + eventHubHelper.DeleteEventHub(ctx, t) + _, err = blobClient.DeleteContainer(ctx, checkpointContainerName, nil) + assert.NoErrorf(t, err, "cannot delete the container - %s", err) +} + +func getTemplateData(eventHubHelper azurehelper.EventHubHelper) (templateData, []Template) { + base64EventhubConnection := base64.StdEncoding.EncodeToString([]byte(eventHubHelper.ConnectionString())) + base64StorageConnection := base64.StdEncoding.EncodeToString([]byte(storageConnectionString)) + + return templateData{ + TestNamespace: testNamespace, + SecretName: 
secretName, + EventHubConnection: eventHubHelper.ConnectionString(), + StorageConnection: storageConnectionString, + Base64EventHubConnection: base64EventhubConnection, + Base64StorageConnection: base64StorageConnection, + StorageAccountName: storageAccountName, + StorageAccountKey: storageAccountKey, + CheckpointContainerName: checkpointContainerName, + DeploymentName: deploymentName, + ScaledObjectName: scaledObjectName, + TriggerAuthName: triggerAuthName, + ConsumerGroup: eventhubConsumerGroup, + EventHubName: eventHubHelper.EventHub(), + EventHubNamespaceName: eventHubHelper.EventHubNamespace(), + }, []Template{ + {Name: "secretTemplate", Config: secretTemplate}, + {Name: "deploymentTemplate", Config: deploymentTemplate}, + {Name: "triggerAuthTemplate", Config: triggerAuthTemplate}, + } +} + +func testActivation(ctx context.Context, t *testing.T, kc *kubernetes.Clientset, eventHubHelper azurehelper.EventHubHelper) { + t.Log("--- testing activation ---") + eventHubHelper.PublishEventHubdEvents(ctx, t, 8) + + AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, testNamespace, 0, 60) +} + +func testScaleOut(ctx context.Context, t *testing.T, kc *kubernetes.Clientset, eventHubHelper azurehelper.EventHubHelper) { + t.Log("--- testing scale out ---") + eventHubHelper.PublishEventHubdEvents(ctx, t, 8) + + assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 1, 60, 1), + "replica count should be 1 after 1 minute") +} + +func testScaleIn(t *testing.T, kc *kubernetes.Clientset) { + t.Log("--- testing scale in ---") + + assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 0, 60, 1), + "replica count should be 0 after 1 minute") +} + +func getValueFromConnectionString(storageAccountConnectionString string, keyName string) string { + items := strings.Split(storageAccountConnectionString, ";") + for _, item := range items { + keyValue := strings.SplitN(item, "=", 2) + if keyValue[0] == 
keyName { + return keyValue[1] + } + } + + return "" +} From 32ea028310c2805428e8380b6fd31556f555140a Mon Sep 17 00:00:00 2001 From: SpiritZhou Date: Tue, 20 Aug 2024 06:51:27 +0800 Subject: [PATCH 08/16] Provide CloudEvents around the management of ScaledJobs resources (#6072) * Update Signed-off-by: SpiritZhou * Update ChangeLog Signed-off-by: SpiritZhou * Update Signed-off-by: SpiritZhou --------- Signed-off-by: SpiritZhou Signed-off-by: Fira Curie --- CHANGELOG.md | 1 + apis/eventing/v1alpha1/cloudevent_types.go | 18 +++- cmd/operator/main.go | 2 +- .../eventing.keda.sh_cloudeventsources.yaml | 8 ++ ...ting.keda.sh_clustercloudeventsources.yaml | 8 ++ controllers/keda/scaledjob_controller.go | 16 +-- controllers/keda/scaledjob_finalizer.go | 4 +- controllers/keda/suite_test.go | 6 +- pkg/common/message/message.go | 4 + tests/internals/events/events_test.go | 102 ++++++++++++++++++ 10 files changed, 154 insertions(+), 15 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4e7d28889e2..f0d98673759 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -58,6 +58,7 @@ To learn more about active deprecations, we recommend checking [GitHub Discussio ### New - **CloudEventSource**: Introduce ClusterCloudEventSource ([#3533](https://github.com/kedacore/keda/issues/3533)) +- **CloudEventSource**: Provide CloudEvents around the management of ScaledJobs resources ([#3523](https://github.com/kedacore/keda/issues/3523)) #### Experimental diff --git a/apis/eventing/v1alpha1/cloudevent_types.go b/apis/eventing/v1alpha1/cloudevent_types.go index fdab5229c96..c3cfaecad89 100644 --- a/apis/eventing/v1alpha1/cloudevent_types.go +++ b/apis/eventing/v1alpha1/cloudevent_types.go @@ -17,7 +17,7 @@ limitations under the License. 
package v1alpha1 // CloudEventType contains the list of cloudevent types -// +kubebuilder:validation:Enum=keda.scaledobject.ready.v1;keda.scaledobject.failed.v1 +// +kubebuilder:validation:Enum=keda.scaledobject.ready.v1;keda.scaledobject.failed.v1;keda.scaledobject.removed.v1;keda.scaledjob.ready.v1;keda.scaledjob.failed.v1;keda.scaledjob.removed.v1 type CloudEventType string const ( @@ -27,8 +27,20 @@ const ( // ScaledObjectFailedType is for event when creating ScaledObject failed ScaledObjectFailedType CloudEventType = "keda.scaledobject.failed.v1" - // ScaledObjectFailedType is for event when removed ScaledObject + // ScaledObjectRemovedType is for event when removed ScaledObject ScaledObjectRemovedType CloudEventType = "keda.scaledobject.removed.v1" + + // ScaledJobReadyType is for event when a new ScaledJob is ready + ScaledJobReadyType CloudEventType = "keda.scaledjob.ready.v1" + + // ScaledJobFailedType is for event when creating ScaledJob failed + ScaledJobFailedType CloudEventType = "keda.scaledjob.failed.v1" + + // ScaledJobRemovedType is for event when removed ScaledJob + ScaledJobRemovedType CloudEventType = "keda.scaledjob.removed.v1" ) -var AllEventTypes = []CloudEventType{ScaledObjectFailedType, ScaledObjectReadyType} +var AllEventTypes = []CloudEventType{ + ScaledObjectFailedType, ScaledObjectReadyType, ScaledObjectRemovedType, + ScaledJobFailedType, ScaledJobReadyType, ScaledJobRemovedType, +} diff --git a/cmd/operator/main.go b/cmd/operator/main.go index 2ea6ed90f90..aa81dc79fce 100644 --- a/cmd/operator/main.go +++ b/cmd/operator/main.go @@ -236,7 +236,7 @@ func main() { Client: mgr.GetClient(), Scheme: mgr.GetScheme(), GlobalHTTPTimeout: globalHTTPTimeout, - Recorder: eventRecorder, + EventEmitter: eventEmitter, SecretsLister: secretInformer.Lister(), SecretsSynced: secretInformer.Informer().HasSynced, }).SetupWithManager(mgr, controller.Options{ diff --git a/config/crd/bases/eventing.keda.sh_cloudeventsources.yaml 
b/config/crd/bases/eventing.keda.sh_cloudeventsources.yaml index 3e1c957a6d2..d121df22074 100644 --- a/config/crd/bases/eventing.keda.sh_cloudeventsources.yaml +++ b/config/crd/bases/eventing.keda.sh_cloudeventsources.yaml @@ -88,6 +88,10 @@ spec: enum: - keda.scaledobject.ready.v1 - keda.scaledobject.failed.v1 + - keda.scaledobject.removed.v1 + - keda.scaledjob.ready.v1 + - keda.scaledjob.failed.v1 + - keda.scaledjob.removed.v1 type: string type: array includedEventTypes: @@ -97,6 +101,10 @@ spec: enum: - keda.scaledobject.ready.v1 - keda.scaledobject.failed.v1 + - keda.scaledobject.removed.v1 + - keda.scaledjob.ready.v1 + - keda.scaledjob.failed.v1 + - keda.scaledjob.removed.v1 type: string type: array type: object diff --git a/config/crd/bases/eventing.keda.sh_clustercloudeventsources.yaml b/config/crd/bases/eventing.keda.sh_clustercloudeventsources.yaml index 55c806370dd..8c81de1f594 100644 --- a/config/crd/bases/eventing.keda.sh_clustercloudeventsources.yaml +++ b/config/crd/bases/eventing.keda.sh_clustercloudeventsources.yaml @@ -86,6 +86,10 @@ spec: enum: - keda.scaledobject.ready.v1 - keda.scaledobject.failed.v1 + - keda.scaledobject.removed.v1 + - keda.scaledjob.ready.v1 + - keda.scaledjob.failed.v1 + - keda.scaledjob.removed.v1 type: string type: array includedEventTypes: @@ -95,6 +99,10 @@ spec: enum: - keda.scaledobject.ready.v1 - keda.scaledobject.failed.v1 + - keda.scaledobject.removed.v1 + - keda.scaledjob.ready.v1 + - keda.scaledjob.failed.v1 + - keda.scaledjob.removed.v1 type: string type: array type: object diff --git a/controllers/keda/scaledjob_controller.go b/controllers/keda/scaledjob_controller.go index 845ba5aca90..bb1193be8b2 100755 --- a/controllers/keda/scaledjob_controller.go +++ b/controllers/keda/scaledjob_controller.go @@ -31,7 +31,6 @@ import ( "k8s.io/apimachinery/pkg/runtime" corev1listers "k8s.io/client-go/listers/core/v1" "k8s.io/client-go/tools/cache" - "k8s.io/client-go/tools/record" ctrl "sigs.k8s.io/controller-runtime" 
"sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" @@ -39,8 +38,11 @@ import ( "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/predicate" + eventingv1alpha1 "github.com/kedacore/keda/v2/apis/eventing/v1alpha1" kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1" kedacontrollerutil "github.com/kedacore/keda/v2/controllers/keda/util" + "github.com/kedacore/keda/v2/pkg/common/message" + "github.com/kedacore/keda/v2/pkg/eventemitter" "github.com/kedacore/keda/v2/pkg/eventreason" "github.com/kedacore/keda/v2/pkg/metricscollector" "github.com/kedacore/keda/v2/pkg/scaling" @@ -56,7 +58,7 @@ type ScaledJobReconciler struct { client.Client Scheme *runtime.Scheme GlobalHTTPTimeout time.Duration - Recorder record.EventRecorder + EventEmitter eventemitter.EventHandler scaledJobGenerations *sync.Map scaleHandler scaling.ScaleHandler @@ -133,7 +135,7 @@ func (r *ScaledJobReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( if !scaledJob.Status.Conditions.AreInitialized() { conditions := kedav1alpha1.GetInitializedConditions() if err := kedastatus.SetStatusConditions(ctx, r.Client, reqLogger, scaledJob, conditions); err != nil { - r.Recorder.Event(scaledJob, corev1.EventTypeWarning, eventreason.ScaledJobUpdateFailed, err.Error()) + r.EventEmitter.Emit(scaledJob, req.NamespacedName.Namespace, corev1.EventTypeWarning, eventingv1alpha1.ScaledJobFailedType, eventreason.ScaledJobUpdateFailed, err.Error()) return ctrl.Result{}, err } } @@ -143,7 +145,7 @@ func (r *ScaledJobReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( errMsg := "ScaledJob.spec.jobTargetRef not found" err := fmt.Errorf(errMsg) reqLogger.Error(err, errMsg) - r.Recorder.Event(scaledJob, corev1.EventTypeWarning, eventreason.ScaledJobCheckFailed, errMsg) + r.EventEmitter.Emit(scaledJob, req.NamespacedName.Namespace, corev1.EventTypeWarning, eventingv1alpha1.ScaledJobFailedType, eventreason.ScaledJobCheckFailed, errMsg) 
return ctrl.Result{}, err } conditions := scaledJob.Status.Conditions.DeepCopy() @@ -152,18 +154,18 @@ func (r *ScaledJobReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( reqLogger.Error(err, msg) conditions.SetReadyCondition(metav1.ConditionFalse, "ScaledJobCheckFailed", msg) conditions.SetActiveCondition(metav1.ConditionUnknown, "UnknownState", "ScaledJob check failed") - r.Recorder.Event(scaledJob, corev1.EventTypeWarning, eventreason.ScaledJobCheckFailed, msg) + r.EventEmitter.Emit(scaledJob, req.NamespacedName.Namespace, corev1.EventTypeWarning, eventingv1alpha1.ScaledJobFailedType, eventreason.ScaledJobCheckFailed, msg) } else { wasReady := conditions.GetReadyCondition() if wasReady.IsFalse() || wasReady.IsUnknown() { - r.Recorder.Event(scaledJob, corev1.EventTypeNormal, eventreason.ScaledJobReady, "ScaledJob is ready for scaling") + r.EventEmitter.Emit(scaledJob, req.NamespacedName.Namespace, corev1.EventTypeNormal, eventingv1alpha1.ScaledObjectReadyType, eventreason.ScaledJobReady, message.ScaledJobReadyMsg) } reqLogger.V(1).Info(msg) conditions.SetReadyCondition(metav1.ConditionTrue, "ScaledJobReady", msg) } if err := kedastatus.SetStatusConditions(ctx, r.Client, reqLogger, scaledJob, &conditions); err != nil { - r.Recorder.Event(scaledJob, corev1.EventTypeWarning, eventreason.ScaledJobUpdateFailed, err.Error()) + r.EventEmitter.Emit(scaledJob, req.NamespacedName.Namespace, corev1.EventTypeWarning, eventingv1alpha1.ScaledJobFailedType, eventreason.ScaledJobUpdateFailed, err.Error()) return ctrl.Result{}, err } diff --git a/controllers/keda/scaledjob_finalizer.go b/controllers/keda/scaledjob_finalizer.go index 99636eca2e9..61c4535daae 100644 --- a/controllers/keda/scaledjob_finalizer.go +++ b/controllers/keda/scaledjob_finalizer.go @@ -22,8 +22,10 @@ import ( "github.com/go-logr/logr" corev1 "k8s.io/api/core/v1" + eventingv1alpha1 "github.com/kedacore/keda/v2/apis/eventing/v1alpha1" kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1" 
"github.com/kedacore/keda/v2/controllers/keda/util" + "github.com/kedacore/keda/v2/pkg/common/message" "github.com/kedacore/keda/v2/pkg/eventreason" ) @@ -57,7 +59,7 @@ func (r *ScaledJobReconciler) finalizeScaledJob(ctx context.Context, logger logr } logger.Info("Successfully finalized ScaledJob") - r.Recorder.Event(scaledJob, corev1.EventTypeNormal, eventreason.ScaledJobDeleted, "ScaledJob was deleted") + r.EventEmitter.Emit(scaledJob, namespacedName, corev1.EventTypeWarning, eventingv1alpha1.ScaledJobRemovedType, eventreason.ScaledJobDeleted, message.ScaledJobRemoved) return nil } diff --git a/controllers/keda/suite_test.go b/controllers/keda/suite_test.go index 8659742f950..482a45b135c 100644 --- a/controllers/keda/suite_test.go +++ b/controllers/keda/suite_test.go @@ -101,9 +101,9 @@ var _ = BeforeSuite(func() { Expect(err).ToNot(HaveOccurred()) err = (&ScaledJobReconciler{ - Client: k8sManager.GetClient(), - Scheme: k8sManager.GetScheme(), - Recorder: k8sManager.GetEventRecorderFor("keda-operator"), + Client: k8sManager.GetClient(), + Scheme: k8sManager.GetScheme(), + EventEmitter: eventemitter.NewEventEmitter(k8sManager.GetClient(), k8sManager.GetEventRecorderFor("keda-operator"), "kubernetes-default", nil), }).SetupWithManager(k8sManager, controller.Options{}) Expect(err).ToNot(HaveOccurred()) diff --git a/pkg/common/message/message.go b/pkg/common/message/message.go index 490b77a6f0e..b4a6458f3e2 100644 --- a/pkg/common/message/message.go +++ b/pkg/common/message/message.go @@ -30,4 +30,8 @@ const ( ScaleTargetNoSubresourceMsg = "Target resource doesn't expose /scale subresource" ScaledObjectRemoved = "ScaledObject was deleted" + + ScaledJobReadyMsg = "ScaledJob is ready for scaling" + + ScaledJobRemoved = "ScaledJob was deleted" ) diff --git a/tests/internals/events/events_test.go b/tests/internals/events/events_test.go index 90a81aa28cc..5f3c4983696 100644 --- a/tests/internals/events/events_test.go +++ b/tests/internals/events/events_test.go @@ -28,6 
+28,9 @@ var ( scaledObjectName = fmt.Sprintf("%s-so", testName) scaledObjectTargetNotFoundName = fmt.Sprintf("%s-so-target-error", testName) scaledObjectTargetNoSubresourceName = fmt.Sprintf("%s-so-target-no-subresource", testName) + + scaledJobName = fmt.Sprintf("%s-sj", testName) + scaledJobErrName = fmt.Sprintf("%s-sj-target-error", testName) ) type templateData struct { @@ -38,6 +41,8 @@ type templateData struct { DeploymentName string MonitoredDeploymentName string DaemonsetName string + ScaledJobName string + ScaledJobErrName string } const ( @@ -155,6 +160,69 @@ spec: podSelector: 'app={{.DeploymentName}}' value: '1' ` + + scaledJobTemplate = ` +apiVersion: keda.sh/v1alpha1 +kind: ScaledJob +metadata: + name: {{.ScaledJobName}} + namespace: {{.TestNamespace}} +spec: + jobTargetRef: + template: + spec: + containers: + - name: external-executor + image: busybox + command: + - sleep + - "30" + imagePullPolicy: IfNotPresent + restartPolicy: Never + backoffLimit: 1 + pollingInterval: 5 + minReplicaCount: 0 + maxReplicaCount: 8 + successfulJobsHistoryLimit: 0 + failedJobsHistoryLimit: 0 + triggers: + - type: kubernetes-workload + metadata: + podSelector: 'app={{.MonitoredDeploymentName}}' + value: '1' +` + + scaledJobErrTemplate = ` +apiVersion: keda.sh/v1alpha1 +kind: ScaledJob +metadata: + name: {{.ScaledJobErrName}} + namespace: {{.TestNamespace}} +spec: + jobTargetRef: + template: + spec: + containers: + - name: external-executor + image: busybox + command: + - sleep + - "30" + imagePullPolicy: IfNotPresent + restartPolicy: Never + backoffLimit: 1 + pollingInterval: 5 + minReplicaCount: 0 + maxReplicaCount: 8 + successfulJobsHistoryLimit: 0 + failedJobsHistoryLimit: 0 + triggers: + - type: cpu + name: x + metadata: + typex: Utilization + value: "50" +` ) func TestEvents(t *testing.T) { @@ -172,6 +240,8 @@ func TestEvents(t *testing.T) { testTargetNotFoundErr(t, kc, data) testTargetNotSupportEventErr(t, kc, data) + testScaledJobNormalEvent(t, kc, data) + 
testScaledJobTargetNotSupportEventErr(t, kc, data) // cleanup DeleteKubernetesResources(t, testNamespace, data, templates) } @@ -185,6 +255,8 @@ func getTemplateData() (templateData, []Template) { ScaledObjectName: scaledObjectName, ScaledObjectTargetNotFoundName: scaledObjectTargetNotFoundName, ScaledObjectTargetNoSubresourceName: scaledObjectTargetNoSubresourceName, + ScaledJobName: scaledJobName, + ScaledJobErrName: scaledJobErrName, }, []Template{} } @@ -210,6 +282,10 @@ func testNormalEvent(t *testing.T, kc *kubernetes.Clientset, data templateData) checkingEvent(t, scaledObjectName, 0, eventreason.KEDAScalersStarted, fmt.Sprintf(message.ScalerIsBuiltMsg, "kubernetes-workload")) checkingEvent(t, scaledObjectName, 1, eventreason.KEDAScalersStarted, message.ScalerStartMsg) checkingEvent(t, scaledObjectName, 2, eventreason.ScaledObjectReady, message.ScalerReadyMsg) + + KubectlDeleteWithTemplate(t, data, "deploymentTemplate", deploymentTemplate) + KubectlDeleteWithTemplate(t, data, "monitoredDeploymentName", monitoredDeploymentTemplate) + KubectlDeleteWithTemplate(t, data, "scaledObjectTemplate", scaledObjectTemplate) } func testTargetNotFoundErr(t *testing.T, _ *kubernetes.Clientset, data templateData) { @@ -228,3 +304,29 @@ func testTargetNotSupportEventErr(t *testing.T, _ *kubernetes.Clientset, data te checkingEvent(t, scaledObjectTargetNoSubresourceName, -2, eventreason.ScaledObjectCheckFailed, message.ScaleTargetNoSubresourceMsg) checkingEvent(t, scaledObjectTargetNoSubresourceName, -1, eventreason.ScaledObjectCheckFailed, message.ScaleTargetErrMsg) } + +func testScaledJobNormalEvent(t *testing.T, kc *kubernetes.Clientset, data templateData) { + t.Log("--- testing ScaledJob normal event ---") + + KubectlApplyWithTemplate(t, data, "deploymentTemplate", deploymentTemplate) + KubectlApplyWithTemplate(t, data, "monitoredDeploymentName", monitoredDeploymentTemplate) + KubectlApplyWithTemplate(t, data, "scaledJobTemplate", scaledJobTemplate) + + 
KubernetesScaleDeployment(t, kc, monitoredDeploymentName, 2, testNamespace) + assert.True(t, WaitForJobCount(t, kc, testNamespace, 2, 60, 1), + "replica count should be 2 after 1 minute") + checkingEvent(t, scaledJobName, 0, eventreason.KEDAScalersStarted, fmt.Sprintf(message.ScalerIsBuiltMsg, "kubernetes-workload")) + checkingEvent(t, scaledJobName, 1, eventreason.KEDAScalersStarted, message.ScalerStartMsg) + checkingEvent(t, scaledJobName, 2, eventreason.ScaledJobReady, message.ScaledJobReadyMsg) + + KubectlDeleteWithTemplate(t, data, "deploymentTemplate", deploymentTemplate) + KubectlDeleteWithTemplate(t, data, "monitoredDeploymentName", monitoredDeploymentTemplate) + KubectlDeleteWithTemplate(t, data, "scaledJobTemplate", scaledJobTemplate) +} + +func testScaledJobTargetNotSupportEventErr(t *testing.T, _ *kubernetes.Clientset, data templateData) { + t.Log("--- testing target not support error event ---") + + KubectlApplyWithTemplate(t, data, "scaledJobErrTemplate", scaledJobErrTemplate) + checkingEvent(t, scaledJobErrName, -1, eventreason.ScaledJobCheckFailed, "Failed to ensure ScaledJob is correctly created") +} From 9c088222f19d63a005321f7b6fddb7750a683cd4 Mon Sep 17 00:00:00 2001 From: rickbrouwer Date: Tue, 20 Aug 2024 17:19:06 +0200 Subject: [PATCH 09/16] Refactor IBM MQ scaler and remove and deprecate variables (#6034) Signed-off-by: Rick Brouwer Signed-off-by: Fira Curie --- CHANGELOG.md | 4 +- pkg/scalers/ibmmq_scaler.go | 217 ++++++++++-------------------- pkg/scalers/ibmmq_scaler_test.go | 54 ++++---- tests/scalers/ibmmq/ibmmq_test.go | 3 +- 4 files changed, 100 insertions(+), 178 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index f0d98673759..25ef88e5827 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -76,11 +76,13 @@ Here is an overview of all new **experimental** features: ### Deprecations +- TODO ([#XXX](https://github.com/kedacore/keda/issues/XXX)) + You can find all deprecations in [this 
overview](https://github.com/kedacore/keda/issues?q=is%3Aissue+is%3Aopen+sort%3Aupdated-desc+label%3Abreaking-change) and [join the discussion here](https://github.com/kedacore/keda/discussions/categories/deprecations). New deprecation(s): -- TODO ([#XXX](https://github.com/kedacore/keda/issues/XXX)) +- IBM MQ Scaler: Remove and deprecate unused variables in IBM MQ scaler ([#6033](https://github.com/kedacore/keda/issues/6033)) ### Breaking Changes diff --git a/pkg/scalers/ibmmq_scaler.go b/pkg/scalers/ibmmq_scaler.go index efd83d8c968..0b0b4c893f4 100644 --- a/pkg/scalers/ibmmq_scaler.go +++ b/pkg/scalers/ibmmq_scaler.go @@ -8,9 +8,7 @@ import ( "io" "net/http" "net/url" - "strconv" "strings" - "time" "github.com/go-logr/logr" v2 "k8s.io/api/autoscaling/v2" @@ -20,39 +18,28 @@ import ( kedautil "github.com/kedacore/keda/v2/pkg/util" ) -// Default variables and settings -const ( - defaultTargetQueueDepth = 20 - defaultTLSDisabled = false -) - -// IBMMQScaler assigns struct data pointer to metadata variable -type IBMMQScaler struct { - metricType v2.MetricTargetType - metadata *IBMMQMetadata - defaultHTTPTimeout time.Duration - httpClient *http.Client - logger logr.Logger +type ibmmqScaler struct { + metricType v2.MetricTargetType + metadata ibmmqMetadata + httpClient *http.Client + logger logr.Logger } -// IBMMQMetadata Metadata used by KEDA to query IBM MQ queue depth and scale -type IBMMQMetadata struct { - host string - queueManager string - queueName string - username string - password string - queueDepth int64 - activationQueueDepth int64 - tlsDisabled bool - triggerIndex int - - // TLS - ca string - cert string - key string - keyPassword string - unsafeSsl bool +type ibmmqMetadata struct { + Host string `keda:"name=host, order=triggerMetadata"` + QueueName string `keda:"name=queueName, order=triggerMetadata"` + QueueDepth int64 `keda:"name=queueDepth, order=triggerMetadata, default=20"` + ActivationQueueDepth int64 `keda:"name=activationQueueDepth, 
order=triggerMetadata, default=0"` + Username string `keda:"name=username, order=authParams;resolvedEnv;triggerMetadata"` + Password string `keda:"name=password, order=authParams;resolvedEnv;triggerMetadata"` + UnsafeSsl bool `keda:"name=unsafeSsl, order=triggerMetadata, default=false"` + TLS bool `keda:"name=tls, order=triggerMetadata, default=false"` // , deprecated=use unsafeSsl instead + CA string `keda:"name=ca, order=authParams, optional"` + Cert string `keda:"name=cert, order=authParams, optional"` + Key string `keda:"name=key, order=authParams, optional"` + KeyPassword string `keda:"name=keyPassword, order=authParams, optional"` + + triggerIndex int } // CommandResponse Full structured response from MQ admin REST query @@ -71,142 +58,79 @@ type Parameters struct { Curdepth int `json:"curdepth"` } -// NewIBMMQScaler creates a new IBM MQ scaler +func (m *ibmmqMetadata) Validate() error { + _, err := url.ParseRequestURI(m.Host) + if err != nil { + return fmt.Errorf("invalid URL: %w", err) + } + + if (m.Cert == "") != (m.Key == "") { + return fmt.Errorf("both cert and key must be provided when using TLS") + } + + // TODO: DEPRECATED to be removed in v2.18 + if m.TLS && m.UnsafeSsl { + return fmt.Errorf("'tls' and 'unsafeSsl' are both specified. 
Please use only 'unsafeSsl'") + } + + return nil +} + func NewIBMMQScaler(config *scalersconfig.ScalerConfig) (Scaler, error) { metricType, err := GetMetricTargetType(config) if err != nil { return nil, fmt.Errorf("error getting scaler metric type: %w", err) } + logger := InitializeLogger(config, "ibm_mq_scaler") + meta, err := parseIBMMQMetadata(config) if err != nil { return nil, fmt.Errorf("error parsing IBM MQ metadata: %w", err) } - httpClient := kedautil.CreateHTTPClient(config.GlobalHTTPTimeout, meta.tlsDisabled) + // TODO: DEPRECATED to be removed in v2.18 + if meta.TLS { + logger.Info("The 'tls' setting is DEPRECATED and will be removed in v2.18 - Use 'unsafeSsl' instead") + meta.UnsafeSsl = meta.TLS + } + + httpClient := kedautil.CreateHTTPClient(config.GlobalHTTPTimeout, meta.UnsafeSsl) - // Configure TLS if cert and key are specified - if meta.cert != "" && meta.key != "" { - tlsConfig, err := kedautil.NewTLSConfigWithPassword(meta.cert, meta.key, meta.keyPassword, meta.ca, meta.unsafeSsl) + if meta.Cert != "" && meta.Key != "" { + tlsConfig, err := kedautil.NewTLSConfigWithPassword(meta.Cert, meta.Key, meta.KeyPassword, meta.CA, meta.UnsafeSsl) if err != nil { return nil, err } httpClient.Transport = kedautil.CreateHTTPTransportWithTLSConfig(tlsConfig) } - return &IBMMQScaler{ - metricType: metricType, - metadata: meta, - defaultHTTPTimeout: config.GlobalHTTPTimeout, - httpClient: httpClient, - logger: InitializeLogger(config, "ibm_mq_scaler"), + return &ibmmqScaler{ + metricType: metricType, + metadata: meta, + httpClient: httpClient, + logger: logger, }, nil } -// Close closes and returns nil -func (s *IBMMQScaler) Close(context.Context) error { +func (s *ibmmqScaler) Close(context.Context) error { if s.httpClient != nil { s.httpClient.CloseIdleConnections() } return nil } -// parseIBMMQMetadata checks the existence of and validates the MQ connection data provided -func parseIBMMQMetadata(config *scalersconfig.ScalerConfig) (*IBMMQMetadata, error) { 
- meta := IBMMQMetadata{} - - if val, ok := config.TriggerMetadata["host"]; ok { - _, err := url.ParseRequestURI(val) - if err != nil { - return nil, fmt.Errorf("invalid URL: %w", err) - } - meta.host = val - } else { - return nil, fmt.Errorf("no host URI given") - } - - if val, ok := config.TriggerMetadata["queueManager"]; ok { - meta.queueManager = val - } else { - return nil, fmt.Errorf("no queue manager given") - } - - if val, ok := config.TriggerMetadata["queueName"]; ok { - meta.queueName = val - } else { - return nil, fmt.Errorf("no queue name given") +func parseIBMMQMetadata(config *scalersconfig.ScalerConfig) (ibmmqMetadata, error) { + meta := ibmmqMetadata{triggerIndex: config.TriggerIndex} + if err := config.TypedConfig(&meta); err != nil { + return meta, err } - - if val, ok := config.TriggerMetadata["queueDepth"]; ok && val != "" { - queueDepth, err := strconv.ParseInt(val, 10, 64) - if err != nil { - return nil, fmt.Errorf("invalid queueDepth - must be an integer") - } - meta.queueDepth = queueDepth - } else { - fmt.Println("No target depth defined - setting default") - meta.queueDepth = defaultTargetQueueDepth - } - - meta.activationQueueDepth = 0 - if val, ok := config.TriggerMetadata["activationQueueDepth"]; ok && val != "" { - activationQueueDepth, err := strconv.ParseInt(val, 10, 64) - if err != nil { - return nil, fmt.Errorf("invalid activationQueueDepth - must be an integer") - } - meta.activationQueueDepth = activationQueueDepth - } - - if val, ok := config.TriggerMetadata["tls"]; ok { - tlsDisabled, err := strconv.ParseBool(val) - if err != nil { - return nil, fmt.Errorf("invalid tls setting: %w", err) - } - meta.tlsDisabled = tlsDisabled - } else { - fmt.Println("No tls setting defined - setting default") - meta.tlsDisabled = defaultTLSDisabled - } - - if val, ok := config.AuthParams["username"]; ok && val != "" { - meta.username = val - } else if val, ok := config.TriggerMetadata["usernameFromEnv"]; ok && val != "" { - meta.username = 
config.ResolvedEnv[val] - } else { - return nil, fmt.Errorf("no username given") - } - - if val, ok := config.AuthParams["password"]; ok && val != "" { - meta.password = val - } else if val, ok := config.TriggerMetadata["passwordFromEnv"]; ok && val != "" { - meta.password = config.ResolvedEnv[val] - } else { - return nil, fmt.Errorf("no password given") - } - - // TLS config (optional) - meta.ca = config.AuthParams["ca"] - meta.cert = config.AuthParams["cert"] - meta.key = config.AuthParams["key"] - meta.keyPassword = config.AuthParams["keyPassword"] - - meta.unsafeSsl = false - if val, ok := config.TriggerMetadata["unsafeSsl"]; ok { - boolVal, err := strconv.ParseBool(val) - if err != nil { - return nil, fmt.Errorf("failed to parse unsafeSsl value. Must be either true or false") - } - meta.unsafeSsl = boolVal - } - - meta.triggerIndex = config.TriggerIndex - return &meta, nil + return meta, nil } -// getQueueDepthViaHTTP returns the depth of the MQ Queue from the Admin endpoint -func (s *IBMMQScaler) getQueueDepthViaHTTP(ctx context.Context) (int64, error) { - queue := s.metadata.queueName - url := s.metadata.host +func (s *ibmmqScaler) getQueueDepthViaHTTP(ctx context.Context) (int64, error) { + queue := s.metadata.QueueName + url := s.metadata.Host var requestJSON = []byte(`{"type": "runCommandJSON", "command": "display", "qualifier": "qlocal", "name": "` + queue + `", "responseParameters" : ["CURDEPTH"]}`) req, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewBuffer(requestJSON)) @@ -216,7 +140,7 @@ func (s *IBMMQScaler) getQueueDepthViaHTTP(ctx context.Context) (int64, error) { req.Header.Set("ibm-mq-rest-csrf-token", "value") req.Header.Set("Content-Type", "application/json") - req.SetBasicAuth(s.metadata.username, s.metadata.password) + req.SetBasicAuth(s.metadata.Username, s.metadata.Password) resp, err := s.httpClient.Do(req) if err != nil { @@ -251,20 +175,19 @@ func (s *IBMMQScaler) getQueueDepthViaHTTP(ctx context.Context) (int64, error) { 
return int64(response.CommandResponse[0].Parameters.Curdepth), nil } -// GetMetricSpecForScaling returns the MetricSpec for the Horizontal Pod Autoscaler -func (s *IBMMQScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec { +func (s *ibmmqScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec { + metricName := kedautil.NormalizeString(fmt.Sprintf("ibmmq-%s", s.metadata.QueueName)) externalMetric := &v2.ExternalMetricSource{ Metric: v2.MetricIdentifier{ - Name: GenerateMetricNameWithIndex(s.metadata.triggerIndex, kedautil.NormalizeString(fmt.Sprintf("ibmmq-%s", s.metadata.queueName))), + Name: GenerateMetricNameWithIndex(s.metadata.triggerIndex, metricName), }, - Target: GetMetricTarget(s.metricType, s.metadata.queueDepth), + Target: GetMetricTarget(s.metricType, s.metadata.QueueDepth), } metricSpec := v2.MetricSpec{External: externalMetric, Type: externalMetricType} return []v2.MetricSpec{metricSpec} } -// GetMetricsAndActivity returns value for a supported metric and an error if there is a problem getting the metric -func (s *IBMMQScaler) GetMetricsAndActivity(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, bool, error) { +func (s *ibmmqScaler) GetMetricsAndActivity(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, bool, error) { queueDepth, err := s.getQueueDepthViaHTTP(ctx) if err != nil { return []external_metrics.ExternalMetricValue{}, false, fmt.Errorf("error inspecting IBM MQ queue depth: %w", err) @@ -272,5 +195,5 @@ func (s *IBMMQScaler) GetMetricsAndActivity(ctx context.Context, metricName stri metric := GenerateMetricInMili(metricName, float64(queueDepth)) - return []external_metrics.ExternalMetricValue{metric}, queueDepth > s.metadata.activationQueueDepth, nil + return []external_metrics.ExternalMetricValue{metric}, queueDepth > s.metadata.ActivationQueueDepth, nil } diff --git a/pkg/scalers/ibmmq_scaler_test.go b/pkg/scalers/ibmmq_scaler_test.go index 
c3e2430043f..30a7fc4b132 100644 --- a/pkg/scalers/ibmmq_scaler_test.go +++ b/pkg/scalers/ibmmq_scaler_test.go @@ -7,7 +7,6 @@ import ( "net/http" "net/http/httptest" "testing" - "time" "github.com/stretchr/testify/assert" @@ -16,8 +15,9 @@ import ( // Test host URLs for validation const ( - testValidMQQueueURL = "https://qmtest.qm2.eu-gb.mq.appdomain.cloud/ibmmq/rest/v2/admin/action/qmgr/QM1/mqsc" - testInvalidMQQueueURL = "testInvalidURL.com" + testValidMQQueueURL = "https://qmtest.qm2.eu-gb.mq.appdomain.cloud/ibmmq/rest/v2/admin/action/qmgr/QM1/mqsc" + testInvalidMQQueueURL = "testInvalidURL.com" + defaultTargetQueueDepth = 20 ) // Test data struct used for TestIBMMQParseMetadata @@ -50,29 +50,29 @@ var testIBMMQMetadata = []parseIBMMQMetadataTestData{ // Nothing passed {map[string]string{}, true, map[string]string{}}, // Properly formed metadata - {map[string]string{"host": testValidMQQueueURL, "queueManager": "testQueueManager", "queueName": "testQueue", "queueDepth": "10"}, false, map[string]string{"username": "testUsername", "password": "Pass123"}}, + {map[string]string{"host": testValidMQQueueURL, "queueName": "testQueue", "queueDepth": "10"}, false, map[string]string{"username": "testUsername", "password": "Pass123"}}, // Invalid queueDepth using a string - {map[string]string{"host": testValidMQQueueURL, "queueManager": "testQueueManager", "queueName": "testQueue", "queueDepth": "AA"}, true, map[string]string{"username": "testUsername", "password": "Pass123"}}, + {map[string]string{"host": testValidMQQueueURL, "queueName": "testQueue", "queueDepth": "AA"}, true, map[string]string{"username": "testUsername", "password": "Pass123"}}, // Invalid activationQueueDepth using a string - {map[string]string{"host": testValidMQQueueURL, "queueManager": "testQueueManager", "queueName": "testQueue", "queueDepth": "1", "activationQueueDepth": "AA"}, true, map[string]string{"username": "testUsername", "password": "Pass123"}}, + {map[string]string{"host": 
testValidMQQueueURL, "queueName": "testQueue", "queueDepth": "1", "activationQueueDepth": "AA"}, true, map[string]string{"username": "testUsername", "password": "Pass123"}}, // No host provided - {map[string]string{"queueManager": "testQueueManager", "queueName": "testQueue", "queueDepth": "10"}, true, map[string]string{"username": "testUsername", "password": "Pass123"}}, - // Missing queueManager - {map[string]string{"host": testValidMQQueueURL, "queueName": "testQueue", "queueDepth": "10"}, true, map[string]string{"username": "testUsername", "password": "Pass123"}}, + {map[string]string{"queueName": "testQueue", "queueDepth": "10"}, true, map[string]string{"username": "testUsername", "password": "Pass123"}}, // Missing queueName - {map[string]string{"host": testValidMQQueueURL, "queueManager": "testQueueManager", "queueDepth": "10"}, true, map[string]string{"username": "testUsername", "password": "Pass123"}}, + {map[string]string{"host": testValidMQQueueURL, "queueDepth": "10"}, true, map[string]string{"username": "testUsername", "password": "Pass123"}}, // Invalid URL - {map[string]string{"host": testInvalidMQQueueURL, "queueManager": "testQueueManager", "queueName": "testQueue", "queueDepth": "10"}, true, map[string]string{"username": "testUsername", "password": "Pass123"}}, + {map[string]string{"host": testInvalidMQQueueURL, "queueName": "testQueue", "queueDepth": "10"}, true, map[string]string{"username": "testUsername", "password": "Pass123"}}, // Properly formed authParams Basic Auth - {map[string]string{"host": testValidMQQueueURL, "queueManager": "testQueueManager", "queueName": "testQueue", "queueDepth": "10"}, false, map[string]string{"username": "testUsername", "password": "Pass123"}}, + {map[string]string{"host": testValidMQQueueURL, "queueName": "testQueue", "queueDepth": "10"}, false, map[string]string{"username": "testUsername", "password": "Pass123"}}, // Properly formed authParams Basic Auth and TLS - {map[string]string{"host": 
testValidMQQueueURL, "queueManager": "testQueueManager", "queueName": "testQueue", "queueDepth": "10"}, false, map[string]string{"username": "testUsername", "password": "Pass123", "ca": "cavalue", "cert": "certvalue", "key": "keyvalue"}}, + {map[string]string{"host": testValidMQQueueURL, "queueName": "testQueue", "queueDepth": "10"}, false, map[string]string{"username": "testUsername", "password": "Pass123", "ca": "cavalue", "cert": "certvalue", "key": "keyvalue"}}, + // No key provided + {map[string]string{"host": testValidMQQueueURL, "queueName": "testQueue", "queueDepth": "10"}, true, map[string]string{"username": "testUsername", "password": "Pass123", "ca": "cavalue", "cert": "certvalue"}}, // No username provided - {map[string]string{"host": testValidMQQueueURL, "queueManager": "testQueueManager", "queueName": "testQueue", "queueDepth": "10"}, true, map[string]string{"password": "Pass123"}}, + {map[string]string{"host": testValidMQQueueURL, "queueName": "testQueue", "queueDepth": "10"}, true, map[string]string{"password": "Pass123"}}, // No password provided - {map[string]string{"host": testValidMQQueueURL, "queueManager": "testQueueManager", "queueName": "testQueue", "queueDepth": "10"}, true, map[string]string{"username": "testUsername"}}, + {map[string]string{"host": testValidMQQueueURL, "queueName": "testQueue", "queueDepth": "10"}, true, map[string]string{"username": "testUsername"}}, // Wrong input unsafeSsl - {map[string]string{"host": testValidMQQueueURL, "queueManager": "testQueueManager", "queueName": "testQueue", "queueDepth": "10", "unsafeSsl": "random"}, true, map[string]string{"username": "testUsername", "password": "Pass123"}}, + {map[string]string{"host": testValidMQQueueURL, "queueName": "testQueue", "queueDepth": "10", "unsafeSsl": "random"}, true, map[string]string{"username": "testUsername", "password": "Pass123"}}, } // Test MQ Connection metadata is parsed correctly @@ -89,8 +89,8 @@ func TestIBMMQParseMetadata(t *testing.T) { 
t.Error("Expected error but got success") fmt.Println(testData) } - if metadata != nil && metadata.password != "" && metadata.password != testData.authParams["password"] { - t.Error("Expected password from configuration but found something else: ", metadata.password) + if metadata != (ibmmqMetadata{}) && metadata.Password != "" && metadata.Password != testData.authParams["password"] { + t.Error("Expected password from configuration but found something else: ", metadata.Password) fmt.Println(testData) } } @@ -98,7 +98,7 @@ func TestIBMMQParseMetadata(t *testing.T) { // Test case for TestParseDefaultQueueDepth test var testDefaultQueueDepth = []parseIBMMQMetadataTestData{ - {map[string]string{"host": testValidMQQueueURL, "queueManager": "testQueueManager", "queueName": "testQueue"}, false, map[string]string{"username": "testUsername", "password": "Pass123"}}, + {map[string]string{"host": testValidMQQueueURL, "queueName": "testQueue"}, false, map[string]string{"username": "testUsername", "password": "Pass123"}}, } // Test that DefaultQueueDepth is set when queueDepth is not provided @@ -110,8 +110,8 @@ func TestParseDefaultQueueDepth(t *testing.T) { t.Error("Expected success but got error", err) case testData.isError && err == nil: t.Error("Expected error but got success") - case metadata.queueDepth != defaultTargetQueueDepth: - t.Error("Expected default queueDepth =", defaultTargetQueueDepth, "but got", metadata.queueDepth) + case metadata.QueueDepth != defaultTargetQueueDepth: + t.Error("Expected default queueDepth =", defaultTargetQueueDepth, "but got", metadata.QueueDepth) } } } @@ -120,14 +120,12 @@ func TestParseDefaultQueueDepth(t *testing.T) { func TestIBMMQGetMetricSpecForScaling(t *testing.T) { for _, testData := range IBMMQMetricIdentifiers { metadata, err := parseIBMMQMetadata(&scalersconfig.ScalerConfig{ResolvedEnv: sampleIBMMQResolvedEnv, TriggerMetadata: testData.metadataTestData.metadata, AuthParams: testData.metadataTestData.authParams, TriggerIndex: 
testData.triggerIndex}) - httpTimeout := 100 * time.Millisecond if err != nil { t.Fatal("Could not parse metadata:", err) } - mockIBMMQScaler := IBMMQScaler{ - metadata: metadata, - defaultHTTPTimeout: httpTimeout, + mockIBMMQScaler := ibmmqScaler{ + metadata: metadata, } metricSpec := mockIBMMQScaler.GetMetricSpecForScaling(context.Background()) metricName := metricSpec[0].External.Metric.Name @@ -216,9 +214,9 @@ func TestIBMMQScalerGetQueueDepthViaHTTP(t *testing.T) { })) defer server.Close() - scaler := IBMMQScaler{ - metadata: &IBMMQMetadata{ - host: server.URL, + scaler := ibmmqScaler{ + metadata: ibmmqMetadata{ + Host: server.URL, }, httpClient: server.Client(), } diff --git a/tests/scalers/ibmmq/ibmmq_test.go b/tests/scalers/ibmmq/ibmmq_test.go index 82a1c6ccaeb..69bcf459e3b 100644 --- a/tests/scalers/ibmmq/ibmmq_test.go +++ b/tests/scalers/ibmmq/ibmmq_test.go @@ -203,9 +203,8 @@ spec: queueDepth: "3" activationQueueDepth: "{{.ActivationQueueDepth}}" host: {{.MqscAdminRestEndpoint}} - queueManager: {{.QueueManagerName}} queueName: {{.QueueName}} - tls: "true" + unsafeSsl: "true" usernameFromEnv: "" passwordFromEnv: "" authenticationRef: From d1fbf480b6eaf1a159bb422c4a8aa24e332c7a6a Mon Sep 17 00:00:00 2001 From: Rob Pickerill Date: Tue, 20 Aug 2024 22:38:47 +0100 Subject: [PATCH 10/16] add ignoreNullValues for AWS CloudWatch Scaler (#5635) * add errorWhenMetricValueEmpty Signed-off-by: Rob Pickerill * fix golangci-lint Signed-off-by: Rob Pickerill * improve error message for empty results Signed-off-by: Rob Pickerill * add error when empty metric values to changelog Signed-off-by: Rob Pickerill * rename errorWhenMetricValuesEmpty -> errorWhenNullValues Signed-off-by: Rob Pickerill * use getParameterFromConfigV2 to read config for errorWhenNullValues Signed-off-by: Rob Pickerill * add e2e for error state for cw, and improve e2e for min values for cw Signed-off-by: Rob Pickerill * remove erroneous print statement Signed-off-by: Rob Pickerill * remove unused 
vars Signed-off-by: Rob Pickerill * rename errorWhenMetricValuesEmpty -> ignoreNullValues Signed-off-by: Rob Pickerill * move towards shared package for e2e aws Signed-off-by: Rob Pickerill * minMetricValue optionality based on ignoreNullValues Signed-off-by: Rob Pickerill * fail fast Co-authored-by: Jorge Turrado Ferrero Signed-off-by: Rob Pickerill * Update tests/scalers/aws/aws_cloudwatch_ignore_null_values_false/aws_cloudwatch_ignore_null_values_false_test.go Co-authored-by: Jorge Turrado Ferrero Signed-off-by: Rob Pickerill * Apply suggestions from code review Co-authored-by: Jorge Turrado Ferrero Signed-off-by: Rob Pickerill * fail fast Signed-off-by: Rob Pickerill * fix broken new line Signed-off-by: Rob Pickerill * fix broken new lines Signed-off-by: Rob Pickerill * assert no scaling changes in e2e, and set false for required in minMetricValue Signed-off-by: robpickerill * fix ci checks Signed-off-by: robpickerill * Update tests/scalers/aws/aws_cloudwatch_min_metric_value/aws_cloudwatch_min_metric_value_test.go fix invalid check Co-authored-by: Jorge Turrado Ferrero Signed-off-by: Rob Pickerill * fix merge conflicts Signed-off-by: robpickerill * fix e2e package names Signed-off-by: robpickerill --------- Signed-off-by: Rob Pickerill Signed-off-by: robpickerill Co-authored-by: Jorge Turrado Ferrero Signed-off-by: Fira Curie --- CHANGELOG.md | 5 +- pkg/scalers/aws_cloudwatch_scaler.go | 14 +- pkg/scalers/aws_cloudwatch_scaler_test.go | 655 +++++++++++------- tests/helper/helper.go | 32 +- ...loudwatch_ignore_null_values_false_test.go | 190 +++++ .../aws_cloudwatch_min_metric_value_test.go | 199 ++++++ .../aws/helpers/cloudwatch/cloudwatch.go | 70 ++ 7 files changed, 889 insertions(+), 276 deletions(-) create mode 100644 tests/scalers/aws/aws_cloudwatch_ignore_null_values_false/aws_cloudwatch_ignore_null_values_false_test.go create mode 100644 tests/scalers/aws/aws_cloudwatch_min_metric_value/aws_cloudwatch_min_metric_value_test.go create mode 100644 
tests/scalers/aws/helpers/cloudwatch/cloudwatch.go diff --git a/CHANGELOG.md b/CHANGELOG.md index 25ef88e5827..928a8128c72 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -68,7 +68,10 @@ Here is an overview of all new **experimental** features: ### Improvements -- TODO ([#XXX](https://github.com/kedacore/keda/issues/XXX)) +- **AWS CloudWatch Scaler**: Add support for ignoreNullValues ([#5352](https://github.com/kedacore/keda/issues/5352)) +- **GCP Scalers**: Added custom time horizon in GCP scalers ([#5778](https://github.com/kedacore/keda/issues/5778)) +- **GitHub Scaler**: Fixed pagination, fetching repository list ([#5738](https://github.com/kedacore/keda/issues/5738)) +- **Kafka**: Fix logic to scale to zero on invalid offset even with earliest offsetResetPolicy ([#5689](https://github.com/kedacore/keda/issues/5689)) ### Fixes diff --git a/pkg/scalers/aws_cloudwatch_scaler.go b/pkg/scalers/aws_cloudwatch_scaler.go index cd12a91976d..a07db246f80 100644 --- a/pkg/scalers/aws_cloudwatch_scaler.go +++ b/pkg/scalers/aws_cloudwatch_scaler.go @@ -37,6 +37,7 @@ type awsCloudwatchMetadata struct { TargetMetricValue float64 `keda:"name=targetMetricValue, order=triggerMetadata"` ActivationTargetMetricValue float64 `keda:"name=activationTargetMetricValue, order=triggerMetadata, optional"` MinMetricValue float64 `keda:"name=minMetricValue, order=triggerMetadata"` + IgnoreNullValues bool `keda:"name=ignoreNullValues, order=triggerMetadata, optional, default=true"` MetricCollectionTime int64 `keda:"name=metricCollectionTime, order=triggerMetadata, optional, default=300"` MetricStat string `keda:"name=metricStat, order=triggerMetadata, optional, default=Average"` @@ -191,7 +192,6 @@ func computeQueryWindow(current time.Time, metricPeriodSec, metricEndTimeOffsetS func (s *awsCloudwatchScaler) GetMetricsAndActivity(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, bool, error) { metricValue, err := s.GetCloudwatchMetrics(ctx) - if err != nil { 
s.logger.Error(err, "Error getting metric value") return []external_metrics.ExternalMetricValue{}, false, err @@ -274,20 +274,28 @@ func (s *awsCloudwatchScaler) GetCloudwatchMetrics(ctx context.Context) (float64 } output, err := s.cwClient.GetMetricData(ctx, &input) - if err != nil { s.logger.Error(err, "Failed to get output") return -1, err } s.logger.V(1).Info("Received Metric Data", "data", output) + + // If no metric data results or the first result has no values, and ignoreNullValues is false, + // the scaler should return an error to prevent any further scaling actions. + if len(output.MetricDataResults) > 0 && len(output.MetricDataResults[0].Values) == 0 && !s.metadata.IgnoreNullValues { + emptyMetricsErrMsg := "empty metric data received, ignoreNullValues is false, returning error" + s.logger.Error(nil, emptyMetricsErrMsg) + return -1, fmt.Errorf(emptyMetricsErrMsg) + } + var metricValue float64 + if len(output.MetricDataResults) > 0 && len(output.MetricDataResults[0].Values) > 0 { metricValue = output.MetricDataResults[0].Values[0] } else { s.logger.Info("empty metric data received, returning minMetricValue") metricValue = s.metadata.MinMetricValue } - return metricValue, nil } diff --git a/pkg/scalers/aws_cloudwatch_scaler_test.go b/pkg/scalers/aws_cloudwatch_scaler_test.go index 35290154e80..da6e85fc2c7 100644 --- a/pkg/scalers/aws_cloudwatch_scaler_test.go +++ b/pkg/scalers/aws_cloudwatch_scaler_test.go @@ -22,6 +22,7 @@ const ( testAWSCloudwatchSessionToken = "none" testAWSCloudwatchErrorMetric = "Error" testAWSCloudwatchNoValueMetric = "NoValue" + testAWSCloudwatchEmptyValues = "EmptyValues" ) var testAWSCloudwatchResolvedEnv = map[string]string{ @@ -50,312 +51,433 @@ type awsCloudwatchMetricIdentifier struct { var testAWSCloudwatchMetadata = []parseAWSCloudwatchMetadataTestData{ {map[string]string{}, testAWSAuthentication, true, "Empty structures"}, // properly formed cloudwatch query and awsRegion - {map[string]string{ - "namespace": "AWS/SQS", - 
"dimensionName": "QueueName", - "dimensionValue": "keda", - "metricName": "ApproximateNumberOfMessagesVisible", - "targetMetricValue": "2", - "activationTargetMetricValue": "0", - "minMetricValue": "0", - "awsRegion": "eu-west-1"}, + { + map[string]string{ + "namespace": "AWS/SQS", + "dimensionName": "QueueName", + "dimensionValue": "keda", + "metricName": "ApproximateNumberOfMessagesVisible", + "targetMetricValue": "2", + "activationTargetMetricValue": "0", + "minMetricValue": "0", + "awsRegion": "eu-west-1", + }, testAWSAuthentication, false, - "properly formed cloudwatch query and awsRegion"}, + "properly formed cloudwatch query and awsRegion", + }, // properly formed cloudwatch expression query and awsRegion - {map[string]string{ - "namespace": "AWS/SQS", - "expression": "SELECT MIN(MessageCount) FROM \"AWS/AmazonMQ\" WHERE Broker = 'production' and Queue = 'worker'", - "metricName": "ApproximateNumberOfMessagesVisible", - "targetMetricValue": "2", - "activationTargetMetricValue": "0", - "minMetricValue": "0", - "awsRegion": "eu-west-1"}, + { + map[string]string{ + "namespace": "AWS/SQS", + "expression": "SELECT MIN(MessageCount) FROM \"AWS/AmazonMQ\" WHERE Broker = 'production' and Queue = 'worker'", + "metricName": "ApproximateNumberOfMessagesVisible", + "targetMetricValue": "2", + "activationTargetMetricValue": "0", + "minMetricValue": "0", + "awsRegion": "eu-west-1", + }, testAWSAuthentication, false, - "properly formed cloudwatch expression query and awsRegion"}, + "properly formed cloudwatch expression query and awsRegion", + }, // Properly formed cloudwatch query with optional parameters - {map[string]string{ - "namespace": "AWS/SQS", - "dimensionName": "QueueName", - "dimensionValue": "keda", - "metricName": "ApproximateNumberOfMessagesVisible", - "targetMetricValue": "2", - "activationTargetMetricValue": "0", - "minMetricValue": "0", - "metricCollectionTime": "300", - "metricStat": "Average", - "metricStatPeriod": "300", - "awsRegion": "eu-west-1", - 
"awsEndpoint": "http://localhost:4566"}, + { + map[string]string{ + "namespace": "AWS/SQS", + "dimensionName": "QueueName", + "dimensionValue": "keda", + "metricName": "ApproximateNumberOfMessagesVisible", + "targetMetricValue": "2", + "activationTargetMetricValue": "0", + "minMetricValue": "0", + "metricCollectionTime": "300", + "metricStat": "Average", + "metricStatPeriod": "300", + "awsRegion": "eu-west-1", + "awsEndpoint": "http://localhost:4566", + }, testAWSAuthentication, false, - "Properly formed cloudwatch query with optional parameters"}, + "Properly formed cloudwatch query with optional parameters", + }, // properly formed cloudwatch query but Region is empty - {map[string]string{ - "namespace": "AWS/SQS", - "dimensionName": "QueueName", - "dimensionValue": "keda", - "metricName": "ApproximateNumberOfMessagesVisible", - "targetMetricValue": "2", - "activationTargetMetricValue": "0", - "minMetricValue": "0", - "awsRegion": ""}, + { + map[string]string{ + "namespace": "AWS/SQS", + "dimensionName": "QueueName", + "dimensionValue": "keda", + "metricName": "ApproximateNumberOfMessagesVisible", + "targetMetricValue": "2", + "activationTargetMetricValue": "0", + "minMetricValue": "0", + "awsRegion": "", + }, testAWSAuthentication, true, - "properly formed cloudwatch query but Region is empty"}, + "properly formed cloudwatch query but Region is empty", + }, // Missing namespace - {map[string]string{"dimensionName": "QueueName", - "dimensionValue": "keda", - "metricName": "ApproximateNumberOfMessagesVisible", - "targetMetricValue": "2", - "minMetricValue": "0", - "awsRegion": "eu-west-1"}, + { + map[string]string{ + "dimensionName": "QueueName", + "dimensionValue": "keda", + "metricName": "ApproximateNumberOfMessagesVisible", + "targetMetricValue": "2", + "minMetricValue": "0", + "awsRegion": "eu-west-1", + }, testAWSAuthentication, true, - "Missing namespace"}, + "Missing namespace", + }, // Missing dimensionName - {map[string]string{ - "namespace": "AWS/SQS", - 
"dimensionValue": "keda", - "metricName": "ApproximateNumberOfMessagesVisible", - "targetMetricValue": "2", - "minMetricValue": "0", - "awsRegion": "eu-west-1"}, + { + map[string]string{ + "namespace": "AWS/SQS", + "dimensionValue": "keda", + "metricName": "ApproximateNumberOfMessagesVisible", + "targetMetricValue": "2", + "minMetricValue": "0", + "awsRegion": "eu-west-1", + }, testAWSAuthentication, true, - "Missing dimensionName"}, + "Missing dimensionName", + }, // Missing dimensionValue - {map[string]string{ - "namespace": "AWS/SQS", - "dimensionName": "QueueName", - "metricName": "ApproximateNumberOfMessagesVisible", - "targetMetricValue": "2", - "minMetricValue": "0", - "awsRegion": "eu-west-1"}, + { + map[string]string{ + "namespace": "AWS/SQS", + "dimensionName": "QueueName", + "metricName": "ApproximateNumberOfMessagesVisible", + "targetMetricValue": "2", + "minMetricValue": "0", + "awsRegion": "eu-west-1", + }, testAWSAuthentication, true, - "Missing dimensionValue"}, + "Missing dimensionValue", + }, // Missing metricName - {map[string]string{ - "namespace": "AWS/SQS", - "dimensionName": "QueueName", - "dimensionValue": "keda", - "targetMetricValue": "2", - "minMetricValue": "0", - "awsRegion": "eu-west-1"}, + { + map[string]string{ + "namespace": "AWS/SQS", + "dimensionName": "QueueName", + "dimensionValue": "keda", + "targetMetricValue": "2", + "minMetricValue": "0", + "awsRegion": "eu-west-1", + }, testAWSAuthentication, true, - "Missing metricName"}, + "Missing metricName", + }, // with static "aws_credentials" from TriggerAuthentication - {map[string]string{ - "namespace": "AWS/SQS", - "dimensionName": "QueueName", - "dimensionValue": "keda", - "metricName": "ApproximateNumberOfMessagesVisible", - "targetMetricValue": "2", - "minMetricValue": "0", - "metricCollectionTime": "300", - "metricStat": "Average", - "metricStatPeriod": "300", - "awsRegion": "eu-west-1"}, + { + map[string]string{ + "namespace": "AWS/SQS", + "dimensionName": "QueueName", + 
"dimensionValue": "keda", + "metricName": "ApproximateNumberOfMessagesVisible", + "targetMetricValue": "2", + "minMetricValue": "0", + "metricCollectionTime": "300", + "metricStat": "Average", + "metricStatPeriod": "300", + "awsRegion": "eu-west-1", + }, map[string]string{ "awsAccessKeyId": testAWSCloudwatchAccessKeyID, "awsSecretAccessKey": testAWSCloudwatchSecretAccessKey, }, false, - "with AWS Credentials from TriggerAuthentication"}, + "with AWS Credentials from TriggerAuthentication", + }, // with temporary "aws_credentials" from TriggerAuthentication - {map[string]string{ - "namespace": "AWS/SQS", - "dimensionName": "QueueName", - "dimensionValue": "keda", - "metricName": "ApproximateNumberOfMessagesVisible", - "targetMetricValue": "2", - "minMetricValue": "0", - "metricCollectionTime": "300", - "metricStat": "Average", - "metricStatPeriod": "300", - "awsRegion": "eu-west-1"}, + { + map[string]string{ + "namespace": "AWS/SQS", + "dimensionName": "QueueName", + "dimensionValue": "keda", + "metricName": "ApproximateNumberOfMessagesVisible", + "targetMetricValue": "2", + "minMetricValue": "0", + "metricCollectionTime": "300", + "metricStat": "Average", + "metricStatPeriod": "300", + "awsRegion": "eu-west-1", + }, map[string]string{ "awsAccessKeyId": testAWSCloudwatchAccessKeyID, "awsSecretAccessKey": testAWSCloudwatchSecretAccessKey, "awsSessionToken": testAWSCloudwatchSessionToken, }, false, - "with AWS Credentials from TriggerAuthentication"}, + "with AWS Credentials from TriggerAuthentication", + }, // with "aws_role" from TriggerAuthentication - {map[string]string{ - "namespace": "AWS/SQS", - "dimensionName": "QueueName", - "dimensionValue": "keda", - "metricName": "ApproximateNumberOfMessagesVisible", - "targetMetricValue": "2", - "minMetricValue": "0", - "metricCollectionTime": "300", - "metricStat": "Average", - "metricStatPeriod": "300", - "awsRegion": "eu-west-1"}, + { + map[string]string{ + "namespace": "AWS/SQS", + "dimensionName": "QueueName", + 
"dimensionValue": "keda", + "metricName": "ApproximateNumberOfMessagesVisible", + "targetMetricValue": "2", + "minMetricValue": "0", + "metricCollectionTime": "300", + "metricStat": "Average", + "metricStatPeriod": "300", + "awsRegion": "eu-west-1", + }, map[string]string{ "awsRoleArn": testAWSCloudwatchRoleArn, }, false, - "with AWS Role from TriggerAuthentication"}, - {map[string]string{ - "namespace": "AWS/SQS", - "dimensionName": "QueueName", - "dimensionValue": "keda", - "metricName": "ApproximateNumberOfMessagesVisible", - "targetMetricValue": "2", - "minMetricValue": "0", - "metricCollectionTime": "300", - "metricStat": "Average", - "metricStatPeriod": "300", - "awsRegion": "eu-west-1", - "identityOwner": "operator"}, + "with AWS Role from TriggerAuthentication", + }, + { + map[string]string{ + "namespace": "AWS/SQS", + "dimensionName": "QueueName", + "dimensionValue": "keda", + "metricName": "ApproximateNumberOfMessagesVisible", + "targetMetricValue": "2", + "minMetricValue": "0", + "metricCollectionTime": "300", + "metricStat": "Average", + "metricStatPeriod": "300", + "awsRegion": "eu-west-1", + "identityOwner": "operator", + }, map[string]string{}, false, - "with AWS Role assigned on KEDA operator itself"}, - {map[string]string{ - "namespace": "AWS/SQS", - "dimensionName": "QueueName", - "dimensionValue": "keda", - "metricName": "ApproximateNumberOfMessagesVisible", - "targetMetricValue": "2", - "minMetricValue": "0", - "metricCollectionTime": "a", - "metricStat": "Average", - "metricStatPeriod": "300", - "awsRegion": "eu-west-1", - "identityOwner": "operator"}, + "with AWS Role assigned on KEDA operator itself", + }, + { + map[string]string{ + "namespace": "AWS/SQS", + "dimensionName": "QueueName", + "dimensionValue": "keda", + "metricName": "ApproximateNumberOfMessagesVisible", + "targetMetricValue": "2", + "minMetricValue": "0", + "metricCollectionTime": "a", + "metricStat": "Average", + "metricStatPeriod": "300", + "awsRegion": "eu-west-1", + 
"identityOwner": "operator", + }, map[string]string{}, true, - "if metricCollectionTime assigned with a string, need to be a number"}, - {map[string]string{ - "namespace": "AWS/SQS", - "dimensionName": "QueueName", - "dimensionValue": "keda", - "metricName": "ApproximateNumberOfMessagesVisible", - "targetMetricValue": "2", - "minMetricValue": "0", - "metricCollectionTime": "300", - "metricStat": "Average", - "metricStatPeriod": "a", - "awsRegion": "eu-west-1", - "identityOwner": "operator"}, + "if metricCollectionTime assigned with a string, need to be a number", + }, + { + map[string]string{ + "namespace": "AWS/SQS", + "dimensionName": "QueueName", + "dimensionValue": "keda", + "metricName": "ApproximateNumberOfMessagesVisible", + "targetMetricValue": "2", + "minMetricValue": "0", + "metricCollectionTime": "300", + "metricStat": "Average", + "metricStatPeriod": "a", + "awsRegion": "eu-west-1", + "identityOwner": "operator", + }, map[string]string{}, true, - "if metricStatPeriod assigned with a string, need to be a number"}, - {map[string]string{ - "namespace": "AWS/SQS", - "dimensionName": "QueueName", - "dimensionValue": "keda", - "metricName": "ApproximateNumberOfMessagesVisible", - "targetMetricValue": "2", - "minMetricValue": "0", - "metricStat": "Average", - "metricStatPeriod": "300", - "awsRegion": "eu-west-1"}, + "if metricStatPeriod assigned with a string, need to be a number", + }, + { + map[string]string{ + "namespace": "AWS/SQS", + "dimensionName": "QueueName", + "dimensionValue": "keda", + "metricName": "ApproximateNumberOfMessagesVisible", + "targetMetricValue": "2", + "minMetricValue": "0", + "metricStat": "Average", + "metricStatPeriod": "300", + "awsRegion": "eu-west-1", + }, testAWSAuthentication, false, - "Missing metricCollectionTime not generate error because will get the default value"}, - {map[string]string{ - "namespace": "AWS/SQS", - "dimensionName": "QueueName", - "dimensionValue": "keda", - "metricName": 
"ApproximateNumberOfMessagesVisible", - "targetMetricValue": "2", - "minMetricValue": "0", - "metricCollectionTime": "300", - "metricStatPeriod": "300", - "awsRegion": "eu-west-1"}, + "Missing metricCollectionTime not generate error because will get the default value", + }, + { + map[string]string{ + "namespace": "AWS/SQS", + "dimensionName": "QueueName", + "dimensionValue": "keda", + "metricName": "ApproximateNumberOfMessagesVisible", + "targetMetricValue": "2", + "minMetricValue": "0", + "metricCollectionTime": "300", + "metricStatPeriod": "300", + "awsRegion": "eu-west-1", + }, testAWSAuthentication, false, - "Missing metricStat not generate error because will get the default value"}, - {map[string]string{ - "namespace": "AWS/SQS", - "dimensionName": "QueueName", - "dimensionValue": "keda", - "metricName": "ApproximateNumberOfMessagesVisible", - "targetMetricValue": "2", - "minMetricValue": "0", - "metricCollectionTime": "300", - "metricStat": "Average", - "awsRegion": "eu-west-1"}, + "Missing metricStat not generate error because will get the default value", + }, + { + map[string]string{ + "namespace": "AWS/SQS", + "dimensionName": "QueueName", + "dimensionValue": "keda", + "metricName": "ApproximateNumberOfMessagesVisible", + "targetMetricValue": "2", + "minMetricValue": "0", + "metricCollectionTime": "300", + "metricStat": "Average", + "awsRegion": "eu-west-1", + }, testAWSAuthentication, false, - "Missing metricStatPeriod not generate error because will get the default value"}, - {map[string]string{ - "namespace": "AWS/SQS", - "dimensionName": "QueueName", - "dimensionValue": "keda", - "metricName": "ApproximateNumberOfMessagesVisible", - "targetMetricValue": "2", - "minMetricValue": "0", - "metricStat": "Average", - "metricUnit": "Count", - "metricEndTimeOffset": "60", - "awsRegion": "eu-west-1"}, + "Missing metricStatPeriod not generate error because will get the default value", + }, + { + map[string]string{ + "namespace": "AWS/SQS", + "dimensionName": 
"QueueName", + "dimensionValue": "keda", + "metricName": "ApproximateNumberOfMessagesVisible", + "targetMetricValue": "2", + "minMetricValue": "0", + "metricStat": "Average", + "metricUnit": "Count", + "metricEndTimeOffset": "60", + "awsRegion": "eu-west-1", + }, testAWSAuthentication, false, - "set a supported metricUnit"}, - {map[string]string{ - "namespace": "AWS/SQS", - "dimensionName": "QueueName", - "dimensionValue": "keda", - "metricName": "ApproximateNumberOfMessagesVisible", - "targetMetricValue": "2", - "minMetricValue": "0", - "metricCollectionTime": "300", - "metricStat": "SomeStat", - "awsRegion": "eu-west-1"}, + "set a supported metricUnit", + }, + { + map[string]string{ + "namespace": "AWS/SQS", + "dimensionName": "QueueName", + "dimensionValue": "keda", + "metricName": "ApproximateNumberOfMessagesVisible", + "targetMetricValue": "2", + "minMetricValue": "0", + "metricCollectionTime": "300", + "metricStat": "SomeStat", + "awsRegion": "eu-west-1", + }, testAWSAuthentication, true, - "metricStat is not supported"}, - {map[string]string{ - "namespace": "AWS/SQS", - "dimensionName": "QueueName", - "dimensionValue": "keda", - "metricName": "ApproximateNumberOfMessagesVisible", - "targetMetricValue": "2", - "minMetricValue": "0", - "metricStatPeriod": "300", - "metricCollectionTime": "100", - "metricStat": "Average", - "awsRegion": "eu-west-1"}, + "metricStat is not supported", + }, + { + map[string]string{ + "namespace": "AWS/SQS", + "dimensionName": "QueueName", + "dimensionValue": "keda", + "metricName": "ApproximateNumberOfMessagesVisible", + "targetMetricValue": "2", + "minMetricValue": "0", + "metricStatPeriod": "300", + "metricCollectionTime": "100", + "metricStat": "Average", + "awsRegion": "eu-west-1", + }, testAWSAuthentication, true, - "metricCollectionTime smaller than metricStatPeriod"}, - {map[string]string{ - "namespace": "AWS/SQS", - "dimensionName": "QueueName", - "dimensionValue": "keda", - "metricName": 
"ApproximateNumberOfMessagesVisible", - "targetMetricValue": "2", - "minMetricValue": "0", - "metricStatPeriod": "250", - "metricStat": "Average", - "awsRegion": "eu-west-1"}, + "metricCollectionTime smaller than metricStatPeriod", + }, + { + map[string]string{ + "namespace": "AWS/SQS", + "dimensionName": "QueueName", + "dimensionValue": "keda", + "metricName": "ApproximateNumberOfMessagesVisible", + "targetMetricValue": "2", + "minMetricValue": "0", + "metricStatPeriod": "250", + "metricStat": "Average", + "awsRegion": "eu-west-1", + }, testAWSAuthentication, true, - "unsupported metricStatPeriod"}, - {map[string]string{ - "namespace": "AWS/SQS", - "dimensionName": "QueueName", - "dimensionValue": "keda", - "metricName": "ApproximateNumberOfMessagesVisible", - "targetMetricValue": "2", - "minMetricValue": "0", - "metricStatPeriod": "25", - "metricStat": "Average", - "awsRegion": "eu-west-1"}, + "unsupported metricStatPeriod", + }, + { + map[string]string{ + "namespace": "AWS/SQS", + "dimensionName": "QueueName", + "dimensionValue": "keda", + "metricName": "ApproximateNumberOfMessagesVisible", + "targetMetricValue": "2", + "minMetricValue": "0", + "metricStatPeriod": "25", + "metricStat": "Average", + "awsRegion": "eu-west-1", + }, testAWSAuthentication, true, - "unsupported metricStatPeriod"}, - {map[string]string{ - "namespace": "AWS/SQS", - "dimensionName": "QueueName", - "dimensionValue": "keda", - "metricName": "ApproximateNumberOfMessagesVisible", - "targetMetricValue": "2", - "minMetricValue": "0", - "metricStatPeriod": "25", - "metricStat": "Average", - "metricUnit": "Hour", - "awsRegion": "eu-west-1"}, + "unsupported metricStatPeriod", + }, + { + map[string]string{ + "namespace": "AWS/SQS", + "dimensionName": "QueueName", + "dimensionValue": "keda", + "metricName": "ApproximateNumberOfMessagesVisible", + "targetMetricValue": "2", + "minMetricValue": "0", + "metricStatPeriod": "25", + "metricStat": "Average", + "metricUnit": "Hour", + "awsRegion": 
"eu-west-1", + }, + testAWSAuthentication, true, + "unsupported metricUnit", + }, + // test ignoreNullValues is false + { + map[string]string{ + "namespace": "AWS/SQS", + "dimensionName": "QueueName", + "dimensionValue": "keda", + "metricName": "ApproximateNumberOfMessagesVisible", + "targetMetricValue": "2", + "minMetricValue": "0", + "metricStatPeriod": "60", + "metricStat": "Average", + "ignoreNullValues": "false", + "awsRegion": "eu-west-1", + }, + testAWSAuthentication, false, + "with ignoreNullValues set to false", + }, + // test ignoreNullValues is true + { + map[string]string{ + "namespace": "AWS/SQS", + "dimensionName": "QueueName", + "dimensionValue": "keda", + "metricName": "ApproximateNumberOfMessagesVisible", + "targetMetricValue": "2", + "minMetricValue": "0", + "metricStatPeriod": "60", + "metricStat": "Average", + "ignoreNullValues": "true", + "awsRegion": "eu-west-1", + }, + testAWSAuthentication, false, + "with ignoreNullValues set to true", + }, + // test ignoreNullValues is incorrect + { + map[string]string{ + "namespace": "AWS/SQS", + "dimensionName": "QueueName", + "dimensionValue": "keda", + "metricName": "ApproximateNumberOfMessagesVisible", + "targetMetricValue": "2", + "minMetricValue": "0", + "metricStatPeriod": "60", + "metricStat": "Average", + "ignoreNullValues": "maybe", + "awsRegion": "eu-west-1", + }, testAWSAuthentication, true, - "unsupported metricUnit"}, + "unsupported value for ignoreNullValues", + }, } var awsCloudwatchMetricIdentifiers = []awsCloudwatchMetricIdentifier{ @@ -397,6 +519,22 @@ var awsCloudwatchGetMetricTestData = []awsCloudwatchMetadata{ awsAuthorization: awsutils.AuthorizationMetadata{PodIdentityOwner: false}, triggerIndex: 0, }, + { + Namespace: "Custom", + MetricsName: "HasDataFromExpression", + Expression: "SELECT MIN(MessageCount) FROM \"AWS/AmazonMQ\" WHERE Broker = 'production' and Queue = 'worker'", + TargetMetricValue: 100, + MinMetricValue: 0, + MetricCollectionTime: 60, + MetricStat: "Average", + 
MetricUnit: "SampleCount", + MetricStatPeriod: 60, + MetricEndTimeOffset: 60, + AwsRegion: "us-west-2", + awsAuthorization: awsutils.AuthorizationMetadata{PodIdentityOwner: false}, + triggerIndex: 0, + }, + // Test for metric with no data, no error expected as we are ignoring null values { Namespace: "Custom", MetricsName: testAWSCloudwatchErrorMetric, @@ -413,6 +551,7 @@ var awsCloudwatchGetMetricTestData = []awsCloudwatchMetadata{ awsAuthorization: awsutils.AuthorizationMetadata{PodIdentityOwner: false}, triggerIndex: 0, }, + // Test for metric with no data, and the scaler errors when metric data values are empty { Namespace: "Custom", MetricsName: testAWSCloudwatchNoValueMetric, @@ -458,6 +597,14 @@ func (m *mockCloudwatch) GetMetricData(_ context.Context, input *cloudwatch.GetM return &cloudwatch.GetMetricDataOutput{ MetricDataResults: []types.MetricDataResult{}, }, nil + case testAWSCloudwatchEmptyValues: + return &cloudwatch.GetMetricDataOutput{ + MetricDataResults: []types.MetricDataResult{ + { + Values: []float64{}, + }, + }, + }, nil } } @@ -508,6 +655,12 @@ func TestAWSCloudwatchScalerGetMetrics(t *testing.T) { assert.Error(t, err, "expect error because of cloudwatch api error") case testAWSCloudwatchNoValueMetric: assert.NoError(t, err, "dont expect error when returning empty metric list from cloudwatch") + case testAWSCloudwatchEmptyValues: + if meta.IgnoreNullValues { + assert.NoError(t, err, "dont expect error when returning empty metric list from cloudwatch") + } else { + assert.Error(t, err, "expect error when returning empty metric list from cloudwatch, because ignoreNullValues is false") + } default: assert.EqualValues(t, int64(10.0), value[0].Value.Value()) } diff --git a/tests/helper/helper.go b/tests/helper/helper.go index ef653c1b12d..a21adb0c48f 100644 --- a/tests/helper/helper.go +++ b/tests/helper/helper.go @@ -307,18 +307,15 @@ func WaitForNamespaceDeletion(t *testing.T, nsName string) bool { return false } -func WaitForScaledJobCount(t 
*testing.T, kc *kubernetes.Clientset, scaledJobName, namespace string, - target, iterations, intervalSeconds int) bool { +func WaitForScaledJobCount(t *testing.T, kc *kubernetes.Clientset, scaledJobName, namespace string, target, iterations, intervalSeconds int) bool { return waitForJobCount(t, kc, fmt.Sprintf("scaledjob.keda.sh/name=%s", scaledJobName), namespace, target, iterations, intervalSeconds) } -func WaitForJobCount(t *testing.T, kc *kubernetes.Clientset, namespace string, - target, iterations, intervalSeconds int) bool { +func WaitForJobCount(t *testing.T, kc *kubernetes.Clientset, namespace string, target, iterations, intervalSeconds int) bool { return waitForJobCount(t, kc, "", namespace, target, iterations, intervalSeconds) } -func waitForJobCount(t *testing.T, kc *kubernetes.Clientset, selector, namespace string, - target, iterations, intervalSeconds int) bool { +func waitForJobCount(t *testing.T, kc *kubernetes.Clientset, selector, namespace string, target, iterations, intervalSeconds int) bool { for i := 0; i < iterations; i++ { jobList, _ := kc.BatchV1().Jobs(namespace).List(context.Background(), metav1.ListOptions{ LabelSelector: selector, @@ -338,9 +335,8 @@ func waitForJobCount(t *testing.T, kc *kubernetes.Clientset, selector, namespace return false } -func WaitForJobCountUntilIteration(t *testing.T, kc *kubernetes.Clientset, namespace string, - target, iterations, intervalSeconds int) bool { - var isTargetAchieved = false +func WaitForJobCountUntilIteration(t *testing.T, kc *kubernetes.Clientset, namespace string, target, iterations, intervalSeconds int) bool { + isTargetAchieved := false for i := 0; i < iterations; i++ { jobList, _ := kc.BatchV1().Jobs(namespace).List(context.Background(), metav1.ListOptions{}) @@ -362,8 +358,7 @@ func WaitForJobCountUntilIteration(t *testing.T, kc *kubernetes.Clientset, names } // Waits until deployment count hits target or number of iterations are done. 
-func WaitForPodCountInNamespace(t *testing.T, kc *kubernetes.Clientset, namespace string, - target, iterations, intervalSeconds int) bool { +func WaitForPodCountInNamespace(t *testing.T, kc *kubernetes.Clientset, namespace string, target, iterations, intervalSeconds int) bool { for i := 0; i < iterations; i++ { pods, _ := kc.CoreV1().Pods(namespace).List(context.Background(), metav1.ListOptions{}) @@ -408,8 +403,7 @@ func WaitForAllPodRunningInNamespace(t *testing.T, kc *kubernetes.Clientset, nam // Waits until the Horizontal Pod Autoscaler for the scaledObject reports that it has metrics available // to calculate, or until the number of iterations are done, whichever happens first. -func WaitForHPAMetricsToPopulate(t *testing.T, kc *kubernetes.Clientset, name, namespace string, - iterations, intervalSeconds int) bool { +func WaitForHPAMetricsToPopulate(t *testing.T, kc *kubernetes.Clientset, name, namespace string, iterations, intervalSeconds int) bool { totalWaitDuration := time.Duration(iterations) * time.Duration(intervalSeconds) * time.Second startedWaiting := time.Now() for i := 0; i < iterations; i++ { @@ -434,8 +428,7 @@ func WaitForHPAMetricsToPopulate(t *testing.T, kc *kubernetes.Clientset, name, n } // Waits until deployment ready replica count hits target or number of iterations are done. -func WaitForDeploymentReplicaReadyCount(t *testing.T, kc *kubernetes.Clientset, name, namespace string, - target, iterations, intervalSeconds int) bool { +func WaitForDeploymentReplicaReadyCount(t *testing.T, kc *kubernetes.Clientset, name, namespace string, target, iterations, intervalSeconds int) bool { for i := 0; i < iterations; i++ { deployment, _ := kc.AppsV1().Deployments(namespace).Get(context.Background(), name, metav1.GetOptions{}) replicas := deployment.Status.ReadyReplicas @@ -454,8 +447,7 @@ func WaitForDeploymentReplicaReadyCount(t *testing.T, kc *kubernetes.Clientset, } // Waits until statefulset count hits target or number of iterations are done. 
-func WaitForStatefulsetReplicaReadyCount(t *testing.T, kc *kubernetes.Clientset, name, namespace string, - target, iterations, intervalSeconds int) bool { +func WaitForStatefulsetReplicaReadyCount(t *testing.T, kc *kubernetes.Clientset, name, namespace string, target, iterations, intervalSeconds int) bool { for i := 0; i < iterations; i++ { statefulset, _ := kc.AppsV1().StatefulSets(namespace).Get(context.Background(), name, metav1.GetOptions{}) replicas := statefulset.Status.ReadyReplicas @@ -516,8 +508,7 @@ func AssertReplicaCountNotChangeDuringTimePeriod(t *testing.T, kc *kubernetes.Cl } } -func WaitForHpaCreation(t *testing.T, kc *kubernetes.Clientset, name, namespace string, - iterations, intervalSeconds int) (*autoscalingv2.HorizontalPodAutoscaler, error) { +func WaitForHpaCreation(t *testing.T, kc *kubernetes.Clientset, name, namespace string, iterations, intervalSeconds int) (*autoscalingv2.HorizontalPodAutoscaler, error) { hpa := &autoscalingv2.HorizontalPodAutoscaler{} var err error for i := 0; i < iterations; i++ { @@ -754,8 +745,7 @@ func DeletePodsInNamespaceBySelector(t *testing.T, kc *kubernetes.Clientset, sel } // Wait for Pods identified by selector to complete termination -func WaitForPodsTerminated(t *testing.T, kc *kubernetes.Clientset, selector, namespace string, - iterations, intervalSeconds int) bool { +func WaitForPodsTerminated(t *testing.T, kc *kubernetes.Clientset, selector, namespace string, iterations, intervalSeconds int) bool { for i := 0; i < iterations; i++ { pods, err := kc.CoreV1().Pods(namespace).List(context.Background(), metav1.ListOptions{LabelSelector: selector}) if (err != nil && errors.IsNotFound(err)) || len(pods.Items) == 0 { diff --git a/tests/scalers/aws/aws_cloudwatch_ignore_null_values_false/aws_cloudwatch_ignore_null_values_false_test.go b/tests/scalers/aws/aws_cloudwatch_ignore_null_values_false/aws_cloudwatch_ignore_null_values_false_test.go new file mode 100644 index 00000000000..ddf4e1f08e3 --- /dev/null +++ 
b/tests/scalers/aws/aws_cloudwatch_ignore_null_values_false/aws_cloudwatch_ignore_null_values_false_test.go @@ -0,0 +1,190 @@ +//go:build e2e +// +build e2e + +package aws_cloudwatch_ignore_null_values_false_test + +import ( + "context" + "encoding/base64" + "fmt" + "os" + "testing" + + "github.com/joho/godotenv" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + . "github.com/kedacore/keda/v2/tests/helper" + "github.com/kedacore/keda/v2/tests/scalers/aws/helpers/cloudwatch" +) + +// Load environment variables from .env file +var _ = godotenv.Load("../../../.env") + +const ( + testName = "aws-cloudwatch-ignore-null-values-false-test" +) + +type templateData struct { + TestNamespace string + DeploymentName string + ScaledObjectName string + SecretName string + AwsAccessKeyID string + AwsSecretAccessKey string + AwsRegion string + CloudWatchMetricName string + CloudWatchMetricNamespace string + CloudWatchMetricDimensionName string + CloudWatchMetricDimensionValue string +} + +const ( + secretTemplate = `apiVersion: v1 +kind: Secret +metadata: + name: {{.SecretName}} + namespace: {{.TestNamespace}} +data: + AWS_ACCESS_KEY_ID: {{.AwsAccessKeyID}} + AWS_SECRET_ACCESS_KEY: {{.AwsSecretAccessKey}} +` + + triggerAuthenticationTemplate = `apiVersion: keda.sh/v1alpha1 +kind: TriggerAuthentication +metadata: + name: keda-trigger-auth-aws-credentials + namespace: {{.TestNamespace}} +spec: + secretTargetRef: + - parameter: awsAccessKeyID # Required. + name: {{.SecretName}} # Required. + key: AWS_ACCESS_KEY_ID # Required. + - parameter: awsSecretAccessKey # Required. + name: {{.SecretName}} # Required. + key: AWS_SECRET_ACCESS_KEY # Required. 
+` + + deploymentTemplate = ` +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{.DeploymentName}} + namespace: {{.TestNamespace}} + labels: + app: {{.DeploymentName}} +spec: + replicas: 0 + selector: + matchLabels: + app: {{.DeploymentName}} + template: + metadata: + labels: + app: {{.DeploymentName}} + spec: + containers: + - name: nginx + image: nginxinc/nginx-unprivileged + ports: + - containerPort: 80 +` + + scaledObjectTemplate = ` +apiVersion: keda.sh/v1alpha1 +kind: ScaledObject +metadata: + name: {{.ScaledObjectName}} + namespace: {{.TestNamespace}} + labels: + app: {{.DeploymentName}} +spec: + scaleTargetRef: + name: {{.DeploymentName}} + maxReplicaCount: 2 + minReplicaCount: 0 + cooldownPeriod: 1 + triggers: + - type: aws-cloudwatch + authenticationRef: + name: keda-trigger-auth-aws-credentials + metadata: + awsRegion: {{.AwsRegion}} + namespace: {{.CloudWatchMetricNamespace}} + dimensionName: {{.CloudWatchMetricDimensionName}} + dimensionValue: {{.CloudWatchMetricDimensionValue}} + metricName: {{.CloudWatchMetricName}} + targetMetricValue: "1" + minMetricValue: "1" + ignoreNullValues: "false" + metricCollectionTime: "120" + metricStatPeriod: "60" +` +) + +var ( + testNamespace = fmt.Sprintf("%s-ns", testName) + deploymentName = fmt.Sprintf("%s-deployment", testName) + scaledObjectName = fmt.Sprintf("%s-so", testName) + secretName = fmt.Sprintf("%s-secret", testName) + cloudwatchMetricName = fmt.Sprintf("cw-%d", GetRandomNumber()) + awsAccessKeyID = os.Getenv("TF_AWS_ACCESS_KEY") + awsSecretAccessKey = os.Getenv("TF_AWS_SECRET_KEY") + awsRegion = os.Getenv("TF_AWS_REGION") + cloudwatchMetricNamespace = "DoesNotExist" + cloudwatchMetricDimensionName = "dimensionName" + cloudwatchMetricDimensionValue = "dimensionValue" + minReplicaCount = 0 +) + +// This test is to verify that the scaler results in an error state when +// the metric query returns null values and the ignoreNullValues is set to false. 
+func TestCloudWatchScalerWithIgnoreNullValuesFalse(t *testing.T) { + ctx := context.Background() + + // setup cloudwatch + cloudwatchClient, err := cloudwatch.NewClient(ctx, awsRegion, awsAccessKeyID, awsSecretAccessKey, "") + require.Nil(t, err, "error creating cloudwatch client") + + // check that the metric in question is not already present, and is returning + // an empty set of values. + metricQuery := cloudwatch.CreateMetricDataInputForEmptyMetricValues(cloudwatchMetricNamespace, cloudwatchMetricName, cloudwatchMetricDimensionName, cloudwatchMetricDimensionValue) + metricData, err := cloudwatch.GetMetricData(ctx, cloudwatchClient, metricQuery) + require.Nil(t, err, "error getting metric data") + require.Nil(t, cloudwatch.ExpectEmptyMetricDataResults(metricData), "metric data should be empty") + + // Create kubernetes resources + kc := GetKubernetesClient(t) + data, templates := getTemplateData() + CreateKubernetesResources(t, kc, testNamespace, data, templates) + defer DeleteKubernetesResources(t, testNamespace, data, templates) + + assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, minReplicaCount, 60, 1), + "replica count should be %d after 1 minute", minReplicaCount) + + // check that the deployment did not scale, as the metric query is returning + // null values and the scaledobject is receiving errors, the deployment + // should not scale, even though the minMetricValue is set to 1. 
+ AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, testNamespace, minReplicaCount, 60) +} + +func getTemplateData() (templateData, []Template) { + return templateData{ + TestNamespace: testNamespace, + DeploymentName: deploymentName, + ScaledObjectName: scaledObjectName, + SecretName: secretName, + AwsAccessKeyID: base64.StdEncoding.EncodeToString([]byte(awsAccessKeyID)), + AwsSecretAccessKey: base64.StdEncoding.EncodeToString([]byte(awsSecretAccessKey)), + AwsRegion: awsRegion, + CloudWatchMetricName: cloudwatchMetricName, + CloudWatchMetricNamespace: cloudwatchMetricNamespace, + CloudWatchMetricDimensionName: cloudwatchMetricDimensionName, + CloudWatchMetricDimensionValue: cloudwatchMetricDimensionValue, + }, []Template{ + {Name: "secretTemplate", Config: secretTemplate}, + {Name: "triggerAuthenticationTemplate", Config: triggerAuthenticationTemplate}, + {Name: "deploymentTemplate", Config: deploymentTemplate}, + {Name: "scaledObjectTemplate", Config: scaledObjectTemplate}, + } +} diff --git a/tests/scalers/aws/aws_cloudwatch_min_metric_value/aws_cloudwatch_min_metric_value_test.go b/tests/scalers/aws/aws_cloudwatch_min_metric_value/aws_cloudwatch_min_metric_value_test.go new file mode 100644 index 00000000000..341cead869c --- /dev/null +++ b/tests/scalers/aws/aws_cloudwatch_min_metric_value/aws_cloudwatch_min_metric_value_test.go @@ -0,0 +1,199 @@ +//go:build e2e +// +build e2e + +package aws_cloudwatch_min_metric_value_test + +import ( + "context" + "encoding/base64" + "fmt" + "os" + "testing" + "time" + + "github.com/joho/godotenv" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + . 
"github.com/kedacore/keda/v2/tests/helper" + "github.com/kedacore/keda/v2/tests/scalers/aws/helpers/cloudwatch" +) + +// Load environment variables from .env file +var _ = godotenv.Load("../../../.env") + +const ( + testName = "aws-cloudwatch-min-metric-value-test" +) + +type templateData struct { + TestNamespace string + DeploymentName string + ScaledObjectName string + SecretName string + AwsAccessKeyID string + AwsSecretAccessKey string + AwsRegion string + CloudWatchMetricName string + CloudWatchMetricNamespace string + CloudWatchMetricDimensionName string + CloudWatchMetricDimensionValue string +} + +const ( + secretTemplate = `apiVersion: v1 +kind: Secret +metadata: + name: {{.SecretName}} + namespace: {{.TestNamespace}} +data: + AWS_ACCESS_KEY_ID: {{.AwsAccessKeyID}} + AWS_SECRET_ACCESS_KEY: {{.AwsSecretAccessKey}} +` + + triggerAuthenticationTemplate = `apiVersion: keda.sh/v1alpha1 +kind: TriggerAuthentication +metadata: + name: keda-trigger-auth-aws-credentials + namespace: {{.TestNamespace}} +spec: + secretTargetRef: + - parameter: awsAccessKeyID # Required. + name: {{.SecretName}} # Required. + key: AWS_ACCESS_KEY_ID # Required. + - parameter: awsSecretAccessKey # Required. + name: {{.SecretName}} # Required. + key: AWS_SECRET_ACCESS_KEY # Required. 
+` + + deploymentTemplate = ` +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{.DeploymentName}} + namespace: {{.TestNamespace}} + labels: + app: {{.DeploymentName}} +spec: + replicas: 0 + selector: + matchLabels: + app: {{.DeploymentName}} + template: + metadata: + labels: + app: {{.DeploymentName}} + spec: + containers: + - name: nginx + image: nginxinc/nginx-unprivileged + ports: + - containerPort: 80 +` + + scaledObjectTemplate = ` +apiVersion: keda.sh/v1alpha1 +kind: ScaledObject +metadata: + name: {{.ScaledObjectName}} + namespace: {{.TestNamespace}} + labels: + app: {{.DeploymentName}} +spec: + scaleTargetRef: + name: {{.DeploymentName}} + maxReplicaCount: 2 + minReplicaCount: 0 + cooldownPeriod: 1 + triggers: + - type: aws-cloudwatch + authenticationRef: + name: keda-trigger-auth-aws-credentials + metadata: + awsRegion: {{.AwsRegion}} + namespace: {{.CloudWatchMetricNamespace}} + dimensionName: {{.CloudWatchMetricDimensionName}} + dimensionValue: {{.CloudWatchMetricDimensionValue}} + metricName: {{.CloudWatchMetricName}} + targetMetricValue: "1" + minMetricValue: "1" + metricCollectionTime: "120" + metricStatPeriod: "60" +` +) + +var ( + testNamespace = fmt.Sprintf("%s-ns", testName) + deploymentName = fmt.Sprintf("%s-deployment", testName) + scaledObjectName = fmt.Sprintf("%s-so", testName) + secretName = fmt.Sprintf("%s-secret", testName) + cloudwatchMetricName = fmt.Sprintf("cw-%d", GetRandomNumber()) + awsAccessKeyID = os.Getenv("TF_AWS_ACCESS_KEY") + awsSecretAccessKey = os.Getenv("TF_AWS_SECRET_KEY") + awsRegion = os.Getenv("TF_AWS_REGION") + cloudwatchMetricNamespace = "DoesNotExist" + cloudwatchMetricDimensionName = "dimensionName" + cloudwatchMetricDimensionValue = "dimensionValue" + minReplicaCount = 0 + minMetricValueReplicaCount = 1 +) + +// This test is to verify that the scaler returns the minMetricValue when the metric +// value is null and ignoreNullValues is set to true. 
+func TestCloudWatchScalerWithMinMetricValue(t *testing.T) { + ctx := context.Background() + + // setup cloudwatch + cloudwatchClient, err := cloudwatch.NewClient(ctx, awsRegion, awsAccessKeyID, awsSecretAccessKey, "") + assert.Nil(t, err, "error creating cloudwatch client") + + // check that the metric in question is not already present, and is returning + // an empty set of values. + metricQuery := cloudwatch.CreateMetricDataInputForEmptyMetricValues(cloudwatchMetricNamespace, cloudwatchMetricName, cloudwatchMetricDimensionName, cloudwatchMetricDimensionValue) + metricData, err := cloudwatch.GetMetricData(ctx, cloudwatchClient, metricQuery) + require.Nil(t, err, "error getting metric data") + require.Nil(t, cloudwatch.ExpectEmptyMetricDataResults(metricData), "metric data should be empty") + + // Create kubernetes resources + kc := GetKubernetesClient(t) + data, templates := getTemplateData() + CreateKubernetesResources(t, kc, testNamespace, data, templates) + defer DeleteKubernetesResources(t, testNamespace, data, templates) + + assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, minReplicaCount, 60, 1), + "replica count should be %d after 1 minute", minMetricValueReplicaCount) + + // Allow a small amount of grace for stabilization, otherwise we will see the + // minMetricValue of 1 scale up the deployment from 0 to 1, as the deployment + // starts at a minReplicaCount of 0. The reason for this is to ensure that the + // scaler is still functioning when the metric value is null, as opposed to + // returning an error, and not scaling the workload at all. + time.Sleep(5 * time.Second) + + // Then check that the deployment did not scale further, as the metric query + // is returning null values, the scaler should evaluate the metric value as + // the minMetricValue of 1, and not scale the deployment further beyond this + // point. 
+ AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, testNamespace, minMetricValueReplicaCount, 60) +} + +func getTemplateData() (templateData, []Template) { + return templateData{ + TestNamespace: testNamespace, + DeploymentName: deploymentName, + ScaledObjectName: scaledObjectName, + SecretName: secretName, + AwsAccessKeyID: base64.StdEncoding.EncodeToString([]byte(awsAccessKeyID)), + AwsSecretAccessKey: base64.StdEncoding.EncodeToString([]byte(awsSecretAccessKey)), + AwsRegion: awsRegion, + CloudWatchMetricName: cloudwatchMetricName, + CloudWatchMetricNamespace: cloudwatchMetricNamespace, + CloudWatchMetricDimensionName: cloudwatchMetricDimensionName, + CloudWatchMetricDimensionValue: cloudwatchMetricDimensionValue, + }, []Template{ + {Name: "secretTemplate", Config: secretTemplate}, + {Name: "triggerAuthenticationTemplate", Config: triggerAuthenticationTemplate}, + {Name: "deploymentTemplate", Config: deploymentTemplate}, + {Name: "scaledObjectTemplate", Config: scaledObjectTemplate}, + } +} diff --git a/tests/scalers/aws/helpers/cloudwatch/cloudwatch.go b/tests/scalers/aws/helpers/cloudwatch/cloudwatch.go new file mode 100644 index 00000000000..d30434e47f1 --- /dev/null +++ b/tests/scalers/aws/helpers/cloudwatch/cloudwatch.go @@ -0,0 +1,70 @@ +//go:build e2e +// +build e2e + +package cloudwatch + +import ( + "context" + "fmt" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/credentials" + "github.com/aws/aws-sdk-go-v2/service/cloudwatch" + "github.com/aws/aws-sdk-go-v2/service/cloudwatch/types" +) + +// NewClient will provision a new Cloudwatch Client. 
+func NewClient(ctx context.Context, region, accessKeyID, secretAccessKey, sessionToken string) (*cloudwatch.Client, error) { + cfg, err := config.LoadDefaultConfig(ctx, config.WithRegion(region)) + if err != nil { + return nil, err + } + + cfg.Credentials = credentials.NewStaticCredentialsProvider(accessKeyID, secretAccessKey, sessionToken) + return cloudwatch.NewFromConfig(cfg), nil +} + +// CreateMetricDataInputForEmptyMetricValues will return a GetMetricDataInput with a single metric query +// that is expected to return no metric values. +func CreateMetricDataInputForEmptyMetricValues(metricNamespace, metricName, dimensionName, dimensionValue string) *cloudwatch.GetMetricDataInput { + return &cloudwatch.GetMetricDataInput{ + MetricDataQueries: []types.MetricDataQuery{ + { + Id: aws.String("m1"), + MetricStat: &types.MetricStat{ + Metric: &types.Metric{ + Namespace: &metricNamespace, + MetricName: &metricName, + Dimensions: []types.Dimension{ + { + Name: &dimensionName, + Value: &dimensionValue, + }, + }, + }, Period: aws.Int32(60), Stat: aws.String("Average"), + }, + }, + }, + // evaluate +/- 10 minutes from now to be sure we cover the query window + // as all e2e tests use a 300 second query window. + EndTime: aws.Time(time.Now().Add(time.Minute * 10)), + StartTime: aws.Time(time.Now().Add(-time.Minute * 10)), + } +} + +// GetMetricData will return the metric data for the given input. +func GetMetricData(ctx context.Context, cloudwatchClient *cloudwatch.Client, metricDataInput *cloudwatch.GetMetricDataInput) (*cloudwatch.GetMetricDataOutput, error) { + return cloudwatchClient.GetMetricData(ctx, metricDataInput) +} + +// ExpectEmptyMetricDataResults will evaluate the custom metric for any metric values, if any +// values are an error will be returned. 
+func ExpectEmptyMetricDataResults(metricData *cloudwatch.GetMetricDataOutput) error { + if len(metricData.MetricDataResults) != 1 || len(metricData.MetricDataResults[0].Values) > 0 { + return fmt.Errorf("found unexpected metric data results for metricData: %+v", metricData.MetricDataResults) + } + + return nil +} From a5e169e75702eaa45f7c805e007205da97b2b4e4 Mon Sep 17 00:00:00 2001 From: Rob Pickerill Date: Wed, 21 Aug 2024 12:39:06 +0100 Subject: [PATCH 11/16] Add connection name for the rabbitmq scaler (#6093) * add a static connection name Signed-off-by: robpickerill * Update pkg/scalers/rabbitmq_scaler.go Co-authored-by: Jorge Turrado Ferrero Signed-off-by: Rob Pickerill * add improvement to changelog Signed-off-by: robpickerill * add namepace and so name to conn name Signed-off-by: robpickerill * Update comment Signed-off-by: Jorge Turrado Ferrero --------- Signed-off-by: robpickerill Signed-off-by: Rob Pickerill Signed-off-by: Jorge Turrado Ferrero Co-authored-by: Jorge Turrado Ferrero Signed-off-by: Fira Curie --- CHANGELOG.md | 2 ++ pkg/scalers/rabbitmq_scaler.go | 36 +++++++++++++++++++---------- pkg/scalers/rabbitmq_scaler_test.go | 13 +++++++++++ 3 files changed, 39 insertions(+), 12 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 928a8128c72..5f86988e080 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -72,6 +72,8 @@ Here is an overview of all new **experimental** features: - **GCP Scalers**: Added custom time horizon in GCP scalers ([#5778](https://github.com/kedacore/keda/issues/5778)) - **GitHub Scaler**: Fixed pagination, fetching repository list ([#5738](https://github.com/kedacore/keda/issues/5738)) - **Kafka**: Fix logic to scale to zero on invalid offset even with earliest offsetResetPolicy ([#5689](https://github.com/kedacore/keda/issues/5689)) +- **RabbitMQ Scaler**: Add connection name for AMQP ([#5958](https://github.com/kedacore/keda/issues/5958)) +- **GitLab Runner Scaler**: Add new GitLab Runner scaler ([#XXX](https://github.com/kedacore/keda/issues/XXX)) ### Fixes diff 
--git a/pkg/scalers/rabbitmq_scaler.go b/pkg/scalers/rabbitmq_scaler.go index be2ac66995f..f30d92fa13a 100644 --- a/pkg/scalers/rabbitmq_scaler.go +++ b/pkg/scalers/rabbitmq_scaler.go @@ -69,6 +69,7 @@ type rabbitMQScaler struct { type rabbitMQMetadata struct { queueName string + connectionName string // name used for the AMQP connection mode string // QueueLength or MessageRate value float64 // trigger value (queue length or publish/sec. rate) activationValue float64 // activation value @@ -232,7 +233,9 @@ func resolveTLSAuthParams(config *scalersconfig.ScalerConfig, meta *rabbitMQMeta } func parseRabbitMQMetadata(config *scalersconfig.ScalerConfig) (*rabbitMQMetadata, error) { - meta := rabbitMQMetadata{} + meta := rabbitMQMetadata{ + connectionName: connectionName(config), + } // Resolve protocol type if err := resolveProtocol(config, &meta); err != nil { @@ -445,22 +448,25 @@ func parseTrigger(meta *rabbitMQMetadata, config *scalersconfig.ScalerConfig) (* } // getConnectionAndChannel returns an amqp connection. If enableTLS is true tls connection is made using -// -// the given ceClient cert, ceClient key,and CA certificate. If clientKeyPassword is not empty the provided password will be used to -// +// the given ceClient cert, ceClient key,and CA certificate. If clientKeyPassword is not empty the provided password will be used to // decrypt the given key. If enableTLS is disabled then amqp connection will be created without tls. 
func getConnectionAndChannel(host string, meta *rabbitMQMetadata) (*amqp.Connection, *amqp.Channel, error) { - var conn *amqp.Connection - var err error + amqpConfig := amqp.Config{ + Properties: amqp.Table{ + "connection_name": meta.connectionName, + }, + } + if meta.enableTLS { - tlsConfig, configErr := kedautil.NewTLSConfigWithPassword(meta.cert, meta.key, meta.keyPassword, meta.ca, meta.unsafeSsl) - if configErr != nil { - return nil, nil, configErr + tlsConfig, err := kedautil.NewTLSConfigWithPassword(meta.cert, meta.key, meta.keyPassword, meta.ca, meta.unsafeSsl) + if err != nil { + return nil, nil, err } - conn, err = amqp.DialTLS(host, tlsConfig) - } else { - conn, err = amqp.Dial(host) + + amqpConfig.TLSClientConfig = tlsConfig } + + conn, err := amqp.DialConfig(host, amqpConfig) if err != nil { return nil, nil, err } @@ -715,3 +721,9 @@ func (s *rabbitMQScaler) anonymizeRabbitMQError(err error) error { errorMessage := fmt.Sprintf("error inspecting rabbitMQ: %s", err) return fmt.Errorf(rabbitMQAnonymizePattern.ReplaceAllString(errorMessage, "user:password@")) } + +// connectionName is used to provide a deterministic AMQP connection name when +// connecting to RabbitMQ +func connectionName(config *scalersconfig.ScalerConfig) string { + return fmt.Sprintf("keda-%s-%s", config.ScalableObjectNamespace, config.ScalableObjectName) +} diff --git a/pkg/scalers/rabbitmq_scaler_test.go b/pkg/scalers/rabbitmq_scaler_test.go index 1d2ba1eacd2..ef705757a3c 100644 --- a/pkg/scalers/rabbitmq_scaler_test.go +++ b/pkg/scalers/rabbitmq_scaler_test.go @@ -746,3 +746,16 @@ func TestRegexQueueMissingError(t *testing.T) { } } } + +func TestConnectionName(t *testing.T) { + c := scalersconfig.ScalerConfig{ + ScalableObjectNamespace: "test-namespace", + ScalableObjectName: "test-name", + } + + connectionName := connectionName(&c) + + if connectionName != "keda-test-namespace-test-name" { + t.Error("Expected connection name to be keda-test-namespace-test-name but got", 
connectionName) + } +} From 6015f2d1b44c8942fff666a43251ce9491de3493 Mon Sep 17 00:00:00 2001 From: Fira Curie Date: Sun, 25 Aug 2024 02:42:21 +0200 Subject: [PATCH 12/16] add capability to parse *url.URL Signed-off-by: Fira Curie --- pkg/scalers/scalersconfig/typed_config.go | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/pkg/scalers/scalersconfig/typed_config.go b/pkg/scalers/scalersconfig/typed_config.go index b06e1478b88..a6a383132f0 100644 --- a/pkg/scalers/scalersconfig/typed_config.go +++ b/pkg/scalers/scalersconfig/typed_config.go @@ -273,6 +273,22 @@ func setConfigValueURLParams(params Params, valFromConfig string, field reflect. return nil } +// setConfigValueURL is a function that sets the value of the url.URL field +func setConfigValueURL(_ Params, valFromConfig string, field reflect.Value) error { + u, err := url.Parse(valFromConfig) + if err != nil { + return fmt.Errorf("expected url.URL or *url.URL, unable to parse %q: %w", valFromConfig, err) + } + + // If the field type is a pointer to url.URL (`*url.URL`), set the value directly + if field.Kind() == reflect.Ptr && field.Type().Elem() == reflect.TypeOf(url.URL{}) { + field.Set(reflect.ValueOf(u)) + return nil + } + + return nil +} + // setConfigValueMap is a function that sets the value of the map field func setConfigValueMap(params Params, valFromConfig string, field reflect.Value) error { field.Set(reflect.MakeMap(reflect.MapOf(field.Type().Key(), field.Type().Elem()))) @@ -373,6 +389,9 @@ func setConfigValueHelper(params Params, valFromConfig string, field reflect.Val if field.Type() == reflect.TypeOf(url.Values{}) { return setConfigValueURLParams(params, valFromConfig, field) } + if field.Type() == reflect.TypeOf(&url.URL{}) { + return setConfigValueURL(params, valFromConfig, field) + } if field.Kind() == reflect.Map { return setConfigValueMap(params, valFromConfig, field) } From 47140fe0b0ed847f99c59b1c0fa5c6e95c31f59c Mon Sep 17 00:00:00 2001 From: Fira Curie Date: Sun, 
25 Aug 2024 02:50:51 +0200 Subject: [PATCH 13/16] fix config parsing panic due to private fields Signed-off-by: Fira Curie --- pkg/scalers/gitlab_runner_scaler.go | 30 ++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/pkg/scalers/gitlab_runner_scaler.go b/pkg/scalers/gitlab_runner_scaler.go index 6665dce558c..11f475d7637 100644 --- a/pkg/scalers/gitlab_runner_scaler.go +++ b/pkg/scalers/gitlab_runner_scaler.go @@ -33,12 +33,12 @@ type gitlabRunnerScaler struct { } type gitlabRunnerMetadata struct { - gitlabAPIURL *url.URL `keda:"name=gitlabAPIURL, order=triggerMetadata, default=https://gitlab.com, optional"` - personalAccessToken string `keda:"name=personalAccessToken, order=authParams"` - projectID string `keda:"name=projectID, order=triggerMetadata"` + GitLabAPIURL *url.URL `keda:"name=gitlabAPIURL, order=triggerMetadata, default=https://gitlab.com, optional"` + PersonalAccessToken string `keda:"name=personalAccessToken, order=authParams"` + ProjectID string `keda:"name=projectID, order=triggerMetadata"` - targetPipelineQueueLength int64 `keda:"name=targetPipelineQueueLength, order=triggerMetadata, default=1"` - triggerIndex int + TargetPipelineQueueLength int64 `keda:"name=targetPipelineQueueLength, order=triggerMetadata, default=1, optional"` + TriggerIndex int } // NewGitLabRunnerScaler creates a new GitLab Runner Scaler @@ -55,10 +55,6 @@ func NewGitLabRunnerScaler(config *scalersconfig.ScalerConfig) (Scaler, error) { return nil, fmt.Errorf("error parsing GitLab Runner metadata: %w", err) } - uri := constructGitlabAPIPipelinesURL(*meta.gitlabAPIURL, meta.projectID, pipelineWaitingForResourceStatus) - - meta.gitlabAPIURL = &uri - return &gitlabRunnerScaler{ metricType: metricType, metadata: meta, @@ -70,11 +66,15 @@ func NewGitLabRunnerScaler(config *scalersconfig.ScalerConfig) (Scaler, error) { func parseGitLabRunnerMetadata(config *scalersconfig.ScalerConfig) (*gitlabRunnerMetadata, error) { meta := 
gitlabRunnerMetadata{} - meta.triggerIndex = config.TriggerIndex + meta.TriggerIndex = config.TriggerIndex if err := config.TypedConfig(&meta); err != nil { return nil, fmt.Errorf("error parsing gitlabRunner metadata: %w", err) } + uri := constructGitlabAPIPipelinesURL(*meta.GitLabAPIURL, meta.ProjectID, pipelineWaitingForResourceStatus) + + meta.GitLabAPIURL = &uri + return &meta, nil } @@ -88,15 +88,15 @@ func (s *gitlabRunnerScaler) GetMetricsAndActivity(ctx context.Context, metricNa metric := GenerateMetricInMili(metricName, float64(queueLen)) - return []external_metrics.ExternalMetricValue{metric}, queueLen >= s.metadata.targetPipelineQueueLength, nil + return []external_metrics.ExternalMetricValue{metric}, queueLen >= s.metadata.TargetPipelineQueueLength, nil } func (s *gitlabRunnerScaler) GetMetricSpecForScaling(_ context.Context) []v2.MetricSpec { externalMetric := &v2.ExternalMetricSource{ Metric: v2.MetricIdentifier{ - Name: GenerateMetricNameWithIndex(s.metadata.triggerIndex, kedautil.NormalizeString(fmt.Sprintf("gitlab-runner-%s", s.metadata.projectID))), + Name: GenerateMetricNameWithIndex(s.metadata.TriggerIndex, kedautil.NormalizeString(fmt.Sprintf("gitlab-runner-%s", s.metadata.ProjectID))), }, - Target: GetMetricTarget(s.metricType, s.metadata.targetPipelineQueueLength), + Target: GetMetricTarget(s.metricType, s.metadata.TargetPipelineQueueLength), } metricSpec := v2.MetricSpec{External: externalMetric, Type: externalMetricType} return []v2.MetricSpec{metricSpec} @@ -129,7 +129,7 @@ func (s *gitlabRunnerScaler) getPipelineCount(ctx context.Context, uri string) ( req.Header.Set("Accept", "application/json") req.Header.Set("Content-Type", "application/json") - req.Header.Set("PRIVATE-TOKEN", s.metadata.personalAccessToken) + req.Header.Set("PRIVATE-TOKEN", s.metadata.PersonalAccessToken) res, err := s.httpClient.Do(req) if err != nil { @@ -156,7 +156,7 @@ func (s *gitlabRunnerScaler) getPipelineQueueLength(ctx context.Context) (int64, page := 1 for ; 
page < maxGitlabAPIPageCount; page++ { - pagedURL := pagedURL(*s.metadata.gitlabAPIURL, fmt.Sprint(page)) + pagedURL := pagedURL(*s.metadata.GitLabAPIURL, fmt.Sprint(page)) gitlabPipelinesLen, err := s.getPipelineCount(ctx, pagedURL.String()) if err != nil { From e50182462df65a64ac7a85cec52cdc1ff810470c Mon Sep 17 00:00:00 2001 From: Fira Curie Date: Mon, 26 Aug 2024 00:14:15 +0200 Subject: [PATCH 14/16] add tests Signed-off-by: Fira Curie --- pkg/scalers/gitlab_runner_scaler_test.go | 424 +++++++++++++++++++++++ 1 file changed, 424 insertions(+) create mode 100644 pkg/scalers/gitlab_runner_scaler_test.go diff --git a/pkg/scalers/gitlab_runner_scaler_test.go b/pkg/scalers/gitlab_runner_scaler_test.go new file mode 100644 index 00000000000..ecf2c5768a4 --- /dev/null +++ b/pkg/scalers/gitlab_runner_scaler_test.go @@ -0,0 +1,424 @@ +package scalers + +import ( + "context" + "encoding/json" + "net/http" + "net/http/httptest" + "net/url" + "strconv" + "testing" + "time" + + "github.com/go-logr/logr" + "github.com/stretchr/testify/assert" + v2 "k8s.io/api/autoscaling/v2" + + "github.com/kedacore/keda/v2/pkg/scalers/scalersconfig" +) + +func TestParseGitLabRunnerMetadata(t *testing.T) { + // Create a properly initialized ScalerConfig with valid metadata. + config := &scalersconfig.ScalerConfig{ + TriggerMetadata: map[string]string{ + "gitlabAPIURL": "https://gitlab.com", + "projectID": "12345", + "targetPipelineQueueLength": "5", + }, + AuthParams: map[string]string{ + "personalAccessToken": "fake-token", + }, + GlobalHTTPTimeout: 10 * time.Second, + TriggerIndex: 0, + } + + // Attempt to parse the metadata. 
+ meta, err := parseGitLabRunnerMetadata(config) + if err != nil { + t.Fatalf("Expected no error, got %v", err) + } + + // Validate the parsed metadata + if meta.GitLabAPIURL.String() != "https://gitlab.com/api/v4/projects/12345/pipelines?per_page=200&status=waiting_for_resource" { + t.Errorf("Expected URL to be correctly formed, got %v", meta.GitLabAPIURL.String()) + } + + if meta.ProjectID != "12345" { + t.Errorf("Expected projectID to be '12345', got %v", meta.ProjectID) + } + + if meta.TargetPipelineQueueLength != 5 { + t.Errorf("Expected targetPipelineQueueLength to be 5, got %v", meta.TargetPipelineQueueLength) + } + + if meta.PersonalAccessToken != "fake-token" { + t.Errorf("Expected personalAccessToken to be 'fake-token', got %v", meta.PersonalAccessToken) + } +} + +func mustParseURL(rawURL string) *url.URL { + parsed, err := url.Parse(rawURL) + if err != nil { + panic(err) + } + return parsed +} + +func TestGitLabRunnerScaler_GetPipelineCount(t *testing.T) { + testCases := []struct { + name string + responseStatus int + responseBody interface{} + expectedCount int64 + expectError bool + }{ + { + name: "Valid response with pipelines", + responseStatus: http.StatusOK, + responseBody: []map[string]interface{}{ + {"id": 1}, + {"id": 2}, + {"id": 3}, + }, + expectedCount: 3, + expectError: false, + }, + { + name: "Valid response with no pipelines", + responseStatus: http.StatusOK, + responseBody: []map[string]interface{}{}, + expectedCount: 0, + expectError: false, + }, + { + name: "Unauthorized response", + responseStatus: http.StatusUnauthorized, + responseBody: map[string]string{"message": "401 Unauthorized"}, + expectedCount: 0, + expectError: true, + }, + { + name: "Invalid JSON response", + responseStatus: http.StatusOK, + responseBody: "invalid-json", + expectedCount: 0, + expectError: true, + }, + { + name: "Internal server error", + responseStatus: http.StatusInternalServerError, + responseBody: map[string]string{"message": "500 Internal Server 
Error"}, + expectedCount: 0, + expectError: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(tc.responseStatus) + if err := json.NewEncoder(w).Encode(tc.responseBody); err != nil { + t.Fatalf("failed to write response: %v", err) + } + })) + defer server.Close() + + meta := &gitlabRunnerMetadata{ + GitLabAPIURL: mustParseURL(server.URL), + PersonalAccessToken: "test-token", + } + + scaler := gitlabRunnerScaler{ + metadata: meta, + httpClient: http.DefaultClient, + logger: logr.Discard(), + } + + count, err := scaler.getPipelineCount(context.Background(), server.URL) + if tc.expectError { + assert.Error(t, err) + assert.Equal(t, tc.expectedCount, count) + } else { + assert.NoError(t, err) + assert.Equal(t, tc.expectedCount, count) + } + }) + } +} + +func TestGitLabRunnerScaler_GetPipelineQueueLength(t *testing.T) { + totalPipelines := 450 // More than one page + perPage := 200 + + // Create fake pipelines + createPipelines := func(count int) []map[string]interface{} { + pipelines := make([]map[string]interface{}, count) + for i := 0; i < count; i++ { + pipelines[i] = map[string]interface{}{ + "id": i + 1, + } + } + return pipelines + } + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + pageStr := r.URL.Query().Get("page") + page, _ := strconv.Atoi(pageStr) + start := (page - 1) * perPage + end := start + perPage + + if start >= totalPipelines { + w.WriteHeader(http.StatusOK) + _ = json.NewEncoder(w).Encode([]map[string]interface{}{}) + return + } + + if end > totalPipelines { + end = totalPipelines + } + + w.WriteHeader(http.StatusOK) + _ = json.NewEncoder(w).Encode(createPipelines(end - start)) + })) + defer server.Close() + + meta := &gitlabRunnerMetadata{ + GitLabAPIURL: mustParseURL(server.URL), + PersonalAccessToken: "test-token", + } + + scaler := gitlabRunnerScaler{ 
+ metadata: meta, + httpClient: http.DefaultClient, + logger: logr.Discard(), + } + + count, err := scaler.getPipelineQueueLength(context.Background()) + assert.NoError(t, err) + assert.Equal(t, int64(totalPipelines), count) +} + +func TestGitLabRunnerScaler_GetMetricsAndActivity(t *testing.T) { + testCases := []struct { + name string + pipelineQueueLength int64 + targetPipelineQueueLength int64 + expectedMetricValue int64 + expectedActive bool + expectError bool + }{ + { + name: "Queue length below target", + pipelineQueueLength: 2, + targetPipelineQueueLength: 5, + expectedMetricValue: 2, + expectedActive: false, + expectError: false, + }, + { + name: "Queue length equal to target", + pipelineQueueLength: 5, + targetPipelineQueueLength: 5, + expectedMetricValue: 5, + expectedActive: true, + expectError: false, + }, + { + name: "Queue length above target", + pipelineQueueLength: 10, + targetPipelineQueueLength: 5, + expectedMetricValue: 10, + expectedActive: true, + expectError: false, + }, + { + name: "Error retrieving queue length", + pipelineQueueLength: 0, + targetPipelineQueueLength: 5, + expectedMetricValue: 0, + expectedActive: false, + expectError: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + // Setup mock server + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if tc.expectError { + w.WriteHeader(http.StatusInternalServerError) + return + } + + page := r.URL.Query().Get("page") + + w.WriteHeader(http.StatusOK) + + pipelines := make([]map[string]interface{}, 0, tc.pipelineQueueLength) + if page == "1" { + for i := int64(0); i < tc.pipelineQueueLength; i++ { + pipelines = append(pipelines, map[string]interface{}{ + "id": i + 1, + }) + } + } + + _ = json.NewEncoder(w).Encode(pipelines) + })) + defer server.Close() + + meta := &gitlabRunnerMetadata{ + GitLabAPIURL: mustParseURL(server.URL), + PersonalAccessToken: "test-token", + TargetPipelineQueueLength: 
tc.targetPipelineQueueLength, + ProjectID: "12345", + } + + scaler := gitlabRunnerScaler{ + metadata: meta, + httpClient: http.DefaultClient, + logger: logr.Discard(), + } + + metrics, active, err := scaler.GetMetricsAndActivity(context.Background(), "gitlab-runner-queue-length") + if tc.expectError { + assert.Error(t, err) + assert.Nil(t, metrics, "Expected no metrics") + assert.False(t, active, "Expected not active") + } else { + assert.NoError(t, err) + assert.Len(t, metrics, 1, "Expected one metric") + assert.Equal(t, float64(tc.expectedMetricValue), metrics[0].Value.AsApproximateFloat64(), "Expected metric value") + assert.Equal(t, tc.expectedActive, active, "Expected active") + } + }) + } +} + +func TestGitLabRunnerScaler_GetMetricSpecForScaling(t *testing.T) { + meta := &gitlabRunnerMetadata{ + ProjectID: "12345", + TargetPipelineQueueLength: 5, + TriggerIndex: 0, + } + + scaler := gitlabRunnerScaler{ + metadata: meta, + metricType: v2.AverageValueMetricType, + } + + metricSpecs := scaler.GetMetricSpecForScaling(context.Background()) + assert.Len(t, metricSpecs, 1) + + metricSpec := metricSpecs[0] + assert.Equal(t, v2.ExternalMetricSourceType, metricSpec.Type) + assert.Equal(t, "s0-gitlab-runner-12345", metricSpec.External.Metric.Name) + assert.Equal(t, int64(5), metricSpec.External.Target.AverageValue.Value()) +} + +func TestGitLabRunnerScaler_Close(t *testing.T) { + meta := &gitlabRunnerMetadata{} + scaler := gitlabRunnerScaler{ + metadata: meta, + httpClient: http.DefaultClient, + } + + err := scaler.Close(context.Background()) + assert.NoError(t, err) +} + +func TestConstructGitlabAPIPipelinesURL(t *testing.T) { + baseURL := mustParseURL("https://gitlab.example.com") + projectID := "12345" + status := "waiting_for_resource" + + expectedURL := "https://gitlab.example.com/api/v4/projects/12345/pipelines?per_page=200&status=waiting_for_resource" + + resultURL := constructGitlabAPIPipelinesURL(*baseURL, projectID, status) + assert.Equal(t, expectedURL, 
resultURL.String()) +} + +func TestPagedURL(t *testing.T) { + baseURL := mustParseURL("https://gitlab.example.com/api/v4/projects/12345/pipelines?per_page=200&status=waiting_for_resource") + page := "2" + + expectedURL := "https://gitlab.example.com/api/v4/projects/12345/pipelines?page=2&per_page=200&status=waiting_for_resource" + + resultURL := pagedURL(*baseURL, page) + assert.Equal(t, expectedURL, resultURL.String()) +} + +func TestGetPipelineCount_RequestError(t *testing.T) { + meta := &gitlabRunnerMetadata{ + GitLabAPIURL: mustParseURL("http://invalid-url"), + PersonalAccessToken: "test-token", + } + + scaler := gitlabRunnerScaler{ + metadata: meta, + httpClient: http.DefaultClient, + logger: logr.Discard(), + } + + _, err := scaler.getPipelineCount(context.Background(), "http://invalid-url") + assert.Error(t, err) +} + +func TestGetPipelineQueueLength_MaxPagesExceeded(t *testing.T) { + serverCallCount := 0 + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + serverCallCount++ + w.WriteHeader(http.StatusOK) + _ = json.NewEncoder(w).Encode([]map[string]interface{}{ + {"id": 1}, + }) + })) + defer server.Close() + + meta := &gitlabRunnerMetadata{ + GitLabAPIURL: mustParseURL(server.URL), + PersonalAccessToken: "test-token", + } + + scaler := gitlabRunnerScaler{ + metadata: meta, + httpClient: http.DefaultClient, + logger: logr.Discard(), + } + + count, err := scaler.getPipelineQueueLength(context.Background()) + assert.NoError(t, err) + assert.Equal(t, int64(maxGitlabAPIPageCount), int64(serverCallCount)) + assert.Equal(t, int64(maxGitlabAPIPageCount), count) +} + +func TestGetPipelineQueueLength_RequestError(t *testing.T) { + meta := &gitlabRunnerMetadata{ + GitLabAPIURL: mustParseURL("http://invalid-url"), + PersonalAccessToken: "test-token", + } + + scaler := gitlabRunnerScaler{ + metadata: meta, + httpClient: http.DefaultClient, + logger: logr.Discard(), + } + + _, err := 
scaler.getPipelineQueueLength(context.Background()) + assert.Error(t, err) +} + +func TestNewGitLabRunnerScaler_InvalidMetricType(t *testing.T) { + config := &scalersconfig.ScalerConfig{ + TriggerMetadata: map[string]string{ + "projectID": "12345", + }, + AuthParams: map[string]string{ + "personalAccessToken": "test-token", + }, + MetricType: "InvalidType", + } + + _, err := NewGitLabRunnerScaler(config) + assert.Error(t, err) +} From 0de79567359da42efa04187f3cce8fc3bb42147a Mon Sep 17 00:00:00 2001 From: Fira Curie Date: Mon, 26 Aug 2024 00:43:23 +0200 Subject: [PATCH 15/16] fix scaler ordering Signed-off-by: Fira Curie --- pkg/scaling/scalers_builder.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/scaling/scalers_builder.go b/pkg/scaling/scalers_builder.go index 65ddb367881..8bf740d6af1 100644 --- a/pkg/scaling/scalers_builder.go +++ b/pkg/scaling/scalers_builder.go @@ -183,10 +183,10 @@ func buildScaler(ctx context.Context, client client.Client, triggerType string, return scalers.NewStackdriverScaler(ctx, config) case "gcp-storage": return scalers.NewGcsScaler(config) - case "gitlab-runner": - return scalers.NewGitLabRunnerScaler(config) case "github-runner": return scalers.NewGitHubRunnerScaler(config) + case "gitlab-runner": + return scalers.NewGitLabRunnerScaler(config) case "graphite": return scalers.NewGraphiteScaler(config) case "huawei-cloudeye": From 285de8b4459d002a6acb073dc0545715af7957fb Mon Sep 17 00:00:00 2001 From: Fira Curie Date: Tue, 27 Aug 2024 08:43:44 +0200 Subject: [PATCH 16/16] add a simple unit test for url.URL parsing; fix broken tests in gitlab scaler; apply code review suggestions Signed-off-by: Fira Curie --- pkg/scalers/gitlab_runner_scaler_test.go | 2 +- pkg/scalers/scalersconfig/typed_config.go | 4 ++-- .../scalersconfig/typed_config_test.go | 23 +++++++++++++++++++ 3 files changed, 26 insertions(+), 3 deletions(-) diff --git a/pkg/scalers/gitlab_runner_scaler_test.go 
b/pkg/scalers/gitlab_runner_scaler_test.go index ecf2c5768a4..e9bb49dac7e 100644 --- a/pkg/scalers/gitlab_runner_scaler_test.go +++ b/pkg/scalers/gitlab_runner_scaler_test.go @@ -283,7 +283,7 @@ func TestGitLabRunnerScaler_GetMetricsAndActivity(t *testing.T) { metrics, active, err := scaler.GetMetricsAndActivity(context.Background(), "gitlab-runner-queue-length") if tc.expectError { assert.Error(t, err) - assert.Nil(t, metrics, "Expected no metrics") + assert.Empty(t, metrics, "Expected no metrics") assert.False(t, active, "Expected not active") } else { assert.NoError(t, err) diff --git a/pkg/scalers/scalersconfig/typed_config.go b/pkg/scalers/scalersconfig/typed_config.go index a6a383132f0..e83960fb35a 100644 --- a/pkg/scalers/scalersconfig/typed_config.go +++ b/pkg/scalers/scalersconfig/typed_config.go @@ -274,7 +274,7 @@ func setConfigValueURLParams(params Params, valFromConfig string, field reflect. } // setConfigValueURL is a function that sets the value of the url.URL field -func setConfigValueURL(_ Params, valFromConfig string, field reflect.Value) error { +func setConfigValueURL(valFromConfig string, field reflect.Value) error { u, err := url.Parse(valFromConfig) if err != nil { return fmt.Errorf("expected url.URL or *url.URL, unable to parse %q: %w", valFromConfig, err) @@ -390,7 +390,7 @@ func setConfigValueHelper(params Params, valFromConfig string, field reflect.Val return setConfigValueURLParams(params, valFromConfig, field) } if field.Type() == reflect.TypeOf(&url.URL{}) { - return setConfigValueURL(params, valFromConfig, field) + return setConfigValueURL(valFromConfig, field) } if field.Kind() == reflect.Map { return setConfigValueMap(params, valFromConfig, field) diff --git a/pkg/scalers/scalersconfig/typed_config_test.go b/pkg/scalers/scalersconfig/typed_config_test.go index 26b189c8dc5..962622db1eb 100644 --- a/pkg/scalers/scalersconfig/typed_config_test.go +++ b/pkg/scalers/scalersconfig/typed_config_test.go @@ -326,6 +326,29 @@ func 
TestURLValues(t *testing.T) { Expect(ts.EndpointParams["key2"]).To(ConsistOf("value2")) } +func TestSetConfigValueURL(t *testing.T) { + RegisterTestingT(t) + + sc := &ScalerConfig{ + AuthParams: map[string]string{ + "endpoint": "https://example.com/path?query=1", + }, + } + + type testStruct struct { + Endpoint *url.URL `keda:"name=endpoint, order=authParams"` + } + + ts := testStruct{} + err := sc.TypedConfig(&ts) + Expect(err).To(BeNil()) + Expect(ts.Endpoint).ToNot(BeNil()) + Expect(ts.Endpoint.Scheme).To(Equal("https")) + Expect(ts.Endpoint.Host).To(Equal("example.com")) + Expect(ts.Endpoint.Path).To(Equal("/path")) + Expect(ts.Endpoint.RawQuery).To(Equal("query=1")) +} + // TestGenericMap tests the generic map type that is structurally similar to url.Values func TestGenericMap(t *testing.T) { RegisterTestingT(t)