diff --git a/CHANGELOG.md b/CHANGELOG.md index 27e076137b..1832fc50a1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,6 +9,7 @@ * [CHANGE] Change all max async concurrency default values `50` to `3` #6268 * [CHANGE] Change default value of `-blocks-storage.bucket-store.index-cache.multilevel.max-async-concurrency` from `50` to `3` #6265 * [CHANGE] Enable Compactor and Alertmanager in target all. #6204 +* [FEATURE] Ruler: Pagination support for List Rules API. #6299 * [FEATURE] Query Frontend/Querier: Add protobuf codec `-api.querier-default-codec` and the option to choose response compression type `-querier.response-compression`. #5527 * [FEATURE] Ruler: Experimental: Add `ruler.frontend-address` to allow query to query frontends instead of ingesters. #6151 * [FEATURE] Ruler: Minimize chances of missed rule group evaluations that can occur due to OOM kills, bad underlying nodes, or due to an unhealthy ruler that appears in the ring as healthy. This feature is enabled via `-ruler.enable-ha-evaluation` flag. #6129 diff --git a/integration/e2ecortex/client.go b/integration/e2ecortex/client.go index 60881d1008..2de73dbdd3 100644 --- a/integration/e2ecortex/client.go +++ b/integration/e2ecortex/client.go @@ -603,6 +603,8 @@ type RuleFilter struct { RuleNames []string RuleType string ExcludeAlerts string + MaxRuleGroup int + NextToken string } func addQueryParams(urlValues url.Values, paramName string, params ...string) { @@ -614,12 +616,12 @@ func addQueryParams(urlValues url.Values, paramName string, params ...string) { } // GetPrometheusRules fetches the rules from the Prometheus endpoint /api/v1/rules. -func (c *Client) GetPrometheusRules(filter RuleFilter) ([]*ruler.RuleGroup, error) { +func (c *Client) GetPrometheusRules(filter RuleFilter) ([]*ruler.RuleGroup, string, error) { // Create HTTP request req, err := http.NewRequest("GET", fmt.Sprintf("http://%s/api/prom/api/v1/rules", c.rulerAddress), nil) if err != nil { - return nil, err + return nil, "", err } req.Header.Set("X-Scope-OrgID", c.orgID) @@ -629,6 +631,12 @@ func (c *Client) GetPrometheusRules(filter RuleFilter) ([]*ruler.RuleGroup, erro addQueryParams(urlValues, "rule_group[]", filter.RuleGroupNames...) addQueryParams(urlValues, "type", filter.RuleType) addQueryParams(urlValues, "exclude_alerts", filter.ExcludeAlerts) + if filter.MaxRuleGroup > 0 { + addQueryParams(urlValues, "group_limit", strconv.Itoa(filter.MaxRuleGroup)) + } + if filter.NextToken != "" { + addQueryParams(urlValues, "group_next_token", filter.NextToken) + } req.URL.RawQuery = urlValues.Encode() ctx, cancel := context.WithTimeout(context.Background(), c.timeout) @@ -637,13 +645,13 @@ func (c *Client) GetPrometheusRules(filter RuleFilter) ([]*ruler.RuleGroup, erro // Execute HTTP request res, err := c.httpClient.Do(req.WithContext(ctx)) if err != nil { - return nil, err + return nil, "", err } defer res.Body.Close() body, err := io.ReadAll(res.Body) if err != nil { - return nil, err + return nil, "", err } // Decode the response. 
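Reviewer note: since GetPrometheusRules now returns a next-page token alongside the groups, here is a minimal sketch (not part of this diff; the helper name fetchAllRuleGroups is hypothetical) of how a caller can drain every page using only the RuleFilter fields added above; an empty token marks the last page:

	// fetchAllRuleGroups is a hypothetical helper that pages through the
	// List Rules API with the new three-value GetPrometheusRules signature.
	func fetchAllRuleGroups(c *e2ecortex.Client, pageSize int) ([]*ruler.RuleGroup, error) {
		var all []*ruler.RuleGroup
		filter := e2ecortex.RuleFilter{MaxRuleGroup: pageSize} // sent as group_limit
		for {
			groups, token, err := c.GetPrometheusRules(filter)
			if err != nil {
				return nil, err
			}
			all = append(all, groups...)
			if token == "" { // empty group_next_token: no more pages
				return all, nil
			}
			filter.NextToken = token // resume after the last group of this page
		}
	}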
@@ -654,14 +662,14 @@ func (c *Client) GetPrometheusRules(filter RuleFilter) ([]*ruler.RuleGroup, erro decoded := &response{} if err := json.Unmarshal(body, decoded); err != nil { - return nil, err + return nil, "", err } if decoded.Status != "success" { - return nil, fmt.Errorf("unexpected response status '%s'", decoded.Status) + return nil, "", fmt.Errorf("unexpected response status '%s'", decoded.Status) } - return decoded.Data.RuleGroups, nil + return decoded.Data.RuleGroups, decoded.Data.GroupNextToken, nil } // GetRuleGroups gets the configured rule groups from the ruler. diff --git a/integration/ruler_test.go b/integration/ruler_test.go index 121ce59887..f7d16507d1 100644 --- a/integration/ruler_test.go +++ b/integration/ruler_test.go @@ -14,6 +14,7 @@ import ( "net/http" "os" "path/filepath" + "sort" "strconv" "strings" "testing" @@ -34,6 +35,7 @@ import ( e2edb "github.com/cortexproject/cortex/integration/e2e/db" "github.com/cortexproject/cortex/integration/e2ecortex" "github.com/cortexproject/cortex/pkg/ruler" + "github.com/cortexproject/cortex/pkg/ruler/rulespb" "github.com/cortexproject/cortex/pkg/storage/tsdb" ) @@ -278,7 +280,7 @@ func TestRulerSharding(t *testing.T) { require.NoError(t, ruler2.WaitSumMetrics(e2e.Equals(numRulesGroups), "cortex_ruler_rule_groups_in_store")) // Fetch the rules and ensure they match the configured ones. - actualGroups, err := c.GetPrometheusRules(e2ecortex.DefaultFilter) + actualGroups, _, err := c.GetPrometheusRules(e2ecortex.DefaultFilter) require.NoError(t, err) var actualNames []string @@ -493,13 +495,412 @@ func testRulerAPIWithSharding(t *testing.T, enableRulesBackup bool) { } for name, tc := range testCases { t.Run(name, func(t *testing.T) { - actualGroups, err := c.GetPrometheusRules(tc.filter) + actualGroups, _, err := c.GetPrometheusRules(tc.filter) require.NoError(t, err) tc.resultCheckFn(t, actualGroups) }) } } +func TestRulesPaginationAPISharding(t *testing.T) { + testRulesPaginationAPIWithSharding(t, false) +} + +func TestRulesPaginationAPIShardingWithAPIRulesBackupEnabled(t *testing.T) { + testRulesPaginationAPIWithSharding(t, true) +} + +func testRulesPaginationAPIWithSharding(t *testing.T, enableRulesBackup bool) { + const numRulesGroups = 100 + + random := rand.New(rand.NewSource(time.Now().UnixNano())) + s, err := e2e.NewScenario(networkName) + require.NoError(t, err) + defer s.Close() + + // Generate multiple rule groups, with 1 rule each. + ruleGroups := make([]rulefmt.RuleGroup, numRulesGroups) + expectedNames := make([]string, numRulesGroups) + alertCount := 0 + evalInterval, _ := model.ParseDuration("1s") + for i := 0; i < numRulesGroups; i++ { + num := random.Intn(100) + var ruleNode yaml.Node + var exprNode yaml.Node + + ruleNode.SetString(fmt.Sprintf("rule_%d", i)) + exprNode.SetString(strconv.Itoa(i)) + ruleName := fmt.Sprintf("test_%d", i) + + expectedNames[i] = ruleName + if num%2 == 0 { + alertCount++ + ruleGroups[i] = rulefmt.RuleGroup{ + Name: ruleName, + Interval: evalInterval, + Rules: []rulefmt.RuleNode{{ + Alert: ruleNode, + Expr: exprNode, + }}, + } + } else { + ruleGroups[i] = rulefmt.RuleGroup{ + Name: ruleName, + Interval: evalInterval, + Rules: []rulefmt.RuleNode{{ + Record: ruleNode, + Expr: exprNode, + }}, + } + } + } + + // Start dependencies. + consul := e2edb.NewConsul() + minio := e2edb.NewMinio(9000, rulestoreBucketName) + require.NoError(t, s.StartAndWaitReady(consul, minio)) + + // Configure the ruler. 
+	overrides := map[string]string{
+		// Since we're not going to run any rule, we don't need the
+		// store-gateway to be configured to a valid address.
+		"-querier.store-gateway-addresses": "localhost:12345",
+		// Enable the bucket index so we can skip the initial bucket scan.
+		"-blocks-storage.bucket-store.bucket-index.enabled": "true",
+		"-ruler.poll-interval": "5s",
+	}
+	if enableRulesBackup {
+		overrides["-ruler.ring.replication-factor"] = "2"
+	}
+	rulerFlags := mergeFlags(
+		BlocksStorageFlags(),
+		RulerFlags(),
+		RulerShardingFlags(consul.NetworkHTTPEndpoint()),
+		overrides,
+	)
+
+	// Start rulers.
+	ruler1 := e2ecortex.NewRuler("ruler-1", consul.NetworkHTTPEndpoint(), rulerFlags, "")
+	ruler2 := e2ecortex.NewRuler("ruler-2", consul.NetworkHTTPEndpoint(), rulerFlags, "")
+	ruler3 := e2ecortex.NewRuler("ruler-3", consul.NetworkHTTPEndpoint(), rulerFlags, "")
+	rulers := e2ecortex.NewCompositeCortexService(ruler1, ruler2, ruler3)
+	require.NoError(t, s.StartAndWaitReady(ruler1, ruler2, ruler3))
+
+	// Upload rule groups to one of the rulers.
+	c, err := e2ecortex.NewClient("", "", "", ruler1.HTTPEndpoint(), "user-1")
+	require.NoError(t, err)
+
+	namespaceNames := []string{"test1", "test2", "test3", "test4", "test5"}
+	namespaceNameCount := make([]int, len(namespaceNames))
+	nsRand := rand.New(rand.NewSource(time.Now().UnixNano()))
+	ruleGroupToNSMap := map[string]string{}
+	for _, ruleGroup := range ruleGroups {
+		index := nsRand.Intn(len(namespaceNames))
+		namespaceNameCount[index] = namespaceNameCount[index] + 1
+		require.NoError(t, c.SetRuleGroup(ruleGroup, namespaceNames[index]))
+		ruleGroupToNSMap[ruleGroup.Name] = namespaceNames[index]
+	}
+
+	// Wait until rulers have loaded all rules.
+	require.NoError(t, rulers.WaitSumMetricsWithOptions(e2e.Equals(numRulesGroups), []string{"cortex_prometheus_rule_group_rules"}, e2e.WaitMissingMetrics))
+
+	// Since rulers have loaded all rules, we expect that rules have been sharded
+	// across the rulers.
+ require.NoError(t, ruler1.WaitSumMetrics(e2e.Less(numRulesGroups), "cortex_prometheus_rule_group_rules")) + require.NoError(t, ruler2.WaitSumMetrics(e2e.Less(numRulesGroups), "cortex_prometheus_rule_group_rules")) + + testCases := map[string]struct { + filter e2ecortex.RuleFilter + resultCheckFn func(assert.TestingT, []*ruler.RuleGroup, string, int) + iterations int + }{ + "List Rule Groups - Equal number of rule groups per page": { + filter: e2ecortex.RuleFilter{ + MaxRuleGroup: 20, + }, + resultCheckFn: func(t assert.TestingT, resultGroups []*ruler.RuleGroup, token string, iteration int) { + assert.Len(t, resultGroups, 20, "Expected %d rules but got %d", 20, len(resultGroups)) + if iteration < 4 { + assert.NotEmpty(t, token) + return + } + assert.Empty(t, token) + }, + iterations: 5, + }, + "List Rule Groups - Last page unequal": { + filter: e2ecortex.RuleFilter{ + MaxRuleGroup: 72, + }, + resultCheckFn: func(t assert.TestingT, resultGroups []*ruler.RuleGroup, token string, iteration int) { + if iteration == 0 { + assert.Len(t, resultGroups, 72, "Expected %d rules but got %d", 72, len(resultGroups)) + assert.NotEmpty(t, token) + return + } + assert.Len(t, resultGroups, 28, "Expected %d rules but got %d", 28, len(resultGroups)) + assert.Empty(t, token) + }, + iterations: 2, + }, + "List all rule groups": { + filter: e2ecortex.RuleFilter{}, + resultCheckFn: func(t assert.TestingT, resultGroups []*ruler.RuleGroup, token string, iteration int) { + assert.Len(t, resultGroups, 100, "Expected %d rules but got %d", 100, len(resultGroups)) + assert.Empty(t, token) + }, + iterations: 1, + }, + "List all rule groups - Max Rule Groups > Actual": { + filter: e2ecortex.RuleFilter{ + MaxRuleGroup: 200, + }, + resultCheckFn: func(t assert.TestingT, resultGroups []*ruler.RuleGroup, token string, iteration int) { + assert.Len(t, resultGroups, 100, "Expected %d rules but got %d", 100, len(resultGroups)) + assert.Empty(t, token) + }, + iterations: 1, + }, + } + + // For each test case, fetch the rules with configured filters, and ensure the results match. + if enableRulesBackup { + err := ruler2.Kill() // if rules backup is enabled the APIs should be able to handle a ruler going down + require.NoError(t, err) + } + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + filter := tc.filter + for i := 0; i < tc.iterations; i++ { + actualGroups, token, err := c.GetPrometheusRules(filter) + require.NoError(t, err) + tc.resultCheckFn(t, actualGroups, token, i) + filter.NextToken = token + } + }) + } +} + +func TestRulesPaginationAPIWithShardingAndNextToken(t *testing.T) { + const numRulesGroups = 100 + + random := rand.New(rand.NewSource(time.Now().UnixNano())) + s, err := e2e.NewScenario(networkName) + require.NoError(t, err) + defer s.Close() + + // Generate multiple rule groups, with 1 rule each. 
+	ruleGroups := make([]rulefmt.RuleGroup, numRulesGroups)
+	expectedNames := make([]string, numRulesGroups)
+	alertCount := 0
+	evalInterval, _ := model.ParseDuration("1s")
+	for i := 0; i < numRulesGroups; i++ {
+		num := random.Intn(100)
+		var ruleNode yaml.Node
+		var exprNode yaml.Node
+
+		ruleNode.SetString(fmt.Sprintf("rule_%d", i))
+		exprNode.SetString(strconv.Itoa(i))
+		ruleName := fmt.Sprintf("test_%d", i)
+
+		expectedNames[i] = ruleName
+		if num%2 == 0 {
+			alertCount++
+			ruleGroups[i] = rulefmt.RuleGroup{
+				Name:     ruleName,
+				Interval: evalInterval,
+				Rules: []rulefmt.RuleNode{{
+					Alert: ruleNode,
+					Expr:  exprNode,
+				}},
+			}
+		} else {
+			ruleGroups[i] = rulefmt.RuleGroup{
+				Name:     ruleName,
+				Interval: evalInterval,
+				Rules: []rulefmt.RuleNode{{
+					Record: ruleNode,
+					Expr:   exprNode,
+				}},
+			}
+		}
+	}
+
+	// Start dependencies.
+	consul := e2edb.NewConsul()
+	minio := e2edb.NewMinio(9000, rulestoreBucketName)
+	require.NoError(t, s.StartAndWaitReady(consul, minio))
+
+	// Configure the ruler.
+	overrides := map[string]string{
+		// Since we're not going to run any rule, we don't need the
+		// store-gateway to be configured to a valid address.
+		"-querier.store-gateway-addresses": "localhost:12345",
+		// Enable the bucket index so we can skip the initial bucket scan.
+		"-blocks-storage.bucket-store.bucket-index.enabled": "true",
+		"-ruler.poll-interval": "5s",
+	}
+	overrides["-ruler.ring.replication-factor"] = "2"
+
+	rulerFlags := mergeFlags(
+		BlocksStorageFlags(),
+		RulerFlags(),
+		RulerShardingFlags(consul.NetworkHTTPEndpoint()),
+		overrides,
+	)
+
+	// Start rulers.
+	ruler1 := e2ecortex.NewRuler("ruler-1", consul.NetworkHTTPEndpoint(), rulerFlags, "")
+	ruler2 := e2ecortex.NewRuler("ruler-2", consul.NetworkHTTPEndpoint(), rulerFlags, "")
+	ruler3 := e2ecortex.NewRuler("ruler-3", consul.NetworkHTTPEndpoint(), rulerFlags, "")
+	rulers := e2ecortex.NewCompositeCortexService(ruler1, ruler2, ruler3)
+	require.NoError(t, s.StartAndWaitReady(ruler1, ruler2, ruler3))
+
+	// Upload rule groups to one of the rulers.
+	c, err := e2ecortex.NewClient("", "", "", ruler1.HTTPEndpoint(), "user-1")
+	require.NoError(t, err)
+
+	namespaceNames := []string{"test1", "test2", "test3", "test4", "test5"}
+	namespaceNameCount := make([]int, len(namespaceNames))
+	nsRand := rand.New(rand.NewSource(time.Now().UnixNano()))
+
+	groupStateDescs := make([]*ruler.GroupStateDesc, len(ruleGroups))
+
+	for i, ruleGroup := range ruleGroups {
+		index := nsRand.Intn(len(namespaceNames))
+		namespaceNameCount[index] = namespaceNameCount[index] + 1
+		require.NoError(t, c.SetRuleGroup(ruleGroup, namespaceNames[index]))
+		groupStateDescs[i] = &ruler.GroupStateDesc{
+			Group: &rulespb.RuleGroupDesc{
+				Name:      ruleGroup.Name,
+				Namespace: namespaceNames[index],
+			},
+		}
+	}
+
+	sort.Sort(ruler.PaginatedGroupStates(groupStateDescs))
+
+	// Wait until rulers have loaded all rules.
+	require.NoError(t, rulers.WaitSumMetricsWithOptions(e2e.Equals(numRulesGroups), []string{"cortex_prometheus_rule_group_rules"}, e2e.WaitMissingMetrics))
+
+	// Since rulers have loaded all rules, we expect that rules have been sharded
+	// across the rulers.
+ require.NoError(t, ruler1.WaitSumMetrics(e2e.Less(numRulesGroups), "cortex_prometheus_rule_group_rules")) + require.NoError(t, ruler2.WaitSumMetrics(e2e.Less(numRulesGroups), "cortex_prometheus_rule_group_rules")) + + testCases := map[string]struct { + filter e2ecortex.RuleFilter + resultCheckFn func(assert.TestingT, []*ruler.RuleGroup, string, int) + iterations int + tokens []string + }{ + "List Rule Groups - Equal number of rule groups per page": { + filter: e2ecortex.RuleFilter{ + MaxRuleGroup: 20, + }, + resultCheckFn: func(t assert.TestingT, resultGroups []*ruler.RuleGroup, token string, iteration int) { + assert.Len(t, resultGroups, 20, "Expected %d rules but got %d", 20, len(resultGroups)) + }, + iterations: 5, + tokens: []string{ + ruler.GetRuleGroupNextToken(groupStateDescs[19].Group.Namespace, groupStateDescs[19].Group.Name), + ruler.GetRuleGroupNextToken(groupStateDescs[39].Group.Namespace, groupStateDescs[39].Group.Name), + ruler.GetRuleGroupNextToken(groupStateDescs[59].Group.Namespace, groupStateDescs[59].Group.Name), + ruler.GetRuleGroupNextToken(groupStateDescs[79].Group.Namespace, groupStateDescs[79].Group.Name), + "", + }, + }, + "List Rule Groups - Retrieve page 2 and 3": { + filter: e2ecortex.RuleFilter{ + MaxRuleGroup: 20, + NextToken: ruler.GetRuleGroupNextToken(groupStateDescs[19].Group.Namespace, groupStateDescs[19].Group.Name), + }, + resultCheckFn: func(t assert.TestingT, resultGroups []*ruler.RuleGroup, token string, iteration int) { + assert.Len(t, resultGroups, 20, "Expected %d rules but got %d", 20, len(resultGroups)) + }, + iterations: 2, + tokens: []string{ + ruler.GetRuleGroupNextToken(groupStateDescs[39].Group.Namespace, groupStateDescs[39].Group.Name), + ruler.GetRuleGroupNextToken(groupStateDescs[59].Group.Namespace, groupStateDescs[59].Group.Name), + }, + }, + } + + // For each test case, fetch the rules with configured filters, and ensure the results match. + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + filter := tc.filter + for i := 0; i < tc.iterations; i++ { + actualGroups, token, err := c.GetPrometheusRules(filter) + require.NoError(t, err) + tc.resultCheckFn(t, actualGroups, token, i) + require.Equal(t, tc.tokens[i], token) + filter.NextToken = token + } + }) + } +} + +func TestRulesAPIWithNoRules(t *testing.T) { + s, err := e2e.NewScenario(networkName) + require.NoError(t, err) + defer s.Close() + + // Start dependencies. + consul := e2edb.NewConsul() + minio := e2edb.NewMinio(9000, rulestoreBucketName) + require.NoError(t, s.StartAndWaitReady(consul, minio)) + + // Configure the ruler. + overrides := map[string]string{ + // Since we're not going to run any rule, we don't need the + // store-gateway to be configured to a valid address. + "-querier.store-gateway-addresses": "localhost:12345", + // Enable the bucket index so we can skip the initial bucket scan. + "-blocks-storage.bucket-store.bucket-index.enabled": "true", + "-ruler.poll-interval": "5s", + } + + rulerFlags := mergeFlags( + BlocksStorageFlags(), + RulerFlags(), + RulerShardingFlags(consul.NetworkHTTPEndpoint()), + overrides, + ) + + // Start rulers. 
+ ruler1 := e2ecortex.NewRuler("ruler-1", consul.NetworkHTTPEndpoint(), rulerFlags, "") + ruler2 := e2ecortex.NewRuler("ruler-2", consul.NetworkHTTPEndpoint(), rulerFlags, "") + ruler3 := e2ecortex.NewRuler("ruler-3", consul.NetworkHTTPEndpoint(), rulerFlags, "") + require.NoError(t, s.StartAndWaitReady(ruler1, ruler2, ruler3)) + + time.Sleep(5 * time.Second) + c, err := e2ecortex.NewClient("", "", "", ruler1.HTTPEndpoint(), "user-1") + require.NoError(t, err) + + testCases := map[string]struct { + filter e2ecortex.RuleFilter + }{ + "List Rule Groups With Filter": { + filter: e2ecortex.RuleFilter{ + MaxRuleGroup: 20, + }, + }, + "List All Rule Groups With No Filter": { + filter: e2ecortex.RuleFilter{}, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + actualGroups, token, err := c.GetPrometheusRules(tc.filter) + require.NoError(t, err) + assert.Empty(t, actualGroups) + assert.Empty(t, token) + }) + } +} + func TestRulerAlertmanager(t *testing.T) { var namespaceOne = "test_/encoded_+namespace/?" ruleGroup := createTestRuleGroup(t) @@ -979,7 +1380,7 @@ func TestRulerDisablesRuleGroups(t *testing.T) { require.NoError(t, ruler.WaitSumMetricsWithOptions(e2e.Equals(1), []string{"cortex_prometheus_rule_group_rules"}, e2e.WithLabelMatchers(m1), e2e.WaitMissingMetrics)) filter := e2ecortex.RuleFilter{} - actualGroups, err := c.GetPrometheusRules(filter) + actualGroups, _, err := c.GetPrometheusRules(filter) require.NoError(t, err) assert.Equal(t, 1, len(actualGroups)) assert.Equal(t, "good_rule", actualGroups[0].Name) @@ -1125,7 +1526,7 @@ func TestRulerHAEvaluation(t *testing.T) { // assumes ownership, it might not immediately evaluate until it's time to evaluate. The following sleep is to ensure the // rulers have evaluated the rule groups time.Sleep(2100 * time.Millisecond) - results, err := c.GetPrometheusRules(e2ecortex.RuleFilter{}) + results, _, err := c.GetPrometheusRules(e2ecortex.RuleFilter{}) require.NoError(t, err) require.Equal(t, numRulesGroups, len(results)) for _, v := range results { @@ -1199,7 +1600,7 @@ func TestRulerKeepFiring(t *testing.T) { // Wait until rule group has tried to evaluate the rule. require.NoError(t, ruler.WaitSumMetricsWithOptions(e2e.GreaterOrEqual(1), []string{"cortex_prometheus_rule_evaluations_total"}, e2e.WithLabelMatchers(m), e2e.WaitMissingMetrics)) - groups, err := c.GetPrometheusRules(e2ecortex.RuleFilter{ + groups, _, err := c.GetPrometheusRules(e2ecortex.RuleFilter{ RuleNames: []string{ruleName}, }) require.NoError(t, err) @@ -1216,7 +1617,7 @@ func TestRulerKeepFiring(t *testing.T) { // Wait until rule group has tried to evaluate the rule. 
require.NoError(t, ruler.WaitSumMetricsWithOptions(e2e.GreaterOrEqual(5), []string{"cortex_prometheus_rule_evaluations_total"}, e2e.WithLabelMatchers(m), e2e.WaitMissingMetrics))
 
-	updatedGroups, err := c.GetPrometheusRules(e2ecortex.RuleFilter{
+	updatedGroups, _, err := c.GetPrometheusRules(e2ecortex.RuleFilter{
 		RuleNames: []string{ruleName},
 	})
 	require.NoError(t, err)
@@ -1231,7 +1632,7 @@ func TestRulerKeepFiring(t *testing.T) {
 	require.Greater(t, alert.Alerts[0].KeepFiringSince.UnixNano(), ts.UnixNano(), "KeepFiringSince value should be after expression is resolved")
 
 	time.Sleep(10 * time.Second) // Sleep beyond keepFiringFor time
-	updatedGroups, err = c.GetPrometheusRules(e2ecortex.RuleFilter{
+	updatedGroups, _, err = c.GetPrometheusRules(e2ecortex.RuleFilter{
 		RuleNames: []string{ruleName},
 	})
 	require.NoError(t, err)
diff --git a/pkg/ruler/api.go b/pkg/ruler/api.go
index 0294a78c61..90aa09e8da 100644
--- a/pkg/ruler/api.go
+++ b/pkg/ruler/api.go
@@ -50,7 +50,8 @@ type Alert struct {
 
 // RuleDiscovery has info for all rules
 type RuleDiscovery struct {
-	RuleGroups []*RuleGroup `json:"groups"`
+	RuleGroups     []*RuleGroup `json:"groups"`
+	GroupNextToken string       `json:"groupNextToken,omitempty"`
 }
 
 // RuleGroup has info for rules which are part of a group
@@ -97,6 +98,11 @@ type recordingRule struct {
 	EvaluationTime float64 `json:"evaluationTime"`
 }
 
+type listRulesPaginationRequest struct {
+	MaxRuleGroups int32
+	NextToken     string
+}
+
 // API is used to handle HTTP requests for the ruler service
 type API struct {
 	ruler *Ruler
@@ -160,6 +166,12 @@ func (a *API) PrometheusRules(w http.ResponseWriter, req *http.Request) {
 		return
 	}
 
+	paginationRequest, err := parseListRulesPaginationRequest(req)
+	if err != nil {
+		util_api.RespondError(logger, w, v1.ErrBadData, err.Error(), http.StatusBadRequest)
+		return
+	}
+
 	rulesRequest := RulesRequest{
 		RuleNames:      req.Form["rule_name[]"],
 		RuleGroupNames: req.Form["rule_group[]"],
@@ -169,19 +181,25 @@ func (a *API) PrometheusRules(w http.ResponseWriter, req *http.Request) {
 		Health:         health,
 		Matchers:       req.Form["match[]"],
 		ExcludeAlerts:  excludeAlerts,
+		MaxRuleGroups:  paginationRequest.MaxRuleGroups,
+		NextToken:      paginationRequest.NextToken,
 	}
 
 	w.Header().Set("Content-Type", "application/json")
-	rgs, err := a.ruler.GetRules(req.Context(), rulesRequest)
+	response, err := a.ruler.GetRules(req.Context(), rulesRequest)
 
 	if err != nil {
 		util_api.RespondError(logger, w, v1.ErrServer, err.Error(), http.StatusInternalServerError)
 		return
 	}
 
-	groups := make([]*RuleGroup, 0, len(rgs))
+	if response.Groups == nil {
+		response.Groups = make([]*GroupStateDesc, 0)
+	}
 
-	for _, g := range rgs {
+	groups := make([]*RuleGroup, 0, len(response.Groups))
+
+	for _, g := range response.Groups {
 		grp := RuleGroup{
 			Name: g.Group.Name,
 			File: g.Group.Namespace,
@@ -239,7 +257,6 @@ func (a *API) PrometheusRules(w http.ResponseWriter, req *http.Request) {
 		groups = append(groups, &grp)
 	}
 
-	// keep data.groups are in order
 	sort.Slice(groups, func(i, j int) bool {
 		if groups[i].File == groups[j].File {
 			return groups[i].Name < groups[j].Name
@@ -249,7 +266,7 @@ func (a *API) PrometheusRules(w http.ResponseWriter, req *http.Request) {
 
 	b, err := json.Marshal(&util_api.Response{
 		Status: "success",
-		Data:   &RuleDiscovery{RuleGroups: groups},
+		Data:   &RuleDiscovery{RuleGroups: groups, GroupNextToken: response.NextToken},
 	})
 	if err != nil {
 		level.Error(logger).Log("msg", "error marshaling json response", "err", err)
@@ -263,6 +280,44 @@ func (a *API) PrometheusRules(w http.ResponseWriter, req
*http.Request) {
 	}
 }
 
+func parseListRulesPaginationRequest(req *http.Request) (listRulesPaginationRequest, error) {
+	var (
+		returnMaxRuleGroups = int32(-1)
+	)
+
+	maxGroups := req.URL.Query().Get("group_limit")
+	nextToken := req.URL.Query().Get("group_next_token")
+
+	if nextToken != "" && maxGroups == "" {
+		return listRulesPaginationRequest{
+			MaxRuleGroups: -1,
+			NextToken:     "",
+		}, errors.New("group_limit needs to be present in order to paginate over the groups")
+	}
+
+	if maxGroups != "" {
+		parsedMaxGroups, err := strconv.ParseInt(maxGroups, 10, 32)
+		if err != nil {
+			return listRulesPaginationRequest{
+				MaxRuleGroups: -1,
+				NextToken:     "",
+			}, errors.New("group_limit needs to be a valid number")
+		}
+		if parsedMaxGroups <= 0 {
+			return listRulesPaginationRequest{
+				MaxRuleGroups: -1,
+				NextToken:     "",
+			}, errors.New("group_limit needs to be greater than 0")
+		}
+		returnMaxRuleGroups = int32(parsedMaxGroups)
+	}
+
+	return listRulesPaginationRequest{
+		MaxRuleGroups: returnMaxRuleGroups,
+		NextToken:     nextToken,
+	}, nil
+}
+
 func parseExcludeAlerts(r *http.Request) (bool, error) {
 	excludeAlertsParam := strings.ToLower(r.URL.Query().Get("exclude_alerts"))
@@ -288,15 +343,18 @@ func (a *API) PrometheusAlerts(w http.ResponseWriter, req *http.Request) {
 	w.Header().Set("Content-Type", "application/json")
 
 	rulesRequest := RulesRequest{
-		Type: alertingRuleFilter,
+		Type:          alertingRuleFilter,
+		MaxRuleGroups: -1,
 	}
 
-	rgs, err := a.ruler.GetRules(req.Context(), rulesRequest)
+	rulesResponse, err := a.ruler.GetRules(req.Context(), rulesRequest)
 
 	if err != nil {
 		util_api.RespondError(logger, w, v1.ErrServer, err.Error(), http.StatusInternalServerError)
 		return
 	}
 
+	rgs := rulesResponse.Groups
+
 	alerts := []*Alert{}
 
 	for _, g := range rgs {
diff --git a/pkg/ruler/merger.go b/pkg/ruler/merger.go
index 7ae7e69317..1a14e67c4c 100644
--- a/pkg/ruler/merger.go
+++ b/pkg/ruler/merger.go
@@ -1,6 +1,7 @@
 package ruler
 
 import (
+	"sort"
 	"time"
 
 	promRules "github.com/prometheus/prometheus/rules"
@@ -9,26 +10,51 @@ import (
 // mergeGroupStateDesc removes duplicates from the provided []*GroupStateDesc by keeping the GroupStateDesc with the
 // latest information. It uses the EvaluationTimestamp of the GroupStateDesc and the EvaluationTimestamp of the
 // ActiveRules in a GroupStateDesc to determine which GroupStateDesc has the latest information.
-func mergeGroupStateDesc(in []*GroupStateDesc) []*GroupStateDesc {
+// It also truncates rule groups if maxRuleGroups > 0.
+func mergeGroupStateDesc(ruleResponses []*RulesResponse, maxRuleGroups int32, dedup bool) *RulesResponse {
+
+	var groupsStateDescs []*GroupStateDesc
+
+	for _, resp := range ruleResponses {
+		groupsStateDescs = append(groupsStateDescs, resp.Groups...)
+	}
+
 	states := make(map[string]*GroupStateDesc)
 	rgTime := make(map[string]time.Time)
 
-	for _, state := range in {
-		latestTs := state.EvaluationTimestamp
-		for _, r := range state.ActiveRules {
-			if latestTs.Before(r.EvaluationTimestamp) {
-				latestTs = r.EvaluationTimestamp
+	groups := make([]*GroupStateDesc, 0)
+	if dedup {
+		for _, state := range groupsStateDescs {
+			latestTs := state.EvaluationTimestamp
+			for _, r := range state.ActiveRules {
+				if latestTs.Before(r.EvaluationTimestamp) {
+					latestTs = r.EvaluationTimestamp
+				}
 			}
+			key := promRules.GroupKey(state.Group.Namespace, state.Group.Name)
+			ts, ok := rgTime[key]
+			if !ok || ts.Before(latestTs) {
+				states[key] = state
+				rgTime[key] = latestTs
+			}
+		}
+		for _, state := range states {
+			groups = append(groups, state)
 		}
-		key := promRules.GroupKey(state.Group.Namespace, state.Group.Name)
-		ts, ok := rgTime[key]
-		if !ok || ts.Before(latestTs) {
-			states[key] = state
-			rgTime[key] = latestTs
+	} else {
+		groups = groupsStateDescs
+	}
+
+	if maxRuleGroups > 0 {
+		// Need to sort here before we truncate.
+		sort.Sort(PaginatedGroupStates(groups))
+		result, nextToken := generatePage(groups, int(maxRuleGroups))
+		return &RulesResponse{
+			Groups:    result,
+			NextToken: nextToken,
 		}
 	}
-	groups := make([]*GroupStateDesc, 0, len(states))
-	for _, state := range states {
-		groups = append(groups, state)
+	return &RulesResponse{
+		Groups:    groups,
+		NextToken: "",
 	}
-	return groups
 }
diff --git a/pkg/ruler/merger_test.go b/pkg/ruler/merger_test.go
index d4bae6d008..bc002b112b 100644
--- a/pkg/ruler/merger_test.go
+++ b/pkg/ruler/merger_test.go
@@ -69,44 +69,142 @@ func TestMergeGroupStateDesc(t *testing.T) {
 	}
 
 	type testCase struct {
-		input          []*GroupStateDesc
-		expectedOutput []*GroupStateDesc
+		input          []*RulesResponse
+		expectedOutput *RulesResponse
+		maxRuleGroups  int32
 	}
 
 	testCases := map[string]testCase{
 		"No duplicate": {
-			input:          []*GroupStateDesc{&gs1, &gs2},
-			expectedOutput: []*GroupStateDesc{&gs1, &gs2},
+			input: []*RulesResponse{
+				{
+					Groups:    []*GroupStateDesc{&gs1, &gs2},
+					NextToken: "",
+				},
+			},
+			expectedOutput: &RulesResponse{
+				Groups:    []*GroupStateDesc{&gs1, &gs2},
+				NextToken: "",
+			},
+			maxRuleGroups: 2,
 		},
 		"No duplicate but not evaluated": {
-			input:          []*GroupStateDesc{&gs1NotRun, &gs2NotRun},
-			expectedOutput: []*GroupStateDesc{&gs1NotRun, &gs2NotRun},
+			input: []*RulesResponse{
+				{
+					Groups:    []*GroupStateDesc{&gs1NotRun, &gs2NotRun},
+					NextToken: "",
+				},
+			},
+			expectedOutput: &RulesResponse{
+				Groups:    []*GroupStateDesc{&gs1NotRun, &gs2NotRun},
+				NextToken: "",
+			},
+			maxRuleGroups: 2,
 		},
 		"With exact duplicate": {
-			input:          []*GroupStateDesc{&gs1, &gs2NotRun, &gs1, &gs2NotRun},
-			expectedOutput: []*GroupStateDesc{&gs1, &gs2NotRun},
+			input: []*RulesResponse{
+				{
+					Groups:    []*GroupStateDesc{&gs1, &gs2NotRun},
+					NextToken: "",
+				},
+				{
+					Groups:    []*GroupStateDesc{&gs1, &gs2NotRun},
+					NextToken: "",
+				},
+			},
+			expectedOutput: &RulesResponse{
+				Groups:    []*GroupStateDesc{&gs1, &gs2NotRun},
+				NextToken: "",
+			},
+			maxRuleGroups: 2,
 		},
 		"With duplicates that are not evaluated": {
-			input:          []*GroupStateDesc{&gs1, &gs2, &gs1NotRun, &gs2NotRun},
-			expectedOutput: []*GroupStateDesc{&gs1, &gs2},
+			input: []*RulesResponse{
+				{
+					Groups:    []*GroupStateDesc{&gs1, &gs2},
+					NextToken: "",
+				},
+				{
+					Groups:    []*GroupStateDesc{&gs1NotRun},
+					NextToken: "",
+				},
+				{
+					Groups:    []*GroupStateDesc{&gs2NotRun},
+					NextToken: "",
+				},
+			},
+			expectedOutput: &RulesResponse{
+				Groups:    []*GroupStateDesc{&gs1, &gs2},
+				NextToken: "",
+			},
+			maxRuleGroups: 2,
 		},
 		"With duplicate with a newer rule evaluation": {
-			input:          []*GroupStateDesc{&gs3, &gs1, &gs2, &gs1NotRun},
-			expectedOutput: []*GroupStateDesc{&gs1, &gs3},
+			input: []*RulesResponse{
+				{
+					Groups:    []*GroupStateDesc{&gs3},
+					NextToken: GetRuleGroupNextToken(gs3.Group.Namespace, gs3.Group.Name),
+				},
+				{
+					Groups:    []*GroupStateDesc{&gs1},
+					NextToken: "",
+				},
+				{
+					Groups:    []*GroupStateDesc{&gs2},
+					NextToken: "",
+				},
+				{
+					Groups:    []*GroupStateDesc{&gs1NotRun},
+					NextToken: "",
+				},
+			},
+			expectedOutput: &RulesResponse{
+				Groups:    []*GroupStateDesc{&gs1, &gs3},
+				NextToken: "",
+			},
+			maxRuleGroups: 2,
+		},
+		"With duplicate with a newer rule evaluation - pagination": {
+			input: []*RulesResponse{
+				{
+					Groups:    []*GroupStateDesc{&gs3},
+					NextToken: GetRuleGroupNextToken(gs3.Group.Namespace, gs3.Group.Name),
+				},
+				{
+					Groups:    []*GroupStateDesc{&gs1},
+					NextToken: GetRuleGroupNextToken(gs1.Group.Namespace, gs1.Group.Name),
+				},
+				{
+					Groups:    []*GroupStateDesc{&gs2},
+					NextToken: GetRuleGroupNextToken(gs2.Group.Namespace, gs2.Group.Name),
+				},
+				{
+					Groups:    []*GroupStateDesc{&gs1NotRun},
+					NextToken: GetRuleGroupNextToken(gs1NotRun.Group.Namespace, gs1NotRun.Group.Name),
+				},
+			},
+			expectedOutput: &RulesResponse{
+				Groups:    []*GroupStateDesc{&gs1},
+				NextToken: GetRuleGroupNextToken(gs1.Group.Namespace, gs1.Group.Name),
+			},
+			maxRuleGroups: 1,
 		},
 	}
 
 	for name, tc := range testCases {
 		t.Run(name, func(t *testing.T) {
-			out := mergeGroupStateDesc(tc.input)
-			slices.SortFunc(out, func(a, b *GroupStateDesc) int {
+			out := mergeGroupStateDesc(tc.input, tc.maxRuleGroups, true)
+			slices.SortFunc(out.Groups, func(a, b *GroupStateDesc) int {
 				fileCompare := strings.Compare(a.Group.Namespace, b.Group.Namespace)
 				if fileCompare != 0 {
 					return fileCompare
 				}
 				return strings.Compare(a.Group.Name, b.Group.Name)
 			})
-			require.Equal(t, len(tc.expectedOutput), len(out))
+			require.Equal(t, len(tc.expectedOutput.Groups), len(out.Groups))
 			require.True(t, reflect.DeepEqual(tc.expectedOutput, out))
 		})
 	}
diff --git a/pkg/ruler/ruler.go b/pkg/ruler/ruler.go
index efc11eef9f..d77b4d0a41 100644
--- a/pkg/ruler/ruler.go
+++ b/pkg/ruler/ruler.go
@@ -8,6 +8,7 @@ import (
 	"net/http"
 	"net/url"
 	"path/filepath"
+	"sort"
 	"strings"
 	"sync"
 	"time"
@@ -988,20 +989,28 @@ func (r *Ruler) filterBackupRuleGroups(userID string, ruleGroups []*rulespb.Rule
 
 // GetRules retrieves the running rules from this ruler and all running rulers in the ring if
 // sharding is enabled
-func (r *Ruler) GetRules(ctx context.Context, rulesRequest RulesRequest) ([]*GroupStateDesc, error) {
+func (r *Ruler) GetRules(ctx context.Context, rulesRequest RulesRequest) (*RulesResponse, error) {
 	userID, err := tenant.TenantID(ctx)
 	if err != nil {
 		return nil, fmt.Errorf("no user id found in context")
 	}
 
 	if r.cfg.EnableSharding {
-		return r.getShardedRules(ctx, userID, rulesRequest)
+		resp, err := r.getShardedRules(ctx, userID, rulesRequest)
+		if resp == nil {
+			return &RulesResponse{
+				Groups:    make([]*GroupStateDesc, 0),
+				NextToken: "",
+			}, err
+		}
+		return resp, err
 	}
 
-	return r.getLocalRules(userID, rulesRequest, false)
+	response, err := r.getLocalRules(userID, rulesRequest, false)
+	return &response, err
 }
 
-func (r *Ruler) getLocalRules(userID string, rulesRequest RulesRequest, includeBackups bool) ([]*GroupStateDesc, error) {
+func (r *Ruler) getLocalRules(userID string, rulesRequest RulesRequest, includeBackups bool) (RulesResponse, error) {
 	groups := r.manager.GetRules(userID)
 
 	groupDescs := make([]*GroupStateDesc, 0,
len(groups)) @@ -1023,7 +1032,7 @@ func (r *Ruler) getLocalRules(userID string, rulesRequest RulesRequest, includeB health := rulesRequest.Health matcherSets, err := parseMatchersParam(rulesRequest.Matchers) if err != nil { - return nil, errors.Wrap(err, "error parsing matcher values") + return RulesResponse{}, errors.Wrap(err, "error parsing matcher values") } returnAlerts := ruleType == "" || ruleType == alertingRuleFilter @@ -1033,7 +1042,7 @@ func (r *Ruler) getLocalRules(userID string, rulesRequest RulesRequest, includeB // The mapped filename is url path escaped encoded to make handling `/` characters easier decodedNamespace, err := url.PathUnescape(strings.TrimPrefix(group.File(), prefix)) if err != nil { - return nil, errors.Wrap(err, "unable to decode rule filename") + return RulesResponse{}, errors.Wrap(err, "unable to decode rule filename") } if len(fileSet) > 0 { if _, OK := fileSet[decodedNamespace]; !OK { @@ -1137,7 +1146,7 @@ func (r *Ruler) getLocalRules(userID string, rulesRequest RulesRequest, includeB EvaluationDuration: rule.GetEvaluationDuration(), } default: - return nil, errors.Errorf("failed to assert type of rule '%v'", rule.Name()) + return RulesResponse{}, errors.Errorf("failed to assert type of rule '%v'", rule.Name()) } groupDesc.ActiveRules = append(groupDesc.ActiveRules, ruleDesc) } @@ -1146,24 +1155,51 @@ func (r *Ruler) getLocalRules(userID string, rulesRequest RulesRequest, includeB } } - if !includeBackups { - return groupDescs, nil + combinedRuleStateDescs := groupDescs + if includeBackups { + backupGroups := r.manager.GetBackupRules(userID) + backupGroupDescs, err := r.ruleGroupListToGroupStateDesc(userID, backupGroups, groupListFilter{ + ruleNameSet, + ruleGroupNameSet, + fileSet, + returnAlerts, + returnRecording, + matcherSets, + }) + if err != nil { + return RulesResponse{}, err + } + combinedRuleStateDescs = append(combinedRuleStateDescs, backupGroupDescs...) } - backupGroups := r.manager.GetBackupRules(userID) - backupGroupDescs, err := r.ruleGroupListToGroupStateDesc(userID, backupGroups, groupListFilter{ - ruleNameSet, - ruleGroupNameSet, - fileSet, - returnAlerts, - returnRecording, - matcherSets, - }) - if err != nil { - return nil, err + if rulesRequest.MaxRuleGroups <= 0 { + return RulesResponse{ + Groups: combinedRuleStateDescs, + NextToken: "", + }, nil } - return append(groupDescs, backupGroupDescs...), nil + sort.Sort(PaginatedGroupStates(combinedRuleStateDescs)) + + resultingGroupDescs := make([]*GroupStateDesc, 0, len(combinedRuleStateDescs)) + for _, group := range combinedRuleStateDescs { + groupID := GetRuleGroupNextToken(group.Group.Namespace, group.Group.Name) + + // Only want groups whose groupID is greater than the token. 
This comparison works because + // we sort by that groupID + if len(rulesRequest.NextToken) > 0 && rulesRequest.NextToken >= groupID { + continue + } + if len(group.ActiveRules) > 0 { + resultingGroupDescs = append(resultingGroupDescs, group) + } + } + + resultingGroupDescs, nextToken := generatePage(resultingGroupDescs, int(rulesRequest.MaxRuleGroups)) + return RulesResponse{ + Groups: resultingGroupDescs, + NextToken: nextToken, + }, nil } type groupListFilter struct { @@ -1272,7 +1308,7 @@ func (r *Ruler) ruleGroupListToGroupStateDesc(userID string, backupGroups rulesp return groupDescs, nil } -func (r *Ruler) getShardedRules(ctx context.Context, userID string, rulesRequest RulesRequest) ([]*GroupStateDesc, error) { +func (r *Ruler) getShardedRules(ctx context.Context, userID string, rulesRequest RulesRequest) (*RulesResponse, error) { ring := ring.ReadRing(r.ring) if shardSize := r.limits.RulerTenantShardSize(userID); shardSize > 0 && r.cfg.ShardingStrategy == util.ShardingStrategyShuffle { @@ -1291,7 +1327,7 @@ func (r *Ruler) getShardedRules(ctx context.Context, userID string, rulesRequest var ( mtx sync.Mutex - merged []*GroupStateDesc + merged []*RulesResponse errCount int ) @@ -1318,8 +1354,12 @@ func (r *Ruler) getShardedRules(ctx context.Context, userID string, rulesRequest RuleGroupNames: rulesRequest.GetRuleGroupNames(), Files: rulesRequest.GetFiles(), Type: rulesRequest.GetType(), - ExcludeAlerts: rulesRequest.GetExcludeAlerts(), + State: rulesRequest.GetState(), + Health: rulesRequest.GetHealth(), Matchers: rulesRequest.GetMatchers(), + ExcludeAlerts: rulesRequest.GetExcludeAlerts(), + MaxRuleGroups: rulesRequest.GetMaxRuleGroups(), + NextToken: rulesRequest.GetNextToken(), }) if err != nil { @@ -1341,17 +1381,23 @@ func (r *Ruler) getShardedRules(ctx context.Context, userID string, rulesRequest } mtx.Lock() - merged = append(merged, newGrps.Groups...) + merged = append(merged, newGrps) mtx.Unlock() return nil }) - if err == nil && (r.cfg.RulesBackupEnabled() || r.cfg.APIDeduplicateRules) { - merged = mergeGroupStateDesc(merged) + if err == nil { + if r.cfg.RulesBackupEnabled() || r.cfg.APIDeduplicateRules { + return mergeGroupStateDesc(merged, rulesRequest.MaxRuleGroups, true), nil + } + return mergeGroupStateDesc(merged, rulesRequest.MaxRuleGroups, false), nil } - return merged, err + return &RulesResponse{ + Groups: make([]*GroupStateDesc, 0), + NextToken: "", + }, err } // Rules implements the rules service @@ -1362,12 +1408,12 @@ func (r *Ruler) Rules(ctx context.Context, in *RulesRequest) (*RulesResponse, er return nil, fmt.Errorf("no user id found in context") } - groupDescs, err := r.getLocalRules(userID, *in, r.cfg.RulesBackupEnabled()) + response, err := r.getLocalRules(userID, *in, r.cfg.RulesBackupEnabled()) if err != nil { return nil, err } - return &RulesResponse{Groups: groupDescs}, nil + return &response, nil } // HasMaxRuleGroupsLimit check if RulerMaxRuleGroupsPerTenant limit is set for the userID. 
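Reviewer note: before the regenerated protobuf code below, a self-contained sketch (standard library only; groupToken is a stand-in for GetRuleGroupNextToken from ruler_pagination.go later in this diff) of the pagination contract that getLocalRules and mergeGroupStateDesc implement: a group's token is the hex SHA-1 of "namespace;name", groups are ordered by token, and a page resumes at the first group whose token is strictly greater than the request's nextToken:

	package main

	import (
		"crypto/sha1"
		"encoding/hex"
		"fmt"
		"sort"
	)

	// groupToken mirrors GetRuleGroupNextToken: hex SHA-1 of "namespace;name".
	func groupToken(namespace, name string) string {
		h := sha1.New()
		h.Write([]byte(namespace + ";" + name))
		return hex.EncodeToString(h.Sum(nil))
	}

	func main() {
		type group struct{ ns, name string }
		groups := []group{{"test1", "up"}, {"test2", "latency"}, {"test1", "errors"}}

		// Sort by token, as PaginatedGroupStates.Less does.
		sort.Slice(groups, func(i, j int) bool {
			return groupToken(groups[i].ns, groups[i].name) < groupToken(groups[j].ns, groups[j].name)
		})

		// A page of size 2 ends at groups[1]; its token becomes nextToken.
		next := groupToken(groups[1].ns, groups[1].name)

		// The follow-up request keeps only groups strictly after the token,
		// the same comparison getLocalRules performs.
		for _, g := range groups {
			if groupToken(g.ns, g.name) > next {
				fmt.Printf("page 2 starts at %s/%s\n", g.ns, g.name)
			}
		}
	}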
diff --git a/pkg/ruler/ruler.pb.go b/pkg/ruler/ruler.pb.go index b0078f4fbf..2b56233af1 100644 --- a/pkg/ruler/ruler.pb.go +++ b/pkg/ruler/ruler.pb.go @@ -47,6 +47,8 @@ type RulesRequest struct { Health string `protobuf:"bytes,6,opt,name=health,proto3" json:"health,omitempty"` Matchers []string `protobuf:"bytes,7,rep,name=matchers,proto3" json:"matchers,omitempty"` ExcludeAlerts bool `protobuf:"varint,8,opt,name=excludeAlerts,proto3" json:"excludeAlerts,omitempty"` + MaxRuleGroups int32 `protobuf:"varint,9,opt,name=maxRuleGroups,proto3" json:"maxRuleGroups,omitempty"` + NextToken string `protobuf:"bytes,10,opt,name=nextToken,proto3" json:"nextToken,omitempty"` } func (m *RulesRequest) Reset() { *m = RulesRequest{} } @@ -137,6 +139,20 @@ func (m *RulesRequest) GetExcludeAlerts() bool { return false } +func (m *RulesRequest) GetMaxRuleGroups() int32 { + if m != nil { + return m.MaxRuleGroups + } + return 0 +} + +func (m *RulesRequest) GetNextToken() string { + if m != nil { + return m.NextToken + } + return "" +} + type LivenessCheckRequest struct { } @@ -216,7 +232,8 @@ func (m *LivenessCheckResponse) GetState() int32 { } type RulesResponse struct { - Groups []*GroupStateDesc `protobuf:"bytes,1,rep,name=groups,proto3" json:"groups,omitempty"` + Groups []*GroupStateDesc `protobuf:"bytes,1,rep,name=groups,proto3" json:"groups,omitempty"` + NextToken string `protobuf:"bytes,2,opt,name=nextToken,proto3" json:"nextToken,omitempty"` } func (m *RulesResponse) Reset() { *m = RulesResponse{} } @@ -258,6 +275,13 @@ func (m *RulesResponse) GetGroups() []*GroupStateDesc { return nil } +func (m *RulesResponse) GetNextToken() string { + if m != nil { + return m.NextToken + } + return "" +} + // GroupStateDesc is a proto representation of a cortex rule group type GroupStateDesc struct { Group *rulespb.RuleGroupDesc `protobuf:"bytes,1,opt,name=group,proto3" json:"group,omitempty"` @@ -532,60 +556,62 @@ func init() { func init() { proto.RegisterFile("ruler.proto", fileDescriptor_9ecbec0a4cfddea6) } var fileDescriptor_9ecbec0a4cfddea6 = []byte{ - // 845 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x55, 0xcf, 0x6f, 0xe3, 0x44, - 0x14, 0xf6, 0xb4, 0x75, 0x9a, 0xbc, 0xb4, 0x5d, 0x31, 0xcd, 0xae, 0x4c, 0xa8, 0x9c, 0x28, 0x20, - 0x14, 0x21, 0xad, 0x23, 0x85, 0x95, 0x10, 0x07, 0x40, 0x29, 0xbb, 0xcb, 0xa5, 0x42, 0x2b, 0x07, - 0xb8, 0x46, 0x13, 0x67, 0xe2, 0x98, 0x3a, 0xb6, 0x99, 0x19, 0x47, 0xe5, 0xc6, 0x9d, 0xcb, 0x1e, - 0x39, 0x73, 0xe2, 0x4f, 0xd9, 0x63, 0xc5, 0x69, 0x85, 0xd0, 0x42, 0xd3, 0x0b, 0x27, 0xb4, 0x7f, - 0x02, 0x9a, 0x1f, 0x6e, 0xe2, 0x12, 0xa4, 0x8d, 0x50, 0x2f, 0xed, 0xbc, 0x1f, 0xdf, 0x9b, 0x79, - 0xdf, 0xfb, 0xf2, 0x0c, 0x75, 0x96, 0xc7, 0x94, 0x79, 0x19, 0x4b, 0x45, 0x8a, 0x6d, 0x65, 0x34, - 0x1b, 0x61, 0x1a, 0xa6, 0xca, 0xd3, 0x93, 0x27, 0x1d, 0x6c, 0xba, 0x61, 0x9a, 0x86, 0x31, 0xed, - 0x29, 0x6b, 0x9c, 0x4f, 0x7b, 0x93, 0x9c, 0x11, 0x11, 0xa5, 0x89, 0x89, 0xb7, 0x6e, 0xc7, 0x45, - 0x34, 0xa7, 0x5c, 0x90, 0x79, 0x66, 0x12, 0x3e, 0x0e, 0x23, 0x31, 0xcb, 0xc7, 0x5e, 0x90, 0xce, - 0x7b, 0x41, 0xca, 0x04, 0xbd, 0xc8, 0x58, 0xfa, 0x2d, 0x0d, 0x84, 0xb1, 0x7a, 0xd9, 0x79, 0x58, - 0x04, 0xc6, 0xe6, 0x60, 0xa0, 0x9f, 0xbc, 0x09, 0x54, 0x3d, 0x5e, 0xfd, 0xe5, 0xd9, 0x58, 0xff, - 0xd7, 0xf0, 0xce, 0xdf, 0x08, 0x0e, 0x7c, 0x69, 0xfb, 0xf4, 0xbb, 0x9c, 0x72, 0x81, 0x4f, 0xa0, - 0x26, 0xe3, 0x5f, 0x92, 0x39, 0xe5, 0x0e, 0x6a, 0xef, 0x76, 0x6b, 0xfe, 0xca, 0x81, 0xdf, 0x87, - 0x23, 0x69, 0x7c, 0xc1, 0xd2, 0x3c, 0xd3, 0x29, 0x3b, 0x2a, 0xe5, 
0x96, 0x17, 0x37, 0xc0, 0x9e, - 0x46, 0x31, 0xe5, 0xce, 0xae, 0x0a, 0x6b, 0x03, 0x63, 0xd8, 0x13, 0xdf, 0x67, 0xd4, 0xd9, 0x6b, - 0xa3, 0x6e, 0xcd, 0x57, 0x67, 0x99, 0xc9, 0x05, 0x11, 0xd4, 0xb1, 0x95, 0x53, 0x1b, 0xf8, 0x01, - 0x54, 0x66, 0x94, 0xc4, 0x62, 0xe6, 0x54, 0x94, 0xdb, 0x58, 0xb8, 0x09, 0xd5, 0x39, 0x11, 0xc1, - 0x8c, 0x32, 0xee, 0xec, 0xab, 0xd2, 0x37, 0x36, 0x7e, 0x0f, 0x0e, 0xe9, 0x45, 0x10, 0xe7, 0x13, - 0x3a, 0x88, 0x29, 0x13, 0xdc, 0xa9, 0xb6, 0x51, 0xb7, 0xea, 0x97, 0x9d, 0x9d, 0x07, 0xd0, 0x38, - 0x8b, 0x16, 0x34, 0xa1, 0x9c, 0x7f, 0x3e, 0xa3, 0xc1, 0xb9, 0xe9, 0xbb, 0xf3, 0x10, 0xee, 0xdf, - 0xf2, 0xf3, 0x2c, 0x4d, 0xf8, 0xda, 0x03, 0x51, 0x1b, 0x75, 0x6d, 0xf3, 0xc0, 0xce, 0xa7, 0x70, - 0x68, 0x68, 0x33, 0x69, 0x0f, 0xa1, 0x12, 0xca, 0xfe, 0x35, 0x69, 0xf5, 0xfe, 0x7d, 0x4f, 0xcb, - 0x47, 0x91, 0x32, 0x94, 0x98, 0xc7, 0x94, 0x07, 0xbe, 0x49, 0xea, 0xfc, 0xbc, 0x03, 0x47, 0xe5, - 0x10, 0xfe, 0x00, 0x6c, 0x15, 0x54, 0x17, 0xd5, 0xfb, 0x0d, 0x4f, 0xcf, 0xc9, 0x2f, 0x98, 0x55, - 0x78, 0x9d, 0x82, 0x3f, 0x82, 0x03, 0x12, 0x88, 0x68, 0x41, 0x47, 0x2a, 0x49, 0x4d, 0xa1, 0x80, - 0x30, 0x05, 0x59, 0x5d, 0x59, 0xd7, 0x99, 0xea, 0xb9, 0xf8, 0x1b, 0x38, 0xa6, 0x0b, 0x12, 0xe7, - 0x4a, 0x9e, 0x5f, 0x15, 0x32, 0x74, 0x76, 0xd5, 0x95, 0x4d, 0x4f, 0x0b, 0xd5, 0x2b, 0x84, 0xea, - 0xdd, 0x64, 0x9c, 0x56, 0x5f, 0xbc, 0x6a, 0x59, 0xcf, 0xff, 0x68, 0x21, 0x7f, 0x53, 0x01, 0x3c, - 0x04, 0xbc, 0x72, 0x3f, 0x36, 0xf2, 0x57, 0x83, 0xae, 0xf7, 0xdf, 0xfe, 0x57, 0xd9, 0x22, 0x41, - 0x57, 0xfd, 0x49, 0x56, 0xdd, 0x00, 0xef, 0xfc, 0xbe, 0xa3, 0x59, 0x5e, 0x71, 0xf4, 0x2e, 0xec, - 0xc9, 0x16, 0x0d, 0x45, 0xf7, 0xd6, 0x28, 0x52, 0xad, 0xaa, 0xe0, 0x6a, 0x62, 0x3b, 0x9b, 0x25, - 0xb5, 0x5b, 0x92, 0xd4, 0x09, 0xd4, 0x62, 0xc2, 0xc5, 0x13, 0xc6, 0x52, 0x66, 0x94, 0xb9, 0x72, - 0xc8, 0xb1, 0x12, 0xad, 0x26, 0xbb, 0x34, 0x56, 0xa5, 0xa6, 0xb5, 0xb1, 0xea, 0xa4, 0xff, 0xa2, - 0xb7, 0x72, 0x37, 0xf4, 0xee, 0xff, 0x3f, 0x7a, 0x7f, 0xb5, 0xe1, 0xa8, 0xdc, 0x47, 0x59, 0xec, - 0x37, 0xd4, 0x25, 0x50, 0x89, 0xc9, 0x98, 0xc6, 0x85, 0xce, 0x8e, 0xbd, 0x62, 0x17, 0x79, 0x67, - 0xd2, 0xff, 0x8c, 0x44, 0xec, 0x74, 0x20, 0xef, 0xfa, 0xed, 0x55, 0x6b, 0xab, 0x5d, 0xa6, 0xf1, - 0x83, 0x09, 0xc9, 0x04, 0x65, 0xbe, 0xb9, 0x05, 0x5f, 0x40, 0x9d, 0x24, 0x49, 0x2a, 0xd4, 0x33, - 0xf5, 0x0e, 0xb9, 0xbb, 0x4b, 0xd7, 0xaf, 0x92, 0xfd, 0x4b, 0x9e, 0xf4, 0x8a, 0x42, 0xbe, 0x36, - 0xf0, 0x00, 0x6a, 0xe6, 0xd7, 0x46, 0x84, 0xda, 0x53, 0x6f, 0x3a, 0xcb, 0xaa, 0x86, 0x0d, 0x04, - 0xfe, 0x0c, 0xaa, 0xd3, 0x88, 0xd1, 0x89, 0xac, 0xb0, 0x8d, 0x1a, 0xf6, 0x15, 0x6a, 0x20, 0xf0, - 0x13, 0xa8, 0x33, 0xca, 0xd3, 0x78, 0xa1, 0x6b, 0xec, 0x6f, 0x51, 0x03, 0x0a, 0xe0, 0x40, 0xe0, - 0xa7, 0x70, 0x20, 0xc5, 0x3d, 0xe2, 0x34, 0x11, 0xb2, 0x4e, 0x75, 0x9b, 0x3a, 0x12, 0x39, 0xa4, - 0x89, 0xd0, 0xcf, 0x59, 0x90, 0x38, 0x9a, 0x8c, 0xf2, 0x44, 0x44, 0xb1, 0x53, 0xdb, 0xa6, 0x8c, - 0x02, 0x7e, 0x2d, 0x71, 0xf8, 0x19, 0xbc, 0x75, 0x4e, 0x69, 0x36, 0x9a, 0x46, 0x2c, 0x4a, 0xc2, - 0x11, 0x8f, 0x92, 0x80, 0x3a, 0xb0, 0x45, 0xb1, 0x7b, 0x12, 0xfe, 0x54, 0xa1, 0x87, 0x12, 0xdc, - 0xff, 0x11, 0x81, 0x2d, 0xf7, 0x01, 0xc3, 0x8f, 0xf4, 0x81, 0xe3, 0xe3, 0xb5, 0xb5, 0x58, 0x7c, - 0xe7, 0x9a, 0x8d, 0xb2, 0x53, 0x6f, 0xf1, 0x8e, 0x85, 0xcf, 0xe0, 0xb0, 0xf4, 0x1d, 0xc0, 0xef, - 0x98, 0xc4, 0x4d, 0x5f, 0x8d, 0xe6, 0xc9, 0xe6, 0x60, 0x51, 0xed, 0xf4, 0xd1, 0xe5, 0x95, 0x6b, - 0xbd, 0xbc, 0x72, 0xad, 0xd7, 0x57, 0x2e, 0xfa, 0x61, 0xe9, 0xa2, 0x5f, 0x96, 0x2e, 0x7a, 0xb1, - 0x74, 0xd1, 0xe5, 0xd2, 0x45, 0x7f, 0x2e, 0x5d, 0xf4, 0xd7, 0xd2, 0xb5, 0x5e, 0x2f, 0x5d, 
0xf4, - 0xfc, 0xda, 0xb5, 0x2e, 0xaf, 0x5d, 0xeb, 0xe5, 0xb5, 0x6b, 0x8d, 0x2b, 0xaa, 0xe5, 0x0f, 0xff, - 0x09, 0x00, 0x00, 0xff, 0xff, 0xc4, 0x1f, 0x82, 0x20, 0x82, 0x08, 0x00, 0x00, + // 880 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x55, 0x4f, 0x6f, 0xe3, 0x44, + 0x14, 0x8f, 0xd3, 0x26, 0x4d, 0x5e, 0xda, 0xae, 0x98, 0x66, 0x57, 0x26, 0x54, 0x6e, 0x15, 0x10, + 0xaa, 0x90, 0xd6, 0x91, 0xca, 0x4a, 0x88, 0x03, 0x42, 0x29, 0xbb, 0xcb, 0xa5, 0x42, 0x2b, 0x77, + 0xe1, 0x84, 0x14, 0x4d, 0x9c, 0x57, 0xc7, 0xd4, 0xb1, 0xcd, 0xcc, 0x38, 0x0a, 0x37, 0xee, 0x5c, + 0xf6, 0xc8, 0x99, 0x13, 0x67, 0x3e, 0xc5, 0x1e, 0x2b, 0x4e, 0x2b, 0x84, 0x16, 0x9a, 0x5e, 0x38, + 0xee, 0x47, 0x40, 0xf3, 0xc6, 0x6e, 0xe2, 0x12, 0xa4, 0x56, 0xab, 0x5e, 0x92, 0x79, 0x7f, 0x7e, + 0xbf, 0x79, 0xff, 0xe6, 0x19, 0x5a, 0x22, 0x8b, 0x50, 0xb8, 0xa9, 0x48, 0x54, 0xc2, 0x6a, 0x24, + 0x74, 0xda, 0x41, 0x12, 0x24, 0xa4, 0xe9, 0xe9, 0x93, 0x31, 0x76, 0x9c, 0x20, 0x49, 0x82, 0x08, + 0x7b, 0x24, 0x0d, 0xb3, 0xd3, 0xde, 0x28, 0x13, 0x5c, 0x85, 0x49, 0x9c, 0xdb, 0xf7, 0xae, 0xdb, + 0x55, 0x38, 0x41, 0xa9, 0xf8, 0x24, 0xcd, 0x1d, 0x3e, 0x0d, 0x42, 0x35, 0xce, 0x86, 0xae, 0x9f, + 0x4c, 0x7a, 0x7e, 0x22, 0x14, 0xce, 0x52, 0x91, 0x7c, 0x87, 0xbe, 0xca, 0xa5, 0x5e, 0x7a, 0x16, + 0x14, 0x86, 0x61, 0x7e, 0xc8, 0xa1, 0x9f, 0xdd, 0x04, 0x4a, 0xc1, 0xd3, 0xaf, 0x4c, 0x87, 0xe6, + 0xdf, 0xc0, 0xbb, 0xbf, 0x55, 0x61, 0xd3, 0xd3, 0xb2, 0x87, 0xdf, 0x67, 0x28, 0x15, 0xdb, 0x85, + 0xa6, 0xb6, 0x7f, 0xc5, 0x27, 0x28, 0x6d, 0x6b, 0x7f, 0xed, 0xa0, 0xe9, 0x2d, 0x14, 0xec, 0x43, + 0xd8, 0xd6, 0xc2, 0x97, 0x22, 0xc9, 0x52, 0xe3, 0x52, 0x25, 0x97, 0x6b, 0x5a, 0xd6, 0x86, 0xda, + 0x69, 0x18, 0xa1, 0xb4, 0xd7, 0xc8, 0x6c, 0x04, 0xc6, 0x60, 0x5d, 0xfd, 0x90, 0xa2, 0xbd, 0xbe, + 0x6f, 0x1d, 0x34, 0x3d, 0x3a, 0x6b, 0x4f, 0xa9, 0xb8, 0x42, 0xbb, 0x46, 0x4a, 0x23, 0xb0, 0x07, + 0x50, 0x1f, 0x23, 0x8f, 0xd4, 0xd8, 0xae, 0x93, 0x3a, 0x97, 0x58, 0x07, 0x1a, 0x13, 0xae, 0xfc, + 0x31, 0x0a, 0x69, 0x6f, 0x10, 0xf5, 0x95, 0xcc, 0x3e, 0x80, 0x2d, 0x9c, 0xf9, 0x51, 0x36, 0xc2, + 0x7e, 0x84, 0x42, 0x49, 0xbb, 0xb1, 0x6f, 0x1d, 0x34, 0xbc, 0xb2, 0x52, 0x7b, 0x4d, 0xf8, 0xcc, + 0x2b, 0xc2, 0x95, 0x76, 0x73, 0xdf, 0x3a, 0xa8, 0x79, 0x65, 0xa5, 0xae, 0x42, 0x8c, 0x33, 0xf5, + 0x3c, 0x39, 0xc3, 0xd8, 0x06, 0x0a, 0x61, 0xa1, 0xe8, 0x3e, 0x80, 0xf6, 0x71, 0x38, 0xc5, 0x18, + 0xa5, 0xfc, 0x62, 0x8c, 0xfe, 0x59, 0x5e, 0xbb, 0xee, 0x43, 0xb8, 0x7f, 0x4d, 0x2f, 0xd3, 0x24, + 0x96, 0x4b, 0x49, 0x5a, 0x74, 0x99, 0x11, 0xba, 0xdf, 0xc2, 0x56, 0x5e, 0xfa, 0xdc, 0xed, 0x21, + 0xd4, 0x03, 0x13, 0x94, 0x2e, 0x7c, 0xeb, 0xf0, 0xbe, 0x6b, 0x46, 0x90, 0x82, 0x3a, 0xd1, 0x98, + 0xc7, 0x28, 0x7d, 0x2f, 0x77, 0x2a, 0x07, 0x59, 0xbd, 0x1e, 0xe4, 0x2f, 0x55, 0xd8, 0x2e, 0x03, + 0xd9, 0x47, 0x50, 0x23, 0x28, 0x85, 0xd1, 0x3a, 0x6c, 0xbb, 0x66, 0x12, 0xae, 0xf2, 0x26, 0x76, + 0xe3, 0xc2, 0x3e, 0x81, 0x4d, 0xee, 0xab, 0x70, 0x8a, 0x03, 0x72, 0xa2, 0x3e, 0x17, 0x10, 0x41, + 0x90, 0x45, 0x40, 0x2d, 0xe3, 0x49, 0xc9, 0xb0, 0x6f, 0x60, 0x07, 0xa7, 0x3c, 0xca, 0xe8, 0x01, + 0x3c, 0x2f, 0x06, 0xdd, 0x5e, 0xa3, 0x2b, 0x3b, 0xae, 0x79, 0x0a, 0x6e, 0xf1, 0x14, 0xdc, 0x2b, + 0x8f, 0xa3, 0xc6, 0xcb, 0xd7, 0x7b, 0x95, 0x17, 0x7f, 0xed, 0x59, 0xde, 0x2a, 0x02, 0x76, 0x02, + 0x6c, 0xa1, 0x7e, 0x9c, 0x3f, 0x30, 0x1a, 0xa5, 0xd6, 0xe1, 0xbb, 0xff, 0xa1, 0x2d, 0x1c, 0x0c, + 0xeb, 0xcf, 0x9a, 0x75, 0x05, 0xbc, 0xfb, 0x67, 0xd5, 0xf4, 0x60, 0x51, 0xa3, 0xf7, 0x61, 0x5d, + 0xa7, 0x98, 0x97, 0xe8, 0xde, 0x52, 0x89, 0x28, 0x55, 0x32, 0x2e, 0xfa, 0x59, 0x5d, 
0x3d, 0xb4, + 0x6b, 0xa5, 0xa1, 0xdd, 0x85, 0x66, 0xc4, 0xa5, 0x7a, 0x22, 0x44, 0x22, 0xf2, 0xd9, 0x5f, 0x28, + 0x74, 0xd3, 0xb9, 0x99, 0xd7, 0x5a, 0xa9, 0xe9, 0x34, 0xaf, 0x4b, 0x4d, 0x37, 0x4e, 0xff, 0x57, + 0xde, 0xfa, 0xdd, 0x94, 0x77, 0xe3, 0xed, 0xca, 0xfb, 0x7b, 0x0d, 0xb6, 0xcb, 0x79, 0x94, 0x9f, + 0xc2, 0x55, 0xe9, 0x62, 0xa8, 0x47, 0x7c, 0x88, 0x51, 0x31, 0x67, 0x3b, 0x6e, 0xb1, 0xed, 0xdc, + 0x63, 0xad, 0x7f, 0xc6, 0x43, 0x71, 0xd4, 0xd7, 0x77, 0xfd, 0xf1, 0x7a, 0xef, 0x56, 0xdb, 0xd2, + 0xe0, 0xfb, 0x23, 0x9e, 0x2a, 0x14, 0x5e, 0x7e, 0x0b, 0x9b, 0x41, 0x8b, 0xc7, 0x71, 0xa2, 0x28, + 0x4c, 0xb3, 0xa5, 0xee, 0xee, 0xd2, 0xe5, 0xab, 0x74, 0xfe, 0xba, 0x4e, 0x66, 0x09, 0x5a, 0x9e, + 0x11, 0x58, 0x1f, 0x9a, 0xf9, 0x6b, 0xe3, 0x8a, 0x36, 0xe1, 0x4d, 0x7b, 0xd9, 0x30, 0xb0, 0xbe, + 0x62, 0x9f, 0x43, 0xe3, 0x34, 0x14, 0x38, 0xd2, 0x0c, 0xb7, 0x99, 0x86, 0x0d, 0x42, 0xf5, 0x15, + 0x7b, 0x02, 0x2d, 0x81, 0x32, 0x89, 0xa6, 0x86, 0x63, 0xe3, 0x16, 0x1c, 0x50, 0x00, 0xfb, 0x8a, + 0x3d, 0x85, 0x4d, 0x3d, 0xdc, 0x03, 0x89, 0xb1, 0xd2, 0x3c, 0x8d, 0xdb, 0xf0, 0x68, 0xe4, 0x09, + 0xc6, 0xca, 0x84, 0x33, 0xe5, 0x51, 0x38, 0x1a, 0x64, 0xb1, 0x0a, 0x23, 0x5a, 0xd3, 0x37, 0xa6, + 0x21, 0xe0, 0xd7, 0x1a, 0xc7, 0x9e, 0xc1, 0x3b, 0x67, 0x88, 0xe9, 0xe0, 0x34, 0x14, 0x61, 0x1c, + 0x0c, 0x64, 0x18, 0xfb, 0x48, 0x1b, 0xfd, 0xa6, 0x64, 0xf7, 0x34, 0xfc, 0x29, 0xa1, 0x4f, 0x34, + 0xf8, 0xf0, 0x27, 0x0b, 0x6a, 0x7a, 0x1f, 0x08, 0xf6, 0xc8, 0x1c, 0x24, 0xdb, 0x59, 0x5a, 0x8b, + 0xc5, 0x97, 0xb4, 0xd3, 0x2e, 0x2b, 0xcd, 0x8e, 0xef, 0x56, 0xd8, 0x31, 0x6c, 0x95, 0xbe, 0x12, + 0xec, 0xbd, 0xdc, 0x71, 0xd5, 0x37, 0xa5, 0xb3, 0xbb, 0xda, 0x58, 0xb0, 0x1d, 0x3d, 0x3a, 0xbf, + 0x70, 0x2a, 0xaf, 0x2e, 0x9c, 0xca, 0x9b, 0x0b, 0xc7, 0xfa, 0x71, 0xee, 0x58, 0xbf, 0xce, 0x1d, + 0xeb, 0xe5, 0xdc, 0xb1, 0xce, 0xe7, 0x8e, 0xf5, 0xf7, 0xdc, 0xb1, 0xfe, 0x99, 0x3b, 0x95, 0x37, + 0x73, 0xc7, 0x7a, 0x71, 0xe9, 0x54, 0xce, 0x2f, 0x9d, 0xca, 0xab, 0x4b, 0xa7, 0x32, 0xac, 0x53, + 0xca, 0x1f, 0xff, 0x1b, 0x00, 0x00, 0xff, 0xff, 0x29, 0x54, 0x5d, 0xa5, 0xe4, 0x08, 0x00, 0x00, } func (this *RulesRequest) Equal(that interface{}) bool { @@ -651,6 +677,12 @@ func (this *RulesRequest) Equal(that interface{}) bool { if this.ExcludeAlerts != that1.ExcludeAlerts { return false } + if this.MaxRuleGroups != that1.MaxRuleGroups { + return false + } + if this.NextToken != that1.NextToken { + return false + } return true } func (this *LivenessCheckRequest) Equal(that interface{}) bool { @@ -725,6 +757,9 @@ func (this *RulesResponse) Equal(that interface{}) bool { return false } } + if this.NextToken != that1.NextToken { + return false + } return true } func (this *GroupStateDesc) Equal(that interface{}) bool { @@ -877,7 +912,7 @@ func (this *RulesRequest) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 12) + s := make([]string, 0, 14) s = append(s, "&ruler.RulesRequest{") s = append(s, "RuleNames: "+fmt.Sprintf("%#v", this.RuleNames)+",\n") s = append(s, "RuleGroupNames: "+fmt.Sprintf("%#v", this.RuleGroupNames)+",\n") @@ -887,6 +922,8 @@ func (this *RulesRequest) GoString() string { s = append(s, "Health: "+fmt.Sprintf("%#v", this.Health)+",\n") s = append(s, "Matchers: "+fmt.Sprintf("%#v", this.Matchers)+",\n") s = append(s, "ExcludeAlerts: "+fmt.Sprintf("%#v", this.ExcludeAlerts)+",\n") + s = append(s, "MaxRuleGroups: "+fmt.Sprintf("%#v", this.MaxRuleGroups)+",\n") + s = append(s, "NextToken: "+fmt.Sprintf("%#v", this.NextToken)+",\n") s = append(s, "}") return strings.Join(s, "") } @@ -913,11 +950,12 @@ func 
(this *RulesResponse) GoString() string {
 	if this == nil {
 		return "nil"
 	}
-	s := make([]string, 0, 5)
+	s := make([]string, 0, 6)
 	s = append(s, "&ruler.RulesResponse{")
 	if this.Groups != nil {
 		s = append(s, "Groups: "+fmt.Sprintf("%#v", this.Groups)+",\n")
 	}
+	s = append(s, "NextToken: "+fmt.Sprintf("%#v", this.NextToken)+",\n")
 	s = append(s, "}")
 	return strings.Join(s, "")
 }
@@ -1122,6 +1160,18 @@ func (m *RulesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 	_ = i
 	var l int
 	_ = l
+	if len(m.NextToken) > 0 {
+		i -= len(m.NextToken)
+		copy(dAtA[i:], m.NextToken)
+		i = encodeVarintRuler(dAtA, i, uint64(len(m.NextToken)))
+		i--
+		dAtA[i] = 0x52
+	}
+	if m.MaxRuleGroups != 0 {
+		i = encodeVarintRuler(dAtA, i, uint64(m.MaxRuleGroups))
+		i--
+		dAtA[i] = 0x48
+	}
 	if m.ExcludeAlerts {
 		i--
 		if m.ExcludeAlerts {
@@ -1263,6 +1313,13 @@ func (m *RulesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 	_ = i
 	var l int
 	_ = l
+	if len(m.NextToken) > 0 {
+		i -= len(m.NextToken)
+		copy(dAtA[i:], m.NextToken)
+		i = encodeVarintRuler(dAtA, i, uint64(len(m.NextToken)))
+		i--
+		dAtA[i] = 0x12
+	}
 	if len(m.Groups) > 0 {
 		for iNdEx := len(m.Groups) - 1; iNdEx >= 0; iNdEx-- {
 			{
@@ -1599,6 +1656,13 @@ func (m *RulesRequest) Size() (n int) {
 	if m.ExcludeAlerts {
 		n += 2
 	}
+	if m.MaxRuleGroups != 0 {
+		n += 1 + sovRuler(uint64(m.MaxRuleGroups))
+	}
+	l = len(m.NextToken)
+	if l > 0 {
+		n += 1 + l + sovRuler(uint64(l))
+	}
 	return n
 }
@@ -1635,6 +1699,10 @@ func (m *RulesResponse) Size() (n int) {
 			n += 1 + l + sovRuler(uint64(l))
 		}
 	}
+	l = len(m.NextToken)
+	if l > 0 {
+		n += 1 + l + sovRuler(uint64(l))
+	}
 	return n
 }
@@ -1755,6 +1823,8 @@ func (this *RulesRequest) String() string {
 		`Health:` + fmt.Sprintf("%v", this.Health) + `,`,
 		`Matchers:` + fmt.Sprintf("%v", this.Matchers) + `,`,
 		`ExcludeAlerts:` + fmt.Sprintf("%v", this.ExcludeAlerts) + `,`,
+		`MaxRuleGroups:` + fmt.Sprintf("%v", this.MaxRuleGroups) + `,`,
+		`NextToken:` + fmt.Sprintf("%v", this.NextToken) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -1789,6 +1859,7 @@ func (this *RulesResponse) String() string {
 	repeatedStringForGroups += "}"
 	s := strings.Join([]string{`&RulesResponse{`,
 		`Groups:` + repeatedStringForGroups + `,`,
+		`NextToken:` + fmt.Sprintf("%v", this.NextToken) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -2132,6 +2203,57 @@ func (m *RulesRequest) Unmarshal(dAtA []byte) error {
 				}
 			}
 			m.ExcludeAlerts = bool(v != 0)
+		case 9:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field MaxRuleGroups", wireType)
+			}
+			m.MaxRuleGroups = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRuler
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.MaxRuleGroups |= int32(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 10:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field NextToken", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRuler
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRuler
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRuler
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.NextToken = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
 		default:
 			iNdEx = preIndex
 			skippy, err := skipRuler(dAtA[iNdEx:])
@@ -2344,6 +2466,38 @@ func (m *RulesResponse) Unmarshal(dAtA []byte) error {
 				return err
 			}
 			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field NextToken", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRuler
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRuler
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRuler
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.NextToken = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
 		default:
 			iNdEx = preIndex
 			skippy, err := skipRuler(dAtA[iNdEx:])
diff --git a/pkg/ruler/ruler.proto b/pkg/ruler/ruler.proto
index 22745ead97..9464cfd71d 100644
--- a/pkg/ruler/ruler.proto
+++ b/pkg/ruler/ruler.proto
@@ -29,6 +29,8 @@ message RulesRequest {
   string health = 6;
   repeated string matchers = 7;
   bool excludeAlerts = 8;
+  int32 maxRuleGroups = 9;
+  string nextToken = 10;
 }
 
 message LivenessCheckRequest{}
@@ -39,6 +41,7 @@ message LivenessCheckResponse{
 
 message RulesResponse {
   repeated GroupStateDesc groups = 1;
+  string nextToken = 2;
 }
 
 // GroupStateDesc is a proto representation of a cortex rule group
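The two fields added to RulesRequest and the nextToken added to RulesResponse above form the whole paging contract: a caller asks for at most maxRuleGroups groups (a negative value disables paging, which the updated tests rely on) and echoes nextToken back until the ruler returns it empty. A minimal sketch of that loop from the caller's side, assuming the RulerClient interface that gRPC generates from this proto; the page size of 25 and the helper name fetchAllRuleGroups are illustrative, not part of this change:

	// fetchAllRuleGroups pages through a tenant's rule groups, following
	// NextToken until the ruler signals the last page with an empty token.
	func fetchAllRuleGroups(ctx context.Context, client RulerClient) ([]*GroupStateDesc, error) {
		var all []*GroupStateDesc
		token := ""
		for {
			// MaxRuleGroups is the page size; 25 here is an arbitrary choice.
			resp, err := client.Rules(ctx, &RulesRequest{MaxRuleGroups: 25, NextToken: token})
			if err != nil {
				return nil, err
			}
			all = append(all, resp.Groups...)
			if resp.NextToken == "" { // empty token: no more pages
				return all, nil
			}
			token = resp.NextToken
		}
	}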
diff --git a/pkg/ruler/ruler_pagination.go b/pkg/ruler/ruler_pagination.go
new file mode 100644
index 0000000000..b1a1eb7169
--- /dev/null
+++ b/pkg/ruler/ruler_pagination.go
@@ -0,0 +1,45 @@
+package ruler
+
+import (
+	"crypto/sha1"
+	"encoding/hex"
+)
+
+type PaginatedGroupStates []*GroupStateDesc
+
+func (gi PaginatedGroupStates) Swap(i, j int) { gi[i], gi[j] = gi[j], gi[i] }
+func (gi PaginatedGroupStates) Less(i, j int) bool {
+	return GetRuleGroupNextToken(gi[i].Group.Namespace, gi[i].Group.Name) < GetRuleGroupNextToken(gi[j].Group.Namespace, gi[j].Group.Name)
+}
+func (gi PaginatedGroupStates) Len() int { return len(gi) }
+
+func GetRuleGroupNextToken(namespace string, group string) string {
+	h := sha1.New()
+	h.Write([]byte(namespace + ";" + group))
+	return hex.EncodeToString(h.Sum(nil))
+}
+
+// generatePage takes a sorted list of groups and returns one page of groups plus the next token, which can be
+// used in subsequent requests. The number of groups per page is at most maxRuleGroups. If more rule groups are
+// passed in than maxRuleGroups, a next token is returned; otherwise the next token is empty.
+func generatePage(groups []*GroupStateDesc, maxRuleGroups int) ([]*GroupStateDesc, string) {
+	resultNumber := 0
+	var returnPaginationToken string
+	returnGroupDescs := make([]*GroupStateDesc, 0, len(groups))
+	for _, groupInfo := range groups {
+
+		// Add the rule group to the return slice while maxRuleGroups has not been reached
+		// (a negative maxRuleGroups disables pagination entirely)
+		if maxRuleGroups < 0 || resultNumber < maxRuleGroups {
+			returnGroupDescs = append(returnGroupDescs, groupInfo)
+			resultNumber++
+			continue
+		}
+
+		// There are more groups than fit on this page: return a token derived from the last returned group
+		if maxRuleGroups > 0 && resultNumber == maxRuleGroups {
+			returnPaginationToken = GetRuleGroupNextToken(returnGroupDescs[maxRuleGroups-1].Group.Namespace, returnGroupDescs[maxRuleGroups-1].Group.Name)
+			break
+		}
+	}
+	return returnGroupDescs, returnPaginationToken
+}
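Because tokens are SHA-1 digests of "namespace;group", they are stable across rulers and independent of evaluation state, and sorting by token via PaginatedGroupStates gives every replica the same deterministic order. A hedged sketch of how these helpers might compose; the token-skipping loop is an assumption about how a caller resumes from nextToken, not the ruler's actual request handling, which is outside this hunk:

	// pageAfterToken sorts groups into token order, skips everything at or
	// before the resumption token, and emits the next page plus its token.
	// Requires the standard library "sort" package.
	func pageAfterToken(groups []*GroupStateDesc, nextToken string, pageSize int) ([]*GroupStateDesc, string) {
		sort.Sort(PaginatedGroupStates(groups))
		for nextToken != "" && len(groups) > 0 &&
			GetRuleGroupNextToken(groups[0].Group.Namespace, groups[0].Group.Name) <= nextToken {
			groups = groups[1:]
		}
		return generatePage(groups, pageSize)
	}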
diff --git a/pkg/ruler/ruler_test.go b/pkg/ruler/ruler_test.go
index 7262b7179e..8519fa9d90 100644
--- a/pkg/ruler/ruler_test.go
+++ b/pkg/ruler/ruler_test.go
@@ -331,7 +331,9 @@ func TestRuler_Rules(t *testing.T) {
 
 	// test user1
 	ctx := user.InjectOrgID(context.Background(), "user1")
-	rls, err := r.Rules(ctx, &RulesRequest{})
+	rls, err := r.Rules(ctx, &RulesRequest{
+		MaxRuleGroups: -1,
+	})
 	require.NoError(t, err)
 	require.Len(t, rls.Groups, 1)
 	rg := rls.Groups[0]
@@ -340,7 +342,9 @@
 
 	// test user2
 	ctx = user.InjectOrgID(context.Background(), "user2")
-	rls, err = r.Rules(ctx, &RulesRequest{})
+	rls, err = r.Rules(ctx, &RulesRequest{
+		MaxRuleGroups: -1,
+	})
 	require.NoError(t, err)
 	require.Len(t, rls.Groups, 1)
 	rg = rls.Groups[0]
@@ -591,7 +595,8 @@ func TestGetRules(t *testing.T) {
 		"No Sharding with Rule Type Filter": {
 			sharding: false,
 			rulesRequest: RulesRequest{
-				Type: alertingRuleFilter,
+				Type:          alertingRuleFilter,
+				MaxRuleGroups: -1,
 			},
 			rulerStateMap: rulerStateMapAllActive,
 			expectedCount: map[string]int{
@@ -604,7 +609,8 @@
 		"No Sharding with Alert state filter for firing alerts": {
 			sharding: false,
 			rulesRequest: RulesRequest{
-				State: firingStateFilter,
+				State:         firingStateFilter,
+				MaxRuleGroups: -1,
 			},
 			rulerStateMap: rulerStateMapAllActive,
 			expectedCount: map[string]int{
@@ -616,7 +622,8 @@
 		"No Sharding with Alert state filter for inactive alerts": {
 			sharding: false,
 			rulesRequest: RulesRequest{
-				State: inactiveStateFilter,
+				State:         inactiveStateFilter,
+				MaxRuleGroups: -1,
 			},
 			rulerStateMap: rulerStateMapAllActive,
 			expectedCount: map[string]int{
@@ -628,7 +635,8 @@
 		"No Sharding with health filter for OK alerts": {
 			sharding: false,
 			rulesRequest: RulesRequest{
-				Health: okHealthFilter,
+				Health:        okHealthFilter,
+				MaxRuleGroups: -1,
 			},
 			rulerStateMap: rulerStateMapAllActive,
 			expectedCount: map[string]int{
@@ -640,7 +648,8 @@
 		"No Sharding with health filter for unknown alerts": {
 			sharding: false,
 			rulesRequest: RulesRequest{
-				Health: unknownHealthFilter,
+				Health:        unknownHealthFilter,
+				MaxRuleGroups: -1,
 			},
 			rulerStateMap: rulerStateMapAllActive,
 			expectedCount: map[string]int{
@@ -652,7 +661,8 @@
 		"No Sharding with Rule label matcher filter - match 1 rule": {
 			sharding: false,
 			rulesRequest: RulesRequest{
-				Matchers: []string{`{alertname="atest_user1_group1_rule_1"}`},
+				Matchers:      []string{`{alertname="atest_user1_group1_rule_1"}`},
+				MaxRuleGroups: -1,
 			},
 			rulerStateMap: rulerStateMapAllActive,
 			expectedCount: map[string]int{
@@ -664,7 +674,8 @@ func TestGetRules(t *testing.T) {
 		"No Sharding with Rule label matcher filter - label match all alerting rule": {
 			sharding: false,
 			rulesRequest: RulesRequest{
-				Matchers: []string{`{alertname=~"atest_.*"}`},
+				Matchers:      []string{`{alertname=~"atest_.*"}`},
+				MaxRuleGroups: -1,
 			},
 			rulerStateMap: rulerStateMapAllActive,
 			expectedCount: map[string]int{
@@ -677,6 +688,7 @@ func TestGetRules(t *testing.T) {
 			sharding:         true,
 			shardingStrategy: util.ShardingStrategyDefault,
 			rulerStateMap:    rulerStateMapAllActive,
+			rulesRequest:     RulesRequest{MaxRuleGroups: -1},
 			expectedCount: map[string]int{
 				"user1": 5,
 				"user2": 9,
@@ -688,6 +700,7 @@
 			sharding:         true,
 			shardingStrategy: util.ShardingStrategyDefault,
 			rulerStateMap:    rulerStateMapAllActive,
+			rulesRequest:     RulesRequest{MaxRuleGroups: -1},
 			expectedCount: map[string]int{
 				"user1": 5,
 				"user2": 9,
@@ -702,7 +715,8 @@
 			shardingStrategy: util.ShardingStrategyShuffle,
 			rulerStateMap:    rulerStateMapAllActive,
 			rulesRequest: RulesRequest{
-				Type: recordingRuleFilter,
+				Type:          recordingRuleFilter,
+				MaxRuleGroups: -1,
 			},
 			expectedCount: map[string]int{
 				"user1": 3,
@@ -717,6 +731,7 @@
 			shardingStrategy: util.ShardingStrategyShuffle,
 			rulesRequest: RulesRequest{
 				RuleGroupNames: []string{"third"},
+				MaxRuleGroups:  -1,
 			},
 			rulerStateMap: rulerStateMapAllActive,
 			expectedCount: map[string]int{
@@ -734,6 +749,7 @@
 			rulesRequest: RulesRequest{
 				RuleGroupNames: []string{"second", "third"},
 				Type:           recordingRuleFilter,
+				MaxRuleGroups:  -1,
 			},
 			expectedCount: map[string]int{
 				"user1": 2,
@@ -748,8 +764,9 @@
 			shardingStrategy: util.ShardingStrategyShuffle,
 			rulerStateMap:    rulerStateMapAllActive,
 			rulesRequest: RulesRequest{
-				Type:  alertingRuleFilter,
-				Files: []string{"latency-test"},
+				Type:          alertingRuleFilter,
+				Files:         []string{"latency-test"},
+				MaxRuleGroups: -1,
 			},
 			expectedCount: map[string]int{
 				"user1": 0,
@@ -764,7 +781,8 @@
 			shardingStrategy: util.ShardingStrategyShuffle,
 			rulerStateMap:    rulerStateMapOneLeaving,
 			rulesRequest: RulesRequest{
-				Type: recordingRuleFilter,
+				Type:          recordingRuleFilter,
+				MaxRuleGroups: -1,
 			},
 			expectedCount: map[string]int{
 				"user1": 3,
@@ -779,7 +797,8 @@
 			shardingStrategy: util.ShardingStrategyShuffle,
 			rulerStateMap:    rulerStateMapOnePending,
 			rulesRequest: RulesRequest{
-				Type: recordingRuleFilter,
+				Type:          recordingRuleFilter,
+				MaxRuleGroups: -1,
 			},
 			expectedError:           ring.ErrTooManyUnhealthyInstances,
 			expectedClientCallCount: 0,
@@ -789,7 +808,8 @@
 			shuffleShardSize: 2,
 			shardingStrategy: util.ShardingStrategyShuffle,
 			rulesRequest: RulesRequest{
-				Matchers: []string{`{alertname="atest_user1_group1_rule_1"}`},
+				Matchers:      []string{`{alertname="atest_user1_group1_rule_1"}`},
+				MaxRuleGroups: -1,
 			},
 			rulerStateMap: rulerStateMapAllActive,
 			expectedCount: map[string]int{
@@ -804,7 +824,8 @@
 			shuffleShardSize: 2,
 			shardingStrategy: util.ShardingStrategyShuffle,
 			rulesRequest: RulesRequest{
-				Matchers: []string{`{alertname="atest_user1_group1_rule_1"}`, `{alertname="atest_user2_group1_rule_1"}`},
+				Matchers:      []string{`{alertname="atest_user1_group1_rule_1"}`, `{alertname="atest_user2_group1_rule_1"}`},
+				MaxRuleGroups: -1,
 			},
 			rulerStateMap: rulerStateMapAllActive,
 			expectedCount: map[string]int{
@@ -819,7 +840,8 @@
 			shuffleShardSize: 2,
 			shardingStrategy: util.ShardingStrategyShuffle,
 			rulesRequest: RulesRequest{
-				Matchers: []string{`{templatedlabel="{{ $externalURL }}"}`},
+				Matchers:      []string{`{templatedlabel="{{ $externalURL }}"}`},
+				MaxRuleGroups: -1,
 			},
 			rulerStateMap: rulerStateMapAllActive,
 			expectedCount: map[string]int{
@@ -836,7 +858,8 @@
 			rulerStateMap:     rulerStateMapAllActive,
 			replicationFactor: 3,
 			rulesRequest: RulesRequest{
-				Matchers: []string{`{alertname="atest_user1_group1_rule_1"}`, `{alertname="atest_user2_group1_rule_1"}`},
+				Matchers:      []string{`{alertname="atest_user1_group1_rule_1"}`, `{alertname="atest_user2_group1_rule_1"}`},
+				MaxRuleGroups: -1,
 			},
 			expectedCount: map[string]int{
 				"user1": 1,
@@ -852,7 +875,8 @@
 			rulerStateMap:     rulerStateMapAllActive,
 			replicationFactor: 3,
 			rulesRequest: RulesRequest{
-				Type: recordingRuleFilter,
+				Type:          recordingRuleFilter,
+				MaxRuleGroups: -1,
 			},
 			expectedCount: map[string]int{
 				"user1": 3,
@@ -868,7 +892,8 @@
 			rulerStateMap:     rulerStateMapOnePending,
 			replicationFactor: 3,
 			rulesRequest: RulesRequest{
-				Type: recordingRuleFilter,
+				Type:          recordingRuleFilter,
+				MaxRuleGroups: -1,
 			},
 			expectedCount: map[string]int{
 				"user1": 3,
@@ -884,7 +909,8 @@
 			rulerStateMap:     rulerStateMapTwoPending,
 			replicationFactor: 3,
 			rulesRequest: RulesRequest{
-				Type: recordingRuleFilter,
+				Type:          recordingRuleFilter,
+				MaxRuleGroups: -1,
 			},
 			expectedError: ring.ErrTooManyUnhealthyInstances,
 		},
@@ -897,7 +923,8 @@
 			rulerAZMap:        rulerAZEvenSpread,
 			replicationFactor: 3,
 			rulesRequest: RulesRequest{
-				Type: recordingRuleFilter,
+				Type:          recordingRuleFilter,
+				MaxRuleGroups: -1,
 			},
 			expectedCount: map[string]int{
 				"user1": 3,
@@ -915,7 +942,8 @@
 			rulerAZMap:        rulerAZEvenSpread,
 			replicationFactor: 3,
 			rulesRequest: RulesRequest{
-				Type: recordingRuleFilter,
+				Type:          recordingRuleFilter,
+				MaxRuleGroups: -1,
 			},
 			expectedCount: map[string]int{
 				"user1": 3,
@@ -933,7 +961,8 @@
 			rulerAZMap:        rulerAZSingleZone,
 			replicationFactor: 3,
 			rulesRequest: RulesRequest{
-				Type: recordingRuleFilter,
+				Type:          recordingRuleFilter,
+				MaxRuleGroups: -1,
 			},
 			expectedCount: map[string]int{
 				"user1": 3,
@@ -951,7 +980,8 @@
 			rulerAZMap:        rulerAZEvenSpread,
 			replicationFactor: 3,
 			rulesRequest: RulesRequest{
-				Type: recordingRuleFilter,
+				Type:          recordingRuleFilter,
+				MaxRuleGroups: -1,
 			},
 			expectedError: ring.ErrTooManyUnhealthyInstances,
 		},
@@ -1063,7 +1093,7 @@ func TestGetRules(t *testing.T) {
 				require.NoError(t, err)
 			}
 			rct := 0
-			for _, ruleStateDesc := range ruleStateDescriptions {
+			for _, ruleStateDesc := range ruleStateDescriptions.Groups {
 				rct += len(ruleStateDesc.ActiveRules)
 			}
 			require.Equal(t, tc.expectedCount[u], rct)
@@ -1313,11 +1343,11 @@ func TestGetRulesFromBackup(t *testing.T) {
 		}
 	}
 	ctx := user.InjectOrgID(context.Background(), tenantId)
-	ruleStateDescriptions, err := rulerAddrMap["ruler1"].GetRules(ctx, RulesRequest{})
+	ruleStateDescriptions, err := rulerAddrMap["ruler1"].GetRules(ctx, RulesRequest{MaxRuleGroups: -1})
 	require.NoError(t, err)
-	require.Equal(t, 5, len(ruleStateDescriptions))
+	require.Equal(t, 5, len(ruleStateDescriptions.Groups))
 	stateByKey := map[string]*GroupStateDesc{}
-	for _, state := range ruleStateDescriptions {
+	for _, state := range ruleStateDescriptions.Groups {
 		stateByKey[state.Group.Namespace+";"+state.Group.Name] = state
 	}
 	// Rule groups whose names start with b are from the backup and those that start with l are evaluating, the details of
@@ -1333,12 +1363,13 @@
 		Files:          []string{"namespace"},
 		RuleGroupNames: []string{"b1"},
 		Type:           recordingRuleFilter,
+		MaxRuleGroups:  -1,
 	})
 	require.NoError(t, err)
-	require.Equal(t, 1, len(ruleStateDescriptions))
-	require.Equal(t, "b1", ruleStateDescriptions[0].Group.Name)
-	require.Equal(t, 1, len(ruleStateDescriptions[0].ActiveRules))
-	require.Equal(t, "rtest_user1_1", ruleStateDescriptions[0].ActiveRules[0].Rule.Record)
+	require.Equal(t, 1, len(ruleStateDescriptions.Groups))
+	require.Equal(t, "b1", ruleStateDescriptions.Groups[0].Group.Name)
+	require.Equal(t, 1, len(ruleStateDescriptions.Groups[0].ActiveRules))
+	require.Equal(t, "rtest_user1_1", ruleStateDescriptions.Groups[0].ActiveRules[0].Rule.Record)
 }
 
 func TestGetRules_HA(t *testing.T) {
@@ -1538,11 +1569,11 @@ func getRulesHATest(replicationFactor int) func(t *testing.T) {
 		getRules := func(ruler string) {
 			ctx := user.InjectOrgID(context.Background(), tenantId)
-			ruleStateDescriptions, err := rulerAddrMap[ruler].GetRules(ctx, RulesRequest{})
+			ruleStateDescriptions, err := rulerAddrMap[ruler].GetRules(ctx, RulesRequest{MaxRuleGroups: -1})
 			require.NoError(t, err)
-			require.Equal(t, 5, len(ruleStateDescriptions))
+			require.Equal(t, 5, len(ruleStateDescriptions.Groups))
 			stateByKey := map[string]*GroupStateDesc{}
-			for _, state := range ruleStateDescriptions {
+			for _, state := range ruleStateDescriptions.Groups {
 				stateByKey[state.Group.Namespace+";"+state.Group.Name] = state
 			}
 			// Rule groups whose names start with b are from the backup and those that start with l are evaluating, the details of
@@ -1558,11 +1589,11 @@ func getRulesHATest(replicationFactor int) func(t *testing.T) {
 
 		ctx := user.InjectOrgID(context.Background(), tenantId)
-		ruleResponse, err := rulerAddrMap["ruler2"].Rules(ctx, &RulesRequest{})
+		ruleResponse, err := rulerAddrMap["ruler2"].Rules(ctx, &RulesRequest{MaxRuleGroups: -1})
 		require.NoError(t, err)
 		require.Equal(t, 5, len(ruleResponse.Groups))
 
-		ruleResponse, err = rulerAddrMap["ruler3"].Rules(ctx, &RulesRequest{})
+		ruleResponse, err = rulerAddrMap["ruler3"].Rules(ctx, &RulesRequest{MaxRuleGroups: -1})
 		require.NoError(t, err)
 		require.Equal(t, 5, len(ruleResponse.Groups))
 	}
@@ -2794,7 +2825,7 @@ func TestRuler_QueryOffset(t *testing.T) {
 	defer services.StopAndAwaitTerminated(context.Background(), r) //nolint:errcheck
 
 	ctx := user.InjectOrgID(context.Background(), "user1")
-	rls, err := r.Rules(ctx, &RulesRequest{})
+	rls, err := r.Rules(ctx, &RulesRequest{MaxRuleGroups: -1})
 	require.NoError(t, err)
 	require.Len(t, rls.Groups, 1)
 	rg := rls.Groups[0]
@@ -2806,7 +2837,7 @@
 	require.Equal(t, time.Duration(0), *gotOffset)
 
 	ctx = user.InjectOrgID(context.Background(), "user2")
-	rls, err = r.Rules(ctx, &RulesRequest{})
+	rls, err = r.Rules(ctx, &RulesRequest{MaxRuleGroups: -1})
 	require.NoError(t, err)
 	require.Len(t, rls.Groups, 1)
 	rg = rls.Groups[0]
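Every call site updated above passes MaxRuleGroups: -1, the "return everything" convention, so these existing tests keep asserting on complete result sets rather than on a first page. A sketch of both conventions against generatePage itself, assuming a hypothetical makeSortedGroups helper that builds *GroupStateDesc values already in token order; only generatePage and GetRuleGroupNextToken come from this change:

	func TestGeneratePageSketch(t *testing.T) {
		groups := makeSortedGroups(5) // hypothetical helper: five groups in token order

		// MaxRuleGroups < 0 disables paging: all groups come back with an empty token.
		all, token := generatePage(groups, -1)
		require.Len(t, all, 5)
		require.Empty(t, token)

		// A positive limit returns one page plus a token naming the page's last group.
		page, next := generatePage(groups, 2)
		require.Len(t, page, 2)
		require.Equal(t, GetRuleGroupNextToken(page[1].Group.Namespace, page[1].Group.Name), next)
	}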