Removing deprecated chunk storage flag (#5940)
* Removing deprecated chunk storage flag

Signed-off-by: alanprot <[email protected]>

* lint

Signed-off-by: alanprot <[email protected]>

---------

Signed-off-by: alanprot <[email protected]>
alanprot authored May 10, 2024
1 parent af97ee3 commit 02254ea
Showing 5 changed files with 3 additions and 23 deletions.
8 changes: 0 additions & 8 deletions docs/configuration/config-file-reference.md
@@ -3148,14 +3148,6 @@ The `limits_config` configures default and per-tenant limits imposed by Cortex s
 # CLI flag: -ingester.max-exemplars
 [max_exemplars: <int> | default = 0]
-# The maximum number of series for which a query can fetch samples from each
-# ingester. This limit is enforced only in the ingesters (when querying samples
-# not flushed to the storage yet) and it's a per-instance limit. This limit is
-# ignored when running the Cortex blocks storage. When running Cortex with
-# blocks storage use -querier.max-fetched-series-per-query limit instead.
-# CLI flag: -ingester.max-series-per-query
-[max_series_per_query: <int> | default = 100000]
 # The maximum number of active series per user, per ingester. 0 to disable.
 # CLI flag: -ingester.max-series-per-user
 [max_series_per_user: <int> | default = 5000000]
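For operators migrating off the removed limit, the deleted documentation above already points at the blocks-storage replacement, `-querier.max-fetched-series-per-query`. As a rough, illustrative sketch only (assuming the flag's usual YAML form; the value shown is an arbitrary example, not a recommendation), the per-tenant equivalent sits in the `limits` block of the Cortex configuration:

```yaml
limits:
  # Caps the unique series a single query may fetch from ingesters and
  # blocks storage; 0 leaves the limit disabled. Illustrative value only.
  # CLI flag: -querier.max-fetched-series-per-query
  max_fetched_series_per_query: 100000
```

Unlike the removed per-ingester limit, the replacement is enforced on the query path rather than inside each ingester, which is why the old flag was already documented as ignored under blocks storage.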
2 changes: 0 additions & 2 deletions pkg/cortex/runtime_config_test.go
@@ -22,7 +22,6 @@ overrides:
 max_global_series_per_metric: 7000
 max_global_series_per_user: 15000
 max_series_per_metric: 0
-max_series_per_query: 30000
 max_series_per_user: 0
 ruler_max_rule_groups_per_tenant: 20
 ruler_max_rules_per_rule_group: 20
@@ -37,7 +36,6 @@ overrides:
 IngestionBurstSize: 15000,
 MaxGlobalSeriesPerUser: 15000,
 MaxGlobalSeriesPerMetric: 7000,
-MaxSeriesPerQuery: 30000,
 RulerMaxRulesPerRuleGroup: 20,
 RulerMaxRuleGroupsPerTenant: 20,
 }
5 changes: 0 additions & 5 deletions pkg/ingester/limiter.go
@@ -97,11 +97,6 @@ func (l *Limiter) AssertMaxMetricsWithMetadataPerUser(userID string, metrics int
 return errMaxMetadataPerUserLimitExceeded
 }
 
-// MaxSeriesPerQuery returns the maximum number of series a query is allowed to hit.
-func (l *Limiter) MaxSeriesPerQuery(userID string) int {
-return l.limits.MaxSeriesPerQuery(userID)
-}
-
 // FormatError returns the input error enriched with the actual limits for the given user.
 // It acts as pass-through if the input error is unknown.
 func (l *Limiter) FormatError(userID string, err error) error {
1 change: 0 additions & 1 deletion pkg/util/validation/exporter.go
@@ -34,7 +34,6 @@ func (oe *OverridesExporter) Collect(ch chan<- prometheus.Metric) {
 ch <- prometheus.MustNewConstMetric(oe.description, prometheus.GaugeValue, limits.IngestionRate, "ingestion_rate", tenant)
 ch <- prometheus.MustNewConstMetric(oe.description, prometheus.GaugeValue, float64(limits.IngestionBurstSize), "ingestion_burst_size", tenant)
 
-ch <- prometheus.MustNewConstMetric(oe.description, prometheus.GaugeValue, float64(limits.MaxSeriesPerQuery), "max_series_per_query", tenant)
 ch <- prometheus.MustNewConstMetric(oe.description, prometheus.GaugeValue, float64(limits.MaxLocalSeriesPerUser), "max_local_series_per_user", tenant)
 ch <- prometheus.MustNewConstMetric(oe.description, prometheus.GaugeValue, float64(limits.MaxLocalSeriesPerMetric), "max_local_series_per_metric", tenant)
 ch <- prometheus.MustNewConstMetric(oe.description, prometheus.GaugeValue, float64(limits.MaxGlobalSeriesPerUser), "max_global_series_per_user", tenant)
10 changes: 3 additions & 7 deletions pkg/util/validation/limits.go
@@ -16,6 +16,7 @@ import (
 "golang.org/x/time/rate"
 
 "github.com/cortexproject/cortex/pkg/util/flagext"
+util_log "github.com/cortexproject/cortex/pkg/util/log"
 )
 
 var errMaxGlobalSeriesPerUserValidation = errors.New("The ingester.max-global-series-per-user limit is unsupported if distributor.shard-by-all-labels is disabled")
@@ -101,7 +102,6 @@ type Limits struct {
 
 // Ingester enforced limits.
 // Series
-MaxSeriesPerQuery int `yaml:"max_series_per_query" json:"max_series_per_query"`
 MaxLocalSeriesPerUser int `yaml:"max_series_per_user" json:"max_series_per_user"`
 MaxLocalSeriesPerMetric int `yaml:"max_series_per_metric" json:"max_series_per_metric"`
 MaxGlobalSeriesPerUser int `yaml:"max_global_series_per_user" json:"max_global_series_per_user"`
@@ -171,6 +171,8 @@ type Limits struct {
 
 // RegisterFlags adds the flags required to config this to the given FlagSet
 func (l *Limits) RegisterFlags(f *flag.FlagSet) {
+flagext.DeprecatedFlag(f, "ingester.max-series-per-query", "Deprecated: The maximum number of series for which a query can fetch samples from each ingester. This limit is enforced only in the ingesters (when querying samples not flushed to the storage yet) and it's a per-instance limit. This limit is ignored when running the Cortex blocks storage. When running Cortex with blocks storage use -querier.max-fetched-series-per-query limit instead.", util_log.Logger)
+
 f.IntVar(&l.IngestionTenantShardSize, "distributor.ingestion-tenant-shard-size", 0, "The default tenant's shard size when the shuffle-sharding strategy is used. Must be set both on ingesters and distributors. When this setting is specified in the per-tenant overrides, a value of 0 disables shuffle sharding for the tenant.")
 f.Float64Var(&l.IngestionRate, "distributor.ingestion-rate-limit", 25000, "Per-user ingestion rate limit in samples per second.")
 f.StringVar(&l.IngestionRateStrategy, "distributor.ingestion-rate-limit-strategy", "local", "Whether the ingestion rate limit should be applied individually to each distributor instance (local), or evenly shared across the cluster (global).")
@@ -193,7 +195,6 @@ func (l *Limits) RegisterFlags(f *flag.FlagSet) {
 f.BoolVar(&l.EnforceMetricName, "validation.enforce-metric-name", true, "Enforce every sample has a metric name.")
 f.BoolVar(&l.EnforceMetadataMetricName, "validation.enforce-metadata-metric-name", true, "Enforce every metadata has a metric name.")
 
-f.IntVar(&l.MaxSeriesPerQuery, "ingester.max-series-per-query", 100000, "The maximum number of series for which a query can fetch samples from each ingester. This limit is enforced only in the ingesters (when querying samples not flushed to the storage yet) and it's a per-instance limit. This limit is ignored when running the Cortex blocks storage. When running Cortex with blocks storage use -querier.max-fetched-series-per-query limit instead.")
 f.IntVar(&l.MaxLocalSeriesPerUser, "ingester.max-series-per-user", 5000000, "The maximum number of active series per user, per ingester. 0 to disable.")
 f.IntVar(&l.MaxLocalSeriesPerMetric, "ingester.max-series-per-metric", 50000, "The maximum number of active series per metric name, per ingester. 0 to disable.")
 f.IntVar(&l.MaxGlobalSeriesPerUser, "ingester.max-global-series-per-user", 0, "The maximum number of active series per user, across the cluster before replication. 0 to disable. Supported only if -distributor.shard-by-all-labels is true.")
@@ -493,11 +494,6 @@ func (o *Overrides) CreationGracePeriod(userID string) time.Duration {
 return time.Duration(o.GetOverridesForUser(userID).CreationGracePeriod)
 }
 
-// MaxSeriesPerQuery returns the maximum number of series a query is allowed to hit.
-func (o *Overrides) MaxSeriesPerQuery(userID string) int {
-return o.GetOverridesForUser(userID).MaxSeriesPerQuery
-}
-
 // MaxLocalSeriesPerUser returns the maximum number of series a user is allowed to store in a single ingester.
 func (o *Overrides) MaxLocalSeriesPerUser(userID string) int {
 return o.GetOverridesForUser(userID).MaxLocalSeriesPerUser
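The only lines this commit adds are the `flagext.DeprecatedFlag(...)` registration and the extra `util_log` import, which keep `-ingester.max-series-per-query` parseable while logging that it is ignored. Below is a minimal sketch of that general pattern, assuming nothing about Cortex's actual helper beyond its call shape: the `deprecatedValue` type and `registerDeprecated` function are illustrative names, and the standard library logger stands in for the go-kit logger passed as `util_log.Logger` in the diff.

```go
package main

import (
	"flag"
	"log"
	"os"
)

// deprecatedValue is an illustrative flag.Value that accepts any input,
// discards it, and logs a deprecation warning instead.
type deprecatedValue struct {
	name, message string
}

func (d deprecatedValue) String() string { return "" }

func (d deprecatedValue) Set(string) error {
	log.Printf("warning: flag -%s is deprecated: %s", d.name, d.message)
	return nil
}

// registerDeprecated mimics the shape of a helper such as
// flagext.DeprecatedFlag: the flag still parses, but its value is ignored.
func registerDeprecated(f *flag.FlagSet, name, message string) {
	f.Var(deprecatedValue{name: name, message: message}, name, message)
}

func main() {
	fs := flag.NewFlagSet("example", flag.ContinueOnError)
	registerDeprecated(fs, "ingester.max-series-per-query",
		"use -querier.max-fetched-series-per-query instead")

	// Parsing the old flag succeeds but only produces a warning.
	if err := fs.Parse([]string{"-ingester.max-series-per-query=100000"}); err != nil {
		log.Println(err)
		os.Exit(1)
	}
}
```

Registering the old name this way lets existing command lines and config management keep working for a deprecation window instead of failing to parse outright.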
