From d08ff0ed5ce3620c95555307282f75aa816fb74e Mon Sep 17 00:00:00 2001
From: Charlie Le
Date: Sat, 4 May 2024 12:53:51 -0700
Subject: [PATCH] Remove overrides exporter

Signed-off-by: Charlie Le
---
 README.md                                   |  10 -
 cmd/cortextool/main.go                      |  22 +-
 pkg/commands/overrides_exporter.go          | 243 ------
 pkg/commands/overrides_exporter_test.go     | 136 ----
 .../cortex/pkg/util/extract/extract.go      |  78 --
 .../cortex/pkg/util/validation/errors.go    | 252 -------
 .../cortex/pkg/util/validation/exporter.go  |  43 --
 .../cortex/pkg/util/validation/limits.go    | 702 ------------------
 .../validation/notifications_limit_flag.go  |  56 --
 .../cortex/pkg/util/validation/validate.go  | 262 -------
 vendor/modules.txt                          |   2 -
 11 files changed, 10 insertions(+), 1796 deletions(-)
 delete mode 100644 pkg/commands/overrides_exporter.go
 delete mode 100644 pkg/commands/overrides_exporter_test.go
 delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/util/extract/extract.go
 delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/util/validation/errors.go
 delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/util/validation/exporter.go
 delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/util/validation/limits.go
 delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/util/validation/notifications_limit_flag.go
 delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/util/validation/validate.go

diff --git a/README.md b/README.md
index 33929fe03..0bf030c32 100644
--- a/README.md
+++ b/README.md
@@ -174,16 +174,6 @@ promtool tsdb dump ./local-tsdb
 prometheus --storage.tsdb.path ./local-tsdb --config.file=<(echo "")
 ```
 
-#### Overrides Exporter
-
-The Overrides Exporter continuously exports [per-tenant configuration overrides][runtime-config] as metrics. Optionally, it can also export a presets file (see the example [override config file] and [presets file]).
-
-    cortextool overrides-exporter --overrides-file overrides.yaml --presets-file presets.yaml
-
-[override config file]:./pkg/commands/testdata/overrides.yaml
-[presets file]:./pkg/commands/testdata/presets.yaml
-[runtime-config]:https://cortexmetrics.io/docs/configuration/arguments/#runtime-configuration-file
-
 #### Generate ACL Headers
 
 This lets you generate the header which can then be used to enforce access control rules in GME / GrafanaCloud.
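For reference, the overrides file the removed command consumed is plain YAML with a top-level `overrides:` key mapping tenant IDs to limit values. A minimal sketch of reading that shape (the tenant ID and numbers mirror the deleted test fixtures; the `limits` struct here is a pared-down, hypothetical stand-in for the full `validation.Limits`):

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

// limits is a pared-down stand-in for validation.Limits; the YAML keys
// match the limit names the removed exporter reported as metrics.
type limits struct {
	IngestionRate          float64 `yaml:"ingestion_rate"`
	IngestionBurstSize     int     `yaml:"ingestion_burst_size"`
	MaxGlobalSeriesPerUser int     `yaml:"max_global_series_per_user"`
	MaxSeriesPerQuery      int     `yaml:"max_series_per_query"`
}

// Tenant ID and values are taken from the deleted test fixtures.
const overridesYAML = `
overrides:
  "123":
    ingestion_rate: 700000
    ingestion_burst_size: 7000000
    max_global_series_per_user: 6000000
    max_series_per_query: 100000
`

func main() {
	var doc struct {
		Overrides map[string]*limits `yaml:"overrides"`
	}
	if err := yaml.Unmarshal([]byte(overridesYAML), &doc); err != nil {
		panic(err)
	}
	for tenant, l := range doc.Overrides {
		fmt.Printf("tenant %s: ingestion_rate=%g burst=%d\n",
			tenant, l.IngestionRate, l.IngestionBurstSize)
	}
}
```

Running this prints the single tenant's limits; the deleted exporter turned the same data into `cortex_overrides` gauge samples.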
diff --git a/cmd/cortextool/main.go b/cmd/cortextool/main.go
index 111f98269..baad0cb1a 100644
--- a/cmd/cortextool/main.go
+++ b/cmd/cortextool/main.go
@@ -11,17 +11,16 @@ import (
 )
 
 var (
-	ruleCommand              commands.RuleCommand
-	alertCommand             commands.AlertCommand
-	alertmanagerCommand      commands.AlertmanagerCommand
-	logConfig                commands.LoggerConfig
-	pushGateway              commands.PushGatewayConfig
-	loadgenCommand           commands.LoadgenCommand
-	remoteReadCommand        commands.RemoteReadCommand
-	aclCommand               commands.AccessControlCommand
-	analyseCommand           commands.AnalyseCommand
-	bucketValidateCommand    commands.BucketValidationCommand
-	overridesExporterCommand = commands.NewOverridesExporterCommand()
+	ruleCommand           commands.RuleCommand
+	alertCommand          commands.AlertCommand
+	alertmanagerCommand   commands.AlertmanagerCommand
+	logConfig             commands.LoggerConfig
+	pushGateway           commands.PushGatewayConfig
+	loadgenCommand        commands.LoadgenCommand
+	remoteReadCommand     commands.RemoteReadCommand
+	aclCommand            commands.AccessControlCommand
+	analyseCommand        commands.AnalyseCommand
+	bucketValidateCommand commands.BucketValidationCommand
 )
 
 func main() {
@@ -33,7 +32,6 @@ func main() {
 	pushGateway.Register(app)
 	loadgenCommand.Register(app)
 	remoteReadCommand.Register(app)
-	overridesExporterCommand.Register(app)
 	aclCommand.Register(app)
 	analyseCommand.Register(app)
 	bucketValidateCommand.Register(app)
diff --git a/pkg/commands/overrides_exporter.go b/pkg/commands/overrides_exporter.go
deleted file mode 100644
index 604eb129a..000000000
--- a/pkg/commands/overrides_exporter.go
+++ /dev/null
@@ -1,243 +0,0 @@
-package commands
-
-import (
-	"context"
-	"errors"
-	"fmt"
-	"net/http"
-	"os"
-	"os/signal"
-	"sync"
-	"time"
-
-	"github.com/cortexproject/cortex/pkg/util/validation"
-	"github.com/prometheus/client_golang/prometheus"
-	"github.com/prometheus/client_golang/prometheus/promauto"
-	"github.com/prometheus/client_golang/prometheus/promhttp"
-	"github.com/sirupsen/logrus"
-	"gopkg.in/alecthomas/kingpin.v2"
-	"gopkg.in/yaml.v3"
-)
-
-type OverridesExporterCommand struct {
-	listenAddress     string
-	metricsEndpoint   string
-	overridesFilePath string
-	presetsFilePath   string
-	refreshInterval   time.Duration
-
-	registry     *prometheus.Registry
-	presetsGauge *prometheus.GaugeVec
-
-	lastLimitsMtx sync.Mutex
-	lastLimits    map[string]*validation.Limits
-}
-
-func NewOverridesExporterCommand() *OverridesExporterCommand {
-	registry := prometheus.NewRegistry()
-	oc := &OverridesExporterCommand{
-		registry: registry,
-		presetsGauge: promauto.With(registry).NewGaugeVec(prometheus.GaugeOpts{
-			Name: "cortex_overrides_presets",
-			Help: "Preset limits.",
-		}, []string{"limit_name", "preset"}),
-		lastLimits: map[string]*validation.Limits{},
-	}
-
-	registry.MustRegister(validation.NewOverridesExporter(oc))
-
-	return oc
-}
-
-func (o *OverridesExporterCommand) Register(app *kingpin.Application) {
-	overridesExporterCommand := app.Command("overrides-exporter", "The overrides exporter exposes metrics about the runtime configuration of Cortex.").Action(o.run)
-	overridesExporterCommand.Flag("overrides-file", "File path where the overrides config is stored.").Required().StringVar(&o.overridesFilePath)
-	// Presets are the small user, medium user, etc. configs that we have defined.
- overridesExporterCommand.Flag("presets-file", "File path where presets config stored.").Default("").StringVar(&o.presetsFilePath) - overridesExporterCommand.Flag("listen-address", "Address on which to expose metrics.").Default(":9683").StringVar(&o.listenAddress) - overridesExporterCommand.Flag("metrics-endpoint", "Path under which to expose metrics.").Default("/metrics").StringVar(&o.metricsEndpoint) - overridesExporterCommand.Flag("refresh-interval", "Interval how often the overrides and potentially presets files get refreshed.").Default("1m").DurationVar(&o.refreshInterval) -} - -func (o *OverridesExporterCommand) updateOverridesMetrics() error { - if o.overridesFilePath == "" { - return errors.New("overrides filepath is empty") - } - - logrus.Debug("updating overrides") - - overrides := &struct { - TenantLimits map[string]*validation.Limits `yaml:"overrides"` - }{} - bytes, err := os.ReadFile(o.overridesFilePath) - if err != nil { - return fmt.Errorf("failed to update overrides, err: %w", err) - } - if err := yaml.Unmarshal(bytes, overrides); err != nil { - return fmt.Errorf("failed to update overrides, err: %w", err) - } - o.updateMetrics(overrides.TenantLimits) - - return nil -} - -func (o *OverridesExporterCommand) updatePresetsMetrics() error { - if o.presetsFilePath == "" { - return nil - } - - logrus.Debug("updating presets") - - presets := &struct { - Presets map[string]*validation.Limits `yaml:"presets"` - }{} - bytes, err := os.ReadFile(o.presetsFilePath) - if err != nil { - return fmt.Errorf("failed to update presets, error reading file: %w", err) - } - if err := yaml.Unmarshal(bytes, presets); err != nil { - return fmt.Errorf("failed to update presets, error parsing YAML: %w", err) - } - o.updatePresets(presets.Presets) - return nil -} - -func (o *OverridesExporterCommand) updatePresets(presetsMap map[string]*validation.Limits) { - for preset, limits := range presetsMap { - o.presetsGauge.WithLabelValues( - "max_series_per_query", preset, - ).Set(float64(limits.MaxSeriesPerQuery)) - o.presetsGauge.WithLabelValues( - "max_local_series_per_user", preset, - ).Set(float64(limits.MaxLocalSeriesPerUser)) - o.presetsGauge.WithLabelValues( - "max_local_series_per_metric", preset, - ).Set(float64(limits.MaxLocalSeriesPerMetric)) - o.presetsGauge.WithLabelValues( - "max_global_series_per_user", preset, - ).Set(float64(limits.MaxGlobalSeriesPerUser)) - o.presetsGauge.WithLabelValues( - "max_global_series_per_metric", preset, - ).Set(float64(limits.MaxGlobalSeriesPerMetric)) - o.presetsGauge.WithLabelValues( - "ingestion_rate", preset, - ).Set(limits.IngestionRate) - o.presetsGauge.WithLabelValues( - "ingestion_burst_size", preset, - ).Set(float64(limits.IngestionBurstSize)) - } -} - -func (o *OverridesExporterCommand) updateMetrics(limitsMap map[string]*validation.Limits) { - o.lastLimitsMtx.Lock() - o.lastLimits = limitsMap - o.lastLimitsMtx.Unlock() -} - -func (o *OverridesExporterCommand) run(_ *kingpin.ParseContext) error { - if o.overridesFilePath == "" { - return errors.New("empty overrides file path") - } - - // Update the metrics once before starting. - if err := o.updateOverridesMetrics(); err != nil { - return err - } - if err := o.updatePresetsMetrics(); err != nil { - return err - } - - stopCh := make(chan struct{}) - var wg sync.WaitGroup - defer func() { - close(stopCh) - wg.Wait() - }() - - // Update the metrics every 1 minute. 
- wg.Add(1) - go func() { - defer wg.Done() - - for { - select { - case <-stopCh: - return - case <-time.After(o.refreshInterval): - if err := o.updateOverridesMetrics(); err != nil { - logrus.Warnf("error updating override metrics: %s", err) - } - if err := o.updatePresetsMetrics(); err != nil { - logrus.Warnf("error updating presets metrics: %s", err) - } - } - } - }() - - mux := http.NewServeMux() - mux.Handle(o.metricsEndpoint, promhttp.HandlerFor(o.registry, promhttp.HandlerOpts{ - MaxRequestsInFlight: 10, - Registry: o.registry, - })) - - mux.HandleFunc("/ready", func(w http.ResponseWriter, _ *http.Request) { - http.Error(w, "ready", http.StatusOK) - }) - - signalCh := make(chan os.Signal, 1) - signal.Notify(signalCh, os.Interrupt) - - server := &http.Server{ - Addr: o.listenAddress, - Handler: mux, - } - - // Block until a signal is received. - wg.Add(1) - go func() { - defer wg.Done() - - select { - case <-stopCh: - return - case s := <-signalCh: - logrus.Infof("got signal: %s", s) - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - if err := server.Shutdown(ctx); err != nil { - logrus.Warnf("error shutting down http server: %s", err) - } - } - }() - - mode := "runtime config overrides" - if o.presetsFilePath != "" { - mode += " and presets" - } - logrus.Infof("exposing %s metrics on %s", mode, o.listenAddress) - if err := server.ListenAndServe(); err != http.ErrServerClosed { - return err - } - - return nil -} - -// ByUserID implements validation.TenantLimits. -func (o *OverridesExporterCommand) ByUserID(userID string) *validation.Limits { - o.lastLimitsMtx.Lock() - defer o.lastLimitsMtx.Unlock() - return o.lastLimits[userID] -} - -// AllByUserID implements validation.TenantLimits. -func (o *OverridesExporterCommand) AllByUserID() map[string]*validation.Limits { - o.lastLimitsMtx.Lock() - defer o.lastLimitsMtx.Unlock() - - limits := make(map[string]*validation.Limits, len(o.lastLimits)) - for k, v := range o.lastLimits { - limits[k] = v - } - - return limits -} diff --git a/pkg/commands/overrides_exporter_test.go b/pkg/commands/overrides_exporter_test.go deleted file mode 100644 index 90ab00685..000000000 --- a/pkg/commands/overrides_exporter_test.go +++ /dev/null @@ -1,136 +0,0 @@ -package commands - -import ( - "bytes" - "testing" - - "github.com/prometheus/client_golang/prometheus/testutil" - "github.com/stretchr/testify/assert" -) - -const ( - metricsOverrides = ` -# HELP cortex_overrides Resource limit overrides applied to tenants -# TYPE cortex_overrides gauge -cortex_overrides{limit_name="ingestion_burst_size",user="123"} 7e+06 -cortex_overrides{limit_name="ingestion_burst_size",user="159"} 3.5e+06 -cortex_overrides{limit_name="ingestion_burst_size",user="456"} 7e+06 -cortex_overrides{limit_name="ingestion_burst_size",user="789"} 3.5e+06 -cortex_overrides{limit_name="ingestion_rate",user="123"} 700000 -cortex_overrides{limit_name="ingestion_rate",user="159"} 350000 -cortex_overrides{limit_name="ingestion_rate",user="456"} 700000 -cortex_overrides{limit_name="ingestion_rate",user="789"} 350000 -cortex_overrides{limit_name="max_global_series_per_metric",user="123"} 600000 -cortex_overrides{limit_name="max_global_series_per_metric",user="159"} 300000 -cortex_overrides{limit_name="max_global_series_per_metric",user="456"} 600000 -cortex_overrides{limit_name="max_global_series_per_metric",user="789"} 300000 -cortex_overrides{limit_name="max_global_series_per_user",user="123"} 6e+06 
-cortex_overrides{limit_name="max_global_series_per_user",user="159"} 3e+06 -cortex_overrides{limit_name="max_global_series_per_user",user="456"} 6e+06 -cortex_overrides{limit_name="max_global_series_per_user",user="789"} 3e+06 -cortex_overrides{limit_name="max_local_series_per_metric",user="123"} 0 -cortex_overrides{limit_name="max_local_series_per_metric",user="159"} 0 -cortex_overrides{limit_name="max_local_series_per_metric",user="456"} 0 -cortex_overrides{limit_name="max_local_series_per_metric",user="789"} 0 -cortex_overrides{limit_name="max_local_series_per_user",user="123"} 0 -cortex_overrides{limit_name="max_local_series_per_user",user="159"} 0 -cortex_overrides{limit_name="max_local_series_per_user",user="456"} 0 -cortex_overrides{limit_name="max_local_series_per_user",user="789"} 0 -cortex_overrides{limit_name="max_series_per_query",user="123"} 100000 -cortex_overrides{limit_name="max_series_per_query",user="159"} 100000 -cortex_overrides{limit_name="max_series_per_query",user="456"} 100000 -cortex_overrides{limit_name="max_series_per_query",user="789"} 100000 -` - metricsOverridesAndPresets = ` -# HELP cortex_overrides Resource limit overrides applied to tenants -# TYPE cortex_overrides gauge -cortex_overrides{limit_name="ingestion_burst_size",user="123"} 7e+06 -cortex_overrides{limit_name="ingestion_burst_size",user="159"} 3.5e+06 -cortex_overrides{limit_name="ingestion_burst_size",user="456"} 7e+06 -cortex_overrides{limit_name="ingestion_burst_size",user="789"} 3.5e+06 -cortex_overrides{limit_name="ingestion_rate",user="123"} 700000 -cortex_overrides{limit_name="ingestion_rate",user="159"} 350000 -cortex_overrides{limit_name="ingestion_rate",user="456"} 700000 -cortex_overrides{limit_name="ingestion_rate",user="789"} 350000 -cortex_overrides{limit_name="max_global_series_per_metric",user="123"} 600000 -cortex_overrides{limit_name="max_global_series_per_metric",user="159"} 300000 -cortex_overrides{limit_name="max_global_series_per_metric",user="456"} 600000 -cortex_overrides{limit_name="max_global_series_per_metric",user="789"} 300000 -cortex_overrides{limit_name="max_global_series_per_user",user="123"} 6e+06 -cortex_overrides{limit_name="max_global_series_per_user",user="159"} 3e+06 -cortex_overrides{limit_name="max_global_series_per_user",user="456"} 6e+06 -cortex_overrides{limit_name="max_global_series_per_user",user="789"} 3e+06 -cortex_overrides{limit_name="max_local_series_per_metric",user="123"} 0 -cortex_overrides{limit_name="max_local_series_per_metric",user="159"} 0 -cortex_overrides{limit_name="max_local_series_per_metric",user="456"} 0 -cortex_overrides{limit_name="max_local_series_per_metric",user="789"} 0 -cortex_overrides{limit_name="max_local_series_per_user",user="123"} 0 -cortex_overrides{limit_name="max_local_series_per_user",user="159"} 0 -cortex_overrides{limit_name="max_local_series_per_user",user="456"} 0 -cortex_overrides{limit_name="max_local_series_per_user",user="789"} 0 -cortex_overrides{limit_name="max_series_per_query",user="123"} 100000 -cortex_overrides{limit_name="max_series_per_query",user="159"} 100000 -cortex_overrides{limit_name="max_series_per_query",user="456"} 100000 -cortex_overrides{limit_name="max_series_per_query",user="789"} 100000 -# HELP cortex_overrides_presets Preset limits. 
-# TYPE cortex_overrides_presets gauge -cortex_overrides_presets{limit_name="ingestion_burst_size",preset="big_user"} 7e+06 -cortex_overrides_presets{limit_name="ingestion_burst_size",preset="medium_user"} 3.5e+06 -cortex_overrides_presets{limit_name="ingestion_burst_size",preset="mega_user"} 2.25e+07 -cortex_overrides_presets{limit_name="ingestion_burst_size",preset="small_user"} 1e+06 -cortex_overrides_presets{limit_name="ingestion_burst_size",preset="super_user"} 1.5e+07 -cortex_overrides_presets{limit_name="ingestion_rate",preset="big_user"} 700000 -cortex_overrides_presets{limit_name="ingestion_rate",preset="medium_user"} 350000 -cortex_overrides_presets{limit_name="ingestion_rate",preset="mega_user"} 2.25e+06 -cortex_overrides_presets{limit_name="ingestion_rate",preset="small_user"} 100000 -cortex_overrides_presets{limit_name="ingestion_rate",preset="super_user"} 1.5e+06 -cortex_overrides_presets{limit_name="max_global_series_per_metric",preset="big_user"} 600000 -cortex_overrides_presets{limit_name="max_global_series_per_metric",preset="medium_user"} 300000 -cortex_overrides_presets{limit_name="max_global_series_per_metric",preset="mega_user"} 1.6e+06 -cortex_overrides_presets{limit_name="max_global_series_per_metric",preset="small_user"} 100000 -cortex_overrides_presets{limit_name="max_global_series_per_metric",preset="super_user"} 1.2e+06 -cortex_overrides_presets{limit_name="max_global_series_per_user",preset="big_user"} 6e+06 -cortex_overrides_presets{limit_name="max_global_series_per_user",preset="medium_user"} 3e+06 -cortex_overrides_presets{limit_name="max_global_series_per_user",preset="mega_user"} 1.6e+07 -cortex_overrides_presets{limit_name="max_global_series_per_user",preset="small_user"} 1e+06 -cortex_overrides_presets{limit_name="max_global_series_per_user",preset="super_user"} 1.2e+07 -cortex_overrides_presets{limit_name="max_local_series_per_metric",preset="big_user"} 0 -cortex_overrides_presets{limit_name="max_local_series_per_metric",preset="medium_user"} 0 -cortex_overrides_presets{limit_name="max_local_series_per_metric",preset="mega_user"} 0 -cortex_overrides_presets{limit_name="max_local_series_per_metric",preset="small_user"} 0 -cortex_overrides_presets{limit_name="max_local_series_per_metric",preset="super_user"} 0 -cortex_overrides_presets{limit_name="max_local_series_per_user",preset="big_user"} 0 -cortex_overrides_presets{limit_name="max_local_series_per_user",preset="medium_user"} 0 -cortex_overrides_presets{limit_name="max_local_series_per_user",preset="mega_user"} 0 -cortex_overrides_presets{limit_name="max_local_series_per_user",preset="small_user"} 0 -cortex_overrides_presets{limit_name="max_local_series_per_user",preset="super_user"} 0 -cortex_overrides_presets{limit_name="max_series_per_query",preset="big_user"} 100000 -cortex_overrides_presets{limit_name="max_series_per_query",preset="medium_user"} 100000 -cortex_overrides_presets{limit_name="max_series_per_query",preset="mega_user"} 100000 -cortex_overrides_presets{limit_name="max_series_per_query",preset="small_user"} 10000 -cortex_overrides_presets{limit_name="max_series_per_query",preset="super_user"} 100000 -` -) - -func TestOverridesExporterCommand(t *testing.T) { - o := NewOverridesExporterCommand() - - o.overridesFilePath = "testdata/overrides.yaml" - - assert.NoError(t, o.updateOverridesMetrics()) - assert.NoError(t, o.updatePresetsMetrics()) - - count, err := testutil.GatherAndCount(o.registry, "cortex_overrides", "cortex_overrides_presets") - assert.NoError(t, err) - assert.Equal(t, 28, 
count) - assert.NoError(t, testutil.GatherAndCompare(o.registry, bytes.NewReader([]byte(metricsOverrides)), "cortex_overrides", "cortex_overrides_presets")) - - o.presetsFilePath = "testdata/presets.yaml" - assert.NoError(t, o.updateOverridesMetrics()) - assert.NoError(t, o.updatePresetsMetrics()) - - count, err = testutil.GatherAndCount(o.registry, "cortex_overrides", "cortex_overrides_presets") - assert.NoError(t, err) - assert.Equal(t, 63, count) - assert.NoError(t, testutil.GatherAndCompare(o.registry, bytes.NewReader([]byte(metricsOverridesAndPresets)), "cortex_overrides", "cortex_overrides_presets")) -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/extract/extract.go b/vendor/github.com/cortexproject/cortex/pkg/util/extract/extract.go deleted file mode 100644 index 4e9eb8ec2..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/util/extract/extract.go +++ /dev/null @@ -1,78 +0,0 @@ -package extract - -import ( - "fmt" - - "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/model/labels" - - "github.com/cortexproject/cortex/pkg/cortexpb" -) - -var ( - errNoMetricNameLabel = fmt.Errorf("No metric name label") -) - -// MetricNameFromLabelAdapters extracts the metric name from a list of LabelPairs. -// The returned metric name string is a copy of the label value. -func MetricNameFromLabelAdapters(labels []cortexpb.LabelAdapter) (string, error) { - unsafeMetricName, err := UnsafeMetricNameFromLabelAdapters(labels) - if err != nil { - return "", err - } - - // Force a string copy since LabelAdapter is often a pointer into - // a large gRPC buffer which we don't want to keep alive on the heap. - return string([]byte(unsafeMetricName)), nil -} - -// UnsafeMetricNameFromLabelAdapters extracts the metric name from a list of LabelPairs. -// The returned metric name string is a reference to the label value (no copy). -func UnsafeMetricNameFromLabelAdapters(labels []cortexpb.LabelAdapter) (string, error) { - for _, label := range labels { - if label.Name == model.MetricNameLabel { - return label.Value, nil - } - } - return "", errNoMetricNameLabel -} - -// MetricNameFromMetric extract the metric name from a model.Metric -func MetricNameFromMetric(m model.Metric) (model.LabelValue, error) { - if value, found := m[model.MetricNameLabel]; found { - return value, nil - } - return "", fmt.Errorf("no MetricNameLabel for chunk") -} - -// MetricNameMatcherFromMatchers extracts the metric name from a set of matchers -func MetricNameMatcherFromMatchers(matchers []*labels.Matcher) (*labels.Matcher, []*labels.Matcher, bool) { - // Handle the case where there is no metric name and all matchers have been - // filtered out e.g. {foo=""}. - if len(matchers) == 0 { - return nil, matchers, false - } - - outMatchers := make([]*labels.Matcher, len(matchers)-1) - for i, matcher := range matchers { - if matcher.Name != model.MetricNameLabel { - continue - } - - // Copy other matchers, excluding the found metric name matcher - copy(outMatchers, matchers[:i]) - copy(outMatchers[i:], matchers[i+1:]) - return matcher, outMatchers, true - } - // Return all matchers if none are metric name matchers - return nil, matchers, false -} - -// MetricNameFromLabels extracts the metric name from a list of Prometheus Labels. 
-func MetricNameFromLabels(lbls labels.Labels) (metricName string, err error) { - metricName = lbls.Get(model.MetricNameLabel) - if metricName == "" { - err = errNoMetricNameLabel - } - return -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/validation/errors.go b/vendor/github.com/cortexproject/cortex/pkg/util/validation/errors.go deleted file mode 100644 index 483bf6acc..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/util/validation/errors.go +++ /dev/null @@ -1,252 +0,0 @@ -package validation - -import ( - "fmt" - "strconv" - "strings" - - "github.com/prometheus/common/model" - - "github.com/cortexproject/cortex/pkg/cortexpb" -) - -// ValidationError is an error returned by series validation. -// -// Ignore stutter warning. -// nolint:revive -type ValidationError error - -// genericValidationError is a basic implementation of ValidationError which can be used when the -// error format only contains the cause and the series. -type genericValidationError struct { - message string - cause string - series []cortexpb.LabelAdapter -} - -func (e *genericValidationError) Error() string { - return fmt.Sprintf(e.message, e.cause, formatLabelSet(e.series)) -} - -// labelNameTooLongError is a customized ValidationError, in that the cause and the series are -// formatted in different order in Error. -type labelNameTooLongError struct { - labelName string - series []cortexpb.LabelAdapter - limit int -} - -func (e *labelNameTooLongError) Error() string { - return fmt.Sprintf("label name too long for metric (actual: %d, limit: %d) metric: %.200q label name: %.200q", len(e.labelName), e.limit, formatLabelSet(e.series), e.labelName) -} - -func newLabelNameTooLongError(series []cortexpb.LabelAdapter, labelName string, limit int) ValidationError { - return &labelNameTooLongError{ - labelName: labelName, - series: series, - limit: limit, - } -} - -// labelValueTooLongError is a customized ValidationError, in that the cause and the series are -// formatted in different order in Error. -type labelValueTooLongError struct { - labelName string - labelValue string - series []cortexpb.LabelAdapter - limit int -} - -func (e *labelValueTooLongError) Error() string { - return fmt.Sprintf("label value too long for metric (actual: %d, limit: %d) metric: %.200q label name: %.200q label value: %.200q", - len(e.labelValue), e.limit, formatLabelSet(e.series), e.labelName, e.labelValue) -} - -func newLabelValueTooLongError(series []cortexpb.LabelAdapter, labelName, labelValue string, limit int) ValidationError { - return &labelValueTooLongError{ - labelName: labelName, - labelValue: labelValue, - series: series, - limit: limit, - } -} - -// labelsSizeBytesExceededError is a customized ValidationError, in that the cause and the series are -// formatted in different order in Error. 
-type labelsSizeBytesExceededError struct { - labelsSizeBytes int - series []cortexpb.LabelAdapter - limit int -} - -func (e *labelsSizeBytesExceededError) Error() string { - return fmt.Sprintf("labels size bytes exceeded for metric (actual: %d, limit: %d) metric: %.200q", e.labelsSizeBytes, e.limit, formatLabelSet(e.series)) -} - -func labelSizeBytesExceededError(series []cortexpb.LabelAdapter, labelsSizeBytes int, limit int) ValidationError { - return &labelsSizeBytesExceededError{ - labelsSizeBytes: labelsSizeBytes, - series: series, - limit: limit, - } -} - -func newInvalidLabelError(series []cortexpb.LabelAdapter, labelName string) ValidationError { - return &genericValidationError{ - message: "sample invalid label: %.200q metric %.200q", - cause: labelName, - series: series, - } -} - -func newDuplicatedLabelError(series []cortexpb.LabelAdapter, labelName string) ValidationError { - return &genericValidationError{ - message: "duplicate label name: %.200q metric %.200q", - cause: labelName, - series: series, - } -} - -func newLabelsNotSortedError(series []cortexpb.LabelAdapter, labelName string) ValidationError { - return &genericValidationError{ - message: "labels not sorted: %.200q metric %.200q", - cause: labelName, - series: series, - } -} - -type tooManyLabelsError struct { - series []cortexpb.LabelAdapter - limit int -} - -func newTooManyLabelsError(series []cortexpb.LabelAdapter, limit int) ValidationError { - return &tooManyLabelsError{ - series: series, - limit: limit, - } -} - -func (e *tooManyLabelsError) Error() string { - return fmt.Sprintf( - "series has too many labels (actual: %d, limit: %d) series: '%s'", - len(e.series), e.limit, cortexpb.FromLabelAdaptersToMetric(e.series).String()) -} - -type noMetricNameError struct{} - -func newNoMetricNameError() ValidationError { - return &noMetricNameError{} -} - -func (e *noMetricNameError) Error() string { - return "sample missing metric name" -} - -type invalidMetricNameError struct { - metricName string -} - -func newInvalidMetricNameError(metricName string) ValidationError { - return &invalidMetricNameError{ - metricName: metricName, - } -} - -func (e *invalidMetricNameError) Error() string { - return fmt.Sprintf("sample invalid metric name: %.200q", e.metricName) -} - -// sampleValidationError is a ValidationError implementation suitable for sample validation errors. -type sampleValidationError struct { - message string - metricName string - timestamp int64 -} - -func (e *sampleValidationError) Error() string { - return fmt.Sprintf(e.message, e.timestamp, e.metricName) -} - -func newSampleTimestampTooOldError(metricName string, timestamp int64) ValidationError { - return &sampleValidationError{ - message: "timestamp too old: %d metric: %.200q", - metricName: metricName, - timestamp: timestamp, - } -} - -func newSampleTimestampTooNewError(metricName string, timestamp int64) ValidationError { - return &sampleValidationError{ - message: "timestamp too new: %d metric: %.200q", - metricName: metricName, - timestamp: timestamp, - } -} - -// exemplarValidationError is a ValidationError implementation suitable for exemplar validation errors. 
-type exemplarValidationError struct { - message string - seriesLabels []cortexpb.LabelAdapter - exemplarLabels []cortexpb.LabelAdapter - timestamp int64 -} - -func (e *exemplarValidationError) Error() string { - return fmt.Sprintf(e.message, e.timestamp, cortexpb.FromLabelAdaptersToLabels(e.seriesLabels).String(), cortexpb.FromLabelAdaptersToLabels(e.exemplarLabels).String()) -} - -func newExemplarEmtpyLabelsError(seriesLabels []cortexpb.LabelAdapter, exemplarLabels []cortexpb.LabelAdapter, timestamp int64) ValidationError { - return &exemplarValidationError{ - message: "exemplar missing labels, timestamp: %d series: %s labels: %s", - seriesLabels: seriesLabels, - exemplarLabels: exemplarLabels, - timestamp: timestamp, - } -} - -func newExemplarMissingTimestampError(seriesLabels []cortexpb.LabelAdapter, exemplarLabels []cortexpb.LabelAdapter, timestamp int64) ValidationError { - return &exemplarValidationError{ - message: "exemplar missing timestamp, timestamp: %d series: %s labels: %s", - seriesLabels: seriesLabels, - exemplarLabels: exemplarLabels, - timestamp: timestamp, - } -} - -var labelLenMsg = "exemplar combined labelset exceeds " + strconv.Itoa(ExemplarMaxLabelSetLength) + " characters, timestamp: %d series: %s labels: %s" - -func newExemplarLabelLengthError(seriesLabels []cortexpb.LabelAdapter, exemplarLabels []cortexpb.LabelAdapter, timestamp int64) ValidationError { - return &exemplarValidationError{ - message: labelLenMsg, - seriesLabels: seriesLabels, - exemplarLabels: exemplarLabels, - timestamp: timestamp, - } -} - -// formatLabelSet formats label adapters as a metric name with labels, while preserving -// label order, and keeping duplicates. If there are multiple "__name__" labels, only -// first one is used as metric name, other ones will be included as regular labels. -func formatLabelSet(ls []cortexpb.LabelAdapter) string { - metricName, hasMetricName := "", false - - labelStrings := make([]string, 0, len(ls)) - for _, l := range ls { - if l.Name == model.MetricNameLabel && !hasMetricName && l.Value != "" { - metricName = l.Value - hasMetricName = true - } else { - labelStrings = append(labelStrings, fmt.Sprintf("%s=%q", l.Name, l.Value)) - } - } - - if len(labelStrings) == 0 { - if hasMetricName { - return metricName - } - return "{}" - } - - return fmt.Sprintf("%s{%s}", metricName, strings.Join(labelStrings, ", ")) -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/validation/exporter.go b/vendor/github.com/cortexproject/cortex/pkg/util/validation/exporter.go deleted file mode 100644 index 3640c0bd3..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/util/validation/exporter.go +++ /dev/null @@ -1,43 +0,0 @@ -package validation - -import ( - "github.com/prometheus/client_golang/prometheus" -) - -// OverridesExporter exposes per-tenant resource limit overrides as Prometheus metrics -type OverridesExporter struct { - tenantLimits TenantLimits - description *prometheus.Desc -} - -// NewOverridesExporter creates an OverridesExporter that reads updates to per-tenant -// limits using the provided function. 
-func NewOverridesExporter(tenantLimits TenantLimits) *OverridesExporter { - return &OverridesExporter{ - tenantLimits: tenantLimits, - description: prometheus.NewDesc( - "cortex_overrides", - "Resource limit overrides applied to tenants", - []string{"limit_name", "user"}, - nil, - ), - } -} - -func (oe *OverridesExporter) Describe(ch chan<- *prometheus.Desc) { - ch <- oe.description -} - -func (oe *OverridesExporter) Collect(ch chan<- prometheus.Metric) { - allLimits := oe.tenantLimits.AllByUserID() - for tenant, limits := range allLimits { - ch <- prometheus.MustNewConstMetric(oe.description, prometheus.GaugeValue, limits.IngestionRate, "ingestion_rate", tenant) - ch <- prometheus.MustNewConstMetric(oe.description, prometheus.GaugeValue, float64(limits.IngestionBurstSize), "ingestion_burst_size", tenant) - - ch <- prometheus.MustNewConstMetric(oe.description, prometheus.GaugeValue, float64(limits.MaxSeriesPerQuery), "max_series_per_query", tenant) - ch <- prometheus.MustNewConstMetric(oe.description, prometheus.GaugeValue, float64(limits.MaxLocalSeriesPerUser), "max_local_series_per_user", tenant) - ch <- prometheus.MustNewConstMetric(oe.description, prometheus.GaugeValue, float64(limits.MaxLocalSeriesPerMetric), "max_local_series_per_metric", tenant) - ch <- prometheus.MustNewConstMetric(oe.description, prometheus.GaugeValue, float64(limits.MaxGlobalSeriesPerUser), "max_global_series_per_user", tenant) - ch <- prometheus.MustNewConstMetric(oe.description, prometheus.GaugeValue, float64(limits.MaxGlobalSeriesPerMetric), "max_global_series_per_metric", tenant) - } -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/validation/limits.go b/vendor/github.com/cortexproject/cortex/pkg/util/validation/limits.go deleted file mode 100644 index 24869b155..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/util/validation/limits.go +++ /dev/null @@ -1,702 +0,0 @@ -package validation - -import ( - "bytes" - "encoding/json" - "errors" - "flag" - "math" - "strings" - "time" - - "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/model/relabel" - "golang.org/x/time/rate" - - "github.com/cortexproject/cortex/pkg/util/flagext" -) - -var errMaxGlobalSeriesPerUserValidation = errors.New("The ingester.max-global-series-per-user limit is unsupported if distributor.shard-by-all-labels is disabled") - -// Supported values for enum limits -const ( - LocalIngestionRateStrategy = "local" - GlobalIngestionRateStrategy = "global" -) - -// LimitError are errors that do not comply with the limits specified. -type LimitError string - -func (e LimitError) Error() string { - return string(e) -} - -// Limits describe all the limits for users; can be used to describe global default -// limits via flags, or per-user limits via yaml config. -type Limits struct { - // Distributor enforced limits. 
- IngestionRate float64 `yaml:"ingestion_rate" json:"ingestion_rate"` - IngestionRateStrategy string `yaml:"ingestion_rate_strategy" json:"ingestion_rate_strategy"` - IngestionBurstSize int `yaml:"ingestion_burst_size" json:"ingestion_burst_size"` - AcceptHASamples bool `yaml:"accept_ha_samples" json:"accept_ha_samples"` - HAClusterLabel string `yaml:"ha_cluster_label" json:"ha_cluster_label"` - HAReplicaLabel string `yaml:"ha_replica_label" json:"ha_replica_label"` - HAMaxClusters int `yaml:"ha_max_clusters" json:"ha_max_clusters"` - DropLabels flagext.StringSlice `yaml:"drop_labels" json:"drop_labels"` - MaxLabelNameLength int `yaml:"max_label_name_length" json:"max_label_name_length"` - MaxLabelValueLength int `yaml:"max_label_value_length" json:"max_label_value_length"` - MaxLabelNamesPerSeries int `yaml:"max_label_names_per_series" json:"max_label_names_per_series"` - MaxLabelsSizeBytes int `yaml:"max_labels_size_bytes" json:"max_labels_size_bytes"` - MaxMetadataLength int `yaml:"max_metadata_length" json:"max_metadata_length"` - RejectOldSamples bool `yaml:"reject_old_samples" json:"reject_old_samples"` - RejectOldSamplesMaxAge model.Duration `yaml:"reject_old_samples_max_age" json:"reject_old_samples_max_age"` - CreationGracePeriod model.Duration `yaml:"creation_grace_period" json:"creation_grace_period"` - EnforceMetadataMetricName bool `yaml:"enforce_metadata_metric_name" json:"enforce_metadata_metric_name"` - EnforceMetricName bool `yaml:"enforce_metric_name" json:"enforce_metric_name"` - IngestionTenantShardSize int `yaml:"ingestion_tenant_shard_size" json:"ingestion_tenant_shard_size"` - MetricRelabelConfigs []*relabel.Config `yaml:"metric_relabel_configs,omitempty" json:"metric_relabel_configs,omitempty" doc:"nocli|description=List of metric relabel configurations. Note that in most situations, it is more effective to use metrics relabeling directly in the Prometheus server, e.g. remote_write.write_relabel_configs."` - - // Ingester enforced limits. - // Series - MaxSeriesPerQuery int `yaml:"max_series_per_query" json:"max_series_per_query"` - MaxLocalSeriesPerUser int `yaml:"max_series_per_user" json:"max_series_per_user"` - MaxLocalSeriesPerMetric int `yaml:"max_series_per_metric" json:"max_series_per_metric"` - MaxGlobalSeriesPerUser int `yaml:"max_global_series_per_user" json:"max_global_series_per_user"` - MaxGlobalSeriesPerMetric int `yaml:"max_global_series_per_metric" json:"max_global_series_per_metric"` - // Metadata - MaxLocalMetricsWithMetadataPerUser int `yaml:"max_metadata_per_user" json:"max_metadata_per_user"` - MaxLocalMetadataPerMetric int `yaml:"max_metadata_per_metric" json:"max_metadata_per_metric"` - MaxGlobalMetricsWithMetadataPerUser int `yaml:"max_global_metadata_per_user" json:"max_global_metadata_per_user"` - MaxGlobalMetadataPerMetric int `yaml:"max_global_metadata_per_metric" json:"max_global_metadata_per_metric"` - - // Querier enforced limits. 
- MaxChunksPerQuery int `yaml:"max_fetched_chunks_per_query" json:"max_fetched_chunks_per_query"` - MaxFetchedSeriesPerQuery int `yaml:"max_fetched_series_per_query" json:"max_fetched_series_per_query"` - MaxFetchedChunkBytesPerQuery int `yaml:"max_fetched_chunk_bytes_per_query" json:"max_fetched_chunk_bytes_per_query"` - MaxFetchedDataBytesPerQuery int `yaml:"max_fetched_data_bytes_per_query" json:"max_fetched_data_bytes_per_query"` - MaxQueryLookback model.Duration `yaml:"max_query_lookback" json:"max_query_lookback"` - MaxQueryLength model.Duration `yaml:"max_query_length" json:"max_query_length"` - MaxQueryParallelism int `yaml:"max_query_parallelism" json:"max_query_parallelism"` - MaxCacheFreshness model.Duration `yaml:"max_cache_freshness" json:"max_cache_freshness"` - MaxQueriersPerTenant int `yaml:"max_queriers_per_tenant" json:"max_queriers_per_tenant"` - QueryVerticalShardSize int `yaml:"query_vertical_shard_size" json:"query_vertical_shard_size" doc:"hidden"` - - // Ruler defaults and limits. - RulerEvaluationDelay model.Duration `yaml:"ruler_evaluation_delay_duration" json:"ruler_evaluation_delay_duration"` - RulerTenantShardSize int `yaml:"ruler_tenant_shard_size" json:"ruler_tenant_shard_size"` - RulerMaxRulesPerRuleGroup int `yaml:"ruler_max_rules_per_rule_group" json:"ruler_max_rules_per_rule_group"` - RulerMaxRuleGroupsPerTenant int `yaml:"ruler_max_rule_groups_per_tenant" json:"ruler_max_rule_groups_per_tenant"` - - // Store-gateway. - StoreGatewayTenantShardSize int `yaml:"store_gateway_tenant_shard_size" json:"store_gateway_tenant_shard_size"` - - // Compactor. - CompactorBlocksRetentionPeriod model.Duration `yaml:"compactor_blocks_retention_period" json:"compactor_blocks_retention_period"` - CompactorTenantShardSize int `yaml:"compactor_tenant_shard_size" json:"compactor_tenant_shard_size"` - - // This config doesn't have a CLI flag registered here because they're registered in - // their own original config struct. - S3SSEType string `yaml:"s3_sse_type" json:"s3_sse_type" doc:"nocli|description=S3 server-side encryption type. Required to enable server-side encryption overrides for a specific tenant. If not set, the default S3 client settings are used."` - S3SSEKMSKeyID string `yaml:"s3_sse_kms_key_id" json:"s3_sse_kms_key_id" doc:"nocli|description=S3 server-side encryption KMS Key ID. Ignored if the SSE type override is not set."` - S3SSEKMSEncryptionContext string `yaml:"s3_sse_kms_encryption_context" json:"s3_sse_kms_encryption_context" doc:"nocli|description=S3 server-side encryption KMS encryption context. If unset and the key ID override is set, the encryption context will not be provided to S3. Ignored if the SSE type override is not set."` - - // Alertmanager. 
- AlertmanagerReceiversBlockCIDRNetworks flagext.CIDRSliceCSV `yaml:"alertmanager_receivers_firewall_block_cidr_networks" json:"alertmanager_receivers_firewall_block_cidr_networks"` - AlertmanagerReceiversBlockPrivateAddresses bool `yaml:"alertmanager_receivers_firewall_block_private_addresses" json:"alertmanager_receivers_firewall_block_private_addresses"` - - NotificationRateLimit float64 `yaml:"alertmanager_notification_rate_limit" json:"alertmanager_notification_rate_limit"` - NotificationRateLimitPerIntegration NotificationRateLimitMap `yaml:"alertmanager_notification_rate_limit_per_integration" json:"alertmanager_notification_rate_limit_per_integration"` - - AlertmanagerMaxConfigSizeBytes int `yaml:"alertmanager_max_config_size_bytes" json:"alertmanager_max_config_size_bytes"` - AlertmanagerMaxTemplatesCount int `yaml:"alertmanager_max_templates_count" json:"alertmanager_max_templates_count"` - AlertmanagerMaxTemplateSizeBytes int `yaml:"alertmanager_max_template_size_bytes" json:"alertmanager_max_template_size_bytes"` - AlertmanagerMaxDispatcherAggregationGroups int `yaml:"alertmanager_max_dispatcher_aggregation_groups" json:"alertmanager_max_dispatcher_aggregation_groups"` - AlertmanagerMaxAlertsCount int `yaml:"alertmanager_max_alerts_count" json:"alertmanager_max_alerts_count"` - AlertmanagerMaxAlertsSizeBytes int `yaml:"alertmanager_max_alerts_size_bytes" json:"alertmanager_max_alerts_size_bytes"` -} - -// RegisterFlags adds the flags required to config this to the given FlagSet -func (l *Limits) RegisterFlags(f *flag.FlagSet) { - f.IntVar(&l.IngestionTenantShardSize, "distributor.ingestion-tenant-shard-size", 0, "The default tenant's shard size when the shuffle-sharding strategy is used. Must be set both on ingesters and distributors. When this setting is specified in the per-tenant overrides, a value of 0 disables shuffle sharding for the tenant.") - f.Float64Var(&l.IngestionRate, "distributor.ingestion-rate-limit", 25000, "Per-user ingestion rate limit in samples per second.") - f.StringVar(&l.IngestionRateStrategy, "distributor.ingestion-rate-limit-strategy", "local", "Whether the ingestion rate limit should be applied individually to each distributor instance (local), or evenly shared across the cluster (global).") - f.IntVar(&l.IngestionBurstSize, "distributor.ingestion-burst-size", 50000, "Per-user allowed ingestion burst size (in number of samples).") - f.BoolVar(&l.AcceptHASamples, "distributor.ha-tracker.enable-for-all-users", false, "Flag to enable, for all users, handling of samples with external labels identifying replicas in an HA Prometheus setup.") - f.StringVar(&l.HAClusterLabel, "distributor.ha-tracker.cluster", "cluster", "Prometheus label to look for in samples to identify a Prometheus HA cluster.") - f.StringVar(&l.HAReplicaLabel, "distributor.ha-tracker.replica", "__replica__", "Prometheus label to look for in samples to identify a Prometheus HA replica.") - f.IntVar(&l.HAMaxClusters, "distributor.ha-tracker.max-clusters", 0, "Maximum number of clusters that HA tracker will keep track of for single user. 
0 to disable the limit.") - f.Var(&l.DropLabels, "distributor.drop-label", "This flag can be used to specify label names that to drop during sample ingestion within the distributor and can be repeated in order to drop multiple labels.") - f.IntVar(&l.MaxLabelNameLength, "validation.max-length-label-name", 1024, "Maximum length accepted for label names") - f.IntVar(&l.MaxLabelValueLength, "validation.max-length-label-value", 2048, "Maximum length accepted for label value. This setting also applies to the metric name") - f.IntVar(&l.MaxLabelNamesPerSeries, "validation.max-label-names-per-series", 30, "Maximum number of label names per series.") - f.IntVar(&l.MaxLabelsSizeBytes, "validation.max-labels-size-bytes", 0, "Maximum combined size in bytes of all labels and label values accepted for a series. 0 to disable the limit.") - f.IntVar(&l.MaxMetadataLength, "validation.max-metadata-length", 1024, "Maximum length accepted for metric metadata. Metadata refers to Metric Name, HELP and UNIT.") - f.BoolVar(&l.RejectOldSamples, "validation.reject-old-samples", false, "Reject old samples.") - _ = l.RejectOldSamplesMaxAge.Set("14d") - f.Var(&l.RejectOldSamplesMaxAge, "validation.reject-old-samples.max-age", "Maximum accepted sample age before rejecting.") - _ = l.CreationGracePeriod.Set("10m") - f.Var(&l.CreationGracePeriod, "validation.create-grace-period", "Duration which table will be created/deleted before/after it's needed; we won't accept sample from before this time.") - f.BoolVar(&l.EnforceMetricName, "validation.enforce-metric-name", true, "Enforce every sample has a metric name.") - f.BoolVar(&l.EnforceMetadataMetricName, "validation.enforce-metadata-metric-name", true, "Enforce every metadata has a metric name.") - - f.IntVar(&l.MaxSeriesPerQuery, "ingester.max-series-per-query", 100000, "The maximum number of series for which a query can fetch samples from each ingester. This limit is enforced only in the ingesters (when querying samples not flushed to the storage yet) and it's a per-instance limit. This limit is ignored when running the Cortex blocks storage. When running Cortex with blocks storage use -querier.max-fetched-series-per-query limit instead.") - f.IntVar(&l.MaxLocalSeriesPerUser, "ingester.max-series-per-user", 5000000, "The maximum number of active series per user, per ingester. 0 to disable.") - f.IntVar(&l.MaxLocalSeriesPerMetric, "ingester.max-series-per-metric", 50000, "The maximum number of active series per metric name, per ingester. 0 to disable.") - f.IntVar(&l.MaxGlobalSeriesPerUser, "ingester.max-global-series-per-user", 0, "The maximum number of active series per user, across the cluster before replication. 0 to disable. Supported only if -distributor.shard-by-all-labels is true.") - f.IntVar(&l.MaxGlobalSeriesPerMetric, "ingester.max-global-series-per-metric", 0, "The maximum number of active series per metric name, across the cluster before replication. 0 to disable.") - - f.IntVar(&l.MaxLocalMetricsWithMetadataPerUser, "ingester.max-metadata-per-user", 8000, "The maximum number of active metrics with metadata per user, per ingester. 0 to disable.") - f.IntVar(&l.MaxLocalMetadataPerMetric, "ingester.max-metadata-per-metric", 10, "The maximum number of metadata per metric, per ingester. 0 to disable.") - f.IntVar(&l.MaxGlobalMetricsWithMetadataPerUser, "ingester.max-global-metadata-per-user", 0, "The maximum number of active metrics with metadata per user, across the cluster. 0 to disable. 
Supported only if -distributor.shard-by-all-labels is true.") - f.IntVar(&l.MaxGlobalMetadataPerMetric, "ingester.max-global-metadata-per-metric", 0, "The maximum number of metadata per metric, across the cluster. 0 to disable.") - f.IntVar(&l.MaxChunksPerQuery, "querier.max-fetched-chunks-per-query", 2000000, "Maximum number of chunks that can be fetched in a single query from ingesters and long-term storage. This limit is enforced in the querier, ruler and store-gateway. 0 to disable.") - f.IntVar(&l.MaxFetchedSeriesPerQuery, "querier.max-fetched-series-per-query", 0, "The maximum number of unique series for which a query can fetch samples from each ingesters and blocks storage. This limit is enforced in the querier, ruler and store-gateway. 0 to disable") - f.IntVar(&l.MaxFetchedChunkBytesPerQuery, "querier.max-fetched-chunk-bytes-per-query", 0, "Deprecated (user max-fetched-data-bytes-per-query instead): The maximum size of all chunks in bytes that a query can fetch from each ingester and storage. This limit is enforced in the querier, ruler and store-gateway. 0 to disable.") - f.IntVar(&l.MaxFetchedDataBytesPerQuery, "querier.max-fetched-data-bytes-per-query", 0, "The maximum combined size of all data that a query can fetch from each ingester and storage. This limit is enforced in the querier and ruler for `query`, `query_range` and `series` APIs. 0 to disable.") - f.Var(&l.MaxQueryLength, "store.max-query-length", "Limit the query time range (end - start time). This limit is enforced in the query-frontend (on the received query) and in the querier (on the query possibly split by the query-frontend). 0 to disable.") - f.Var(&l.MaxQueryLookback, "querier.max-query-lookback", "Limit how long back data (series and metadata) can be queried, up until duration ago. This limit is enforced in the query-frontend, querier and ruler. If the requested time range is outside the allowed range, the request will not fail but will be manipulated to only query data within the allowed time range. 0 to disable.") - f.IntVar(&l.MaxQueryParallelism, "querier.max-query-parallelism", 14, "Maximum number of split queries will be scheduled in parallel by the frontend.") - _ = l.MaxCacheFreshness.Set("1m") - f.Var(&l.MaxCacheFreshness, "frontend.max-cache-freshness", "Most recent allowed cacheable result per-tenant, to prevent caching very recent results that might still be in flux.") - f.IntVar(&l.MaxQueriersPerTenant, "frontend.max-queriers-per-tenant", 0, "Maximum number of queriers that can handle requests for a single tenant. If set to 0 or value higher than number of available queriers, *all* queriers will handle requests for the tenant. Each frontend (or query-scheduler, if used) will select the same set of queriers for the same tenant (given that all queriers are connected to all frontends / query-schedulers). This option only works with queriers connecting to the query-frontend / query-scheduler, not when using downstream URL.") - f.IntVar(&l.QueryVerticalShardSize, "frontend.query-vertical-shard-size", 0, "[Experimental] Number of shards to use when distributing shardable PromQL queries.") - - f.Var(&l.RulerEvaluationDelay, "ruler.evaluation-delay-duration", "Duration to delay the evaluation of rules to ensure the underlying metrics have been pushed to Cortex.") - f.IntVar(&l.RulerTenantShardSize, "ruler.tenant-shard-size", 0, "The default tenant's shard size when the shuffle-sharding strategy is used by ruler. 
When this setting is specified in the per-tenant overrides, a value of 0 disables shuffle sharding for the tenant.") - f.IntVar(&l.RulerMaxRulesPerRuleGroup, "ruler.max-rules-per-rule-group", 0, "Maximum number of rules per rule group per-tenant. 0 to disable.") - f.IntVar(&l.RulerMaxRuleGroupsPerTenant, "ruler.max-rule-groups-per-tenant", 0, "Maximum number of rule groups per-tenant. 0 to disable.") - - f.Var(&l.CompactorBlocksRetentionPeriod, "compactor.blocks-retention-period", "Delete blocks containing samples older than the specified retention period. 0 to disable.") - f.IntVar(&l.CompactorTenantShardSize, "compactor.tenant-shard-size", 0, "The default tenant's shard size when the shuffle-sharding strategy is used by the compactor. When this setting is specified in the per-tenant overrides, a value of 0 disables shuffle sharding for the tenant.") - - // Store-gateway. - f.IntVar(&l.StoreGatewayTenantShardSize, "store-gateway.tenant-shard-size", 0, "The default tenant's shard size when the shuffle-sharding strategy is used. Must be set when the store-gateway sharding is enabled with the shuffle-sharding strategy. When this setting is specified in the per-tenant overrides, a value of 0 disables shuffle sharding for the tenant.") - - // Alertmanager. - f.Var(&l.AlertmanagerReceiversBlockCIDRNetworks, "alertmanager.receivers-firewall-block-cidr-networks", "Comma-separated list of network CIDRs to block in Alertmanager receiver integrations.") - f.BoolVar(&l.AlertmanagerReceiversBlockPrivateAddresses, "alertmanager.receivers-firewall-block-private-addresses", false, "True to block private and local addresses in Alertmanager receiver integrations. It blocks private addresses defined by RFC 1918 (IPv4 addresses) and RFC 4193 (IPv6 addresses), as well as loopback, local unicast and local multicast addresses.") - - f.Float64Var(&l.NotificationRateLimit, "alertmanager.notification-rate-limit", 0, "Per-user rate limit for sending notifications from Alertmanager in notifications/sec. 0 = rate limit disabled. Negative value = no notifications are allowed.") - - if l.NotificationRateLimitPerIntegration == nil { - l.NotificationRateLimitPerIntegration = NotificationRateLimitMap{} - } - f.Var(&l.NotificationRateLimitPerIntegration, "alertmanager.notification-rate-limit-per-integration", "Per-integration notification rate limits. Value is a map, where each key is integration name and value is a rate-limit (float). On command line, this map is given in JSON format. Rate limit has the same meaning as -alertmanager.notification-rate-limit, but only applies for specific integration. Allowed integration names: "+strings.Join(allowedIntegrationNames, ", ")+".") - f.IntVar(&l.AlertmanagerMaxConfigSizeBytes, "alertmanager.max-config-size-bytes", 0, "Maximum size of configuration file for Alertmanager that tenant can upload via Alertmanager API. 0 = no limit.") - f.IntVar(&l.AlertmanagerMaxTemplatesCount, "alertmanager.max-templates-count", 0, "Maximum number of templates in tenant's Alertmanager configuration uploaded via Alertmanager API. 0 = no limit.") - f.IntVar(&l.AlertmanagerMaxTemplateSizeBytes, "alertmanager.max-template-size-bytes", 0, "Maximum size of single template in tenant's Alertmanager configuration uploaded via Alertmanager API. 0 = no limit.") - f.IntVar(&l.AlertmanagerMaxDispatcherAggregationGroups, "alertmanager.max-dispatcher-aggregation-groups", 0, "Maximum number of aggregation groups in Alertmanager's dispatcher that a tenant can have. 
Each active aggregation group uses single goroutine. When the limit is reached, dispatcher will not dispatch alerts that belong to additional aggregation groups, but existing groups will keep working properly. 0 = no limit.") - f.IntVar(&l.AlertmanagerMaxAlertsCount, "alertmanager.max-alerts-count", 0, "Maximum number of alerts that a single user can have. Inserting more alerts will fail with a log message and metric increment. 0 = no limit.") - f.IntVar(&l.AlertmanagerMaxAlertsSizeBytes, "alertmanager.max-alerts-size-bytes", 0, "Maximum total size of alerts that a single user can have, alert size is the sum of the bytes of its labels, annotations and generatorURL. Inserting more alerts will fail with a log message and metric increment. 0 = no limit.") -} - -// Validate the limits config and returns an error if the validation -// doesn't pass -func (l *Limits) Validate(shardByAllLabels bool) error { - // The ingester.max-global-series-per-user metric is not supported - // if shard-by-all-labels is disabled - if l.MaxGlobalSeriesPerUser > 0 && !shardByAllLabels { - return errMaxGlobalSeriesPerUserValidation - } - - return nil -} - -// UnmarshalYAML implements the yaml.Unmarshaler interface. -func (l *Limits) UnmarshalYAML(unmarshal func(interface{}) error) error { - // We want to set l to the defaults and then overwrite it with the input. - // To make unmarshal fill the plain data struct rather than calling UnmarshalYAML - // again, we have to hide it using a type indirection. See prometheus/config. - - // During startup we wont have a default value so we don't want to overwrite them - if defaultLimits != nil { - *l = *defaultLimits - // Make copy of default limits. Otherwise unmarshalling would modify map in default limits. - l.copyNotificationIntegrationLimits(defaultLimits.NotificationRateLimitPerIntegration) - } - type plain Limits - return unmarshal((*plain)(l)) -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (l *Limits) UnmarshalJSON(data []byte) error { - // Like the YAML method above, we want to set l to the defaults and then overwrite - // it with the input. We prevent an infinite loop of calling UnmarshalJSON by hiding - // behind type indirection. - if defaultLimits != nil { - *l = *defaultLimits - // Make copy of default limits. Otherwise unmarshalling would modify map in default limits. - l.copyNotificationIntegrationLimits(defaultLimits.NotificationRateLimitPerIntegration) - } - - type plain Limits - dec := json.NewDecoder(bytes.NewReader(data)) - dec.DisallowUnknownFields() - - return dec.Decode((*plain)(l)) -} - -func (l *Limits) copyNotificationIntegrationLimits(defaults NotificationRateLimitMap) { - l.NotificationRateLimitPerIntegration = make(map[string]float64, len(defaults)) - for k, v := range defaults { - l.NotificationRateLimitPerIntegration[k] = v - } -} - -// When we load YAML from disk, we want the various per-customer limits -// to default to any values specified on the command line, not default -// command line values. This global contains those values. I (Tom) cannot -// find a nicer way I'm afraid. -var defaultLimits *Limits - -// SetDefaultLimitsForYAMLUnmarshalling sets global default limits, used when loading -// Limits from YAML files. This is used to ensure per-tenant limits are defaulted to -// those values. 
-// SetDefaultLimitsForYAMLUnmarshalling sets global default limits, used when loading
-// Limits from YAML files. This is used to ensure per-tenant limits are defaulted to
-// those values.
-func SetDefaultLimitsForYAMLUnmarshalling(defaults Limits) {
-	defaultLimits = &defaults
-}
-
-// TenantLimits exposes per-tenant limit overrides to various resource usage limits.
-type TenantLimits interface {
-	// ByUserID gets limits specific to a particular tenant or nil if there are none.
-	ByUserID(userID string) *Limits
-
-	// AllByUserID gets a mapping of all tenant IDs to their limits.
-	AllByUserID() map[string]*Limits
-}
-
-// Overrides periodically fetches a set of per-user overrides, and provides convenience
-// functions for fetching the correct value.
-type Overrides struct {
-	defaultLimits *Limits
-	tenantLimits  TenantLimits
-}
-
-// NewOverrides makes a new Overrides.
-func NewOverrides(defaults Limits, tenantLimits TenantLimits) (*Overrides, error) {
-	return &Overrides{
-		tenantLimits:  tenantLimits,
-		defaultLimits: &defaults,
-	}, nil
-}
-
-// IngestionRate returns the limit on ingester rate (samples per second).
-func (o *Overrides) IngestionRate(userID string) float64 {
-	return o.GetOverridesForUser(userID).IngestionRate
-}
-
-// IngestionRateStrategy returns whether the ingestion rate limit should be individually applied
-// to each distributor instance (local) or evenly shared across the cluster (global).
-func (o *Overrides) IngestionRateStrategy() string {
-	// The ingestion rate strategy can't be overridden on a per-tenant basis.
-	return o.defaultLimits.IngestionRateStrategy
-}
-
-// IngestionBurstSize returns the burst size for ingestion rate.
-func (o *Overrides) IngestionBurstSize(userID string) int {
-	return o.GetOverridesForUser(userID).IngestionBurstSize
-}
-
-// AcceptHASamples returns whether the distributor should track and accept samples from HA replicas for this user.
-func (o *Overrides) AcceptHASamples(userID string) bool {
-	return o.GetOverridesForUser(userID).AcceptHASamples
-}
-
-// HAClusterLabel returns the cluster label to look for when deciding whether to accept a sample from a Prometheus HA replica.
-func (o *Overrides) HAClusterLabel(userID string) string {
-	return o.GetOverridesForUser(userID).HAClusterLabel
-}
-
-// HAReplicaLabel returns the replica label to look for when deciding whether to accept a sample from a Prometheus HA replica.
-func (o *Overrides) HAReplicaLabel(userID string) string {
-	return o.GetOverridesForUser(userID).HAReplicaLabel
-}
-
-// DropLabels returns the list of labels to be dropped when ingesting HA samples for the user.
-func (o *Overrides) DropLabels(userID string) flagext.StringSlice {
-	return o.GetOverridesForUser(userID).DropLabels
-}
-
-// MaxLabelNameLength returns the maximum length a label name can be.
-func (o *Overrides) MaxLabelNameLength(userID string) int {
-	return o.GetOverridesForUser(userID).MaxLabelNameLength
-}
-
-// MaxLabelValueLength returns the maximum length a label value can be. This also is
-// the maximum length of a metric name.
-func (o *Overrides) MaxLabelValueLength(userID string) int {
-	return o.GetOverridesForUser(userID).MaxLabelValueLength
-}
-
-// MaxLabelNamesPerSeries returns the maximum number of label/value pairs per timeseries.
-func (o *Overrides) MaxLabelNamesPerSeries(userID string) int {
-	return o.GetOverridesForUser(userID).MaxLabelNamesPerSeries
-}
-
-// MaxLabelsSizeBytes returns the maximum combined size in bytes of the labels of a timeseries.
-func (o *Overrides) MaxLabelsSizeBytes(userID string) int {
-	return o.GetOverridesForUser(userID).MaxLabelsSizeBytes
-}
-
-// MaxMetadataLength returns the maximum length metadata can be. Metadata refers
-// to the Metric Name, HELP and UNIT.
-func (o *Overrides) MaxMetadataLength(userID string) int {
-	return o.GetOverridesForUser(userID).MaxMetadataLength
-}
-
-// RejectOldSamples returns true when we should reject samples older than a
-// certain age.
-func (o *Overrides) RejectOldSamples(userID string) bool {
-	return o.GetOverridesForUser(userID).RejectOldSamples
-}
-
-// RejectOldSamplesMaxAge returns the age at which samples should be rejected.
-func (o *Overrides) RejectOldSamplesMaxAge(userID string) time.Duration {
-	return time.Duration(o.GetOverridesForUser(userID).RejectOldSamplesMaxAge)
-}
-
-// CreationGracePeriod is misnamed, and actually returns how far into the future
-// we should accept samples.
-func (o *Overrides) CreationGracePeriod(userID string) time.Duration {
-	return time.Duration(o.GetOverridesForUser(userID).CreationGracePeriod)
-}
-
-// MaxSeriesPerQuery returns the maximum number of series a query is allowed to hit.
-func (o *Overrides) MaxSeriesPerQuery(userID string) int {
-	return o.GetOverridesForUser(userID).MaxSeriesPerQuery
-}
-
-// MaxLocalSeriesPerUser returns the maximum number of series a user is allowed to store in a single ingester.
-func (o *Overrides) MaxLocalSeriesPerUser(userID string) int {
-	return o.GetOverridesForUser(userID).MaxLocalSeriesPerUser
-}
-
-// MaxLocalSeriesPerMetric returns the maximum number of series allowed per metric in a single ingester.
-func (o *Overrides) MaxLocalSeriesPerMetric(userID string) int {
-	return o.GetOverridesForUser(userID).MaxLocalSeriesPerMetric
-}
-
-// MaxGlobalSeriesPerUser returns the maximum number of series a user is allowed to store across the cluster.
-func (o *Overrides) MaxGlobalSeriesPerUser(userID string) int {
-	return o.GetOverridesForUser(userID).MaxGlobalSeriesPerUser
-}
-
-// MaxGlobalSeriesPerMetric returns the maximum number of series allowed per metric across the cluster.
-func (o *Overrides) MaxGlobalSeriesPerMetric(userID string) int {
-	return o.GetOverridesForUser(userID).MaxGlobalSeriesPerMetric
-}
-
-// MaxChunksPerQueryFromStore returns the maximum number of chunks allowed per query when fetching
-// chunks from the long-term storage.
-func (o *Overrides) MaxChunksPerQueryFromStore(userID string) int {
-	return o.GetOverridesForUser(userID).MaxChunksPerQuery
-}
-
-func (o *Overrides) MaxChunksPerQuery(userID string) int {
-	return o.GetOverridesForUser(userID).MaxChunksPerQuery
-}
-
-// MaxFetchedSeriesPerQuery returns the maximum number of series allowed per query when fetching
-// chunks from ingesters and blocks storage.
-func (o *Overrides) MaxFetchedSeriesPerQuery(userID string) int {
-	return o.GetOverridesForUser(userID).MaxFetchedSeriesPerQuery
-}
-
-// MaxFetchedChunkBytesPerQuery returns the maximum number of bytes for chunks allowed per query when fetching
-// chunks from ingesters and blocks storage.
-func (o *Overrides) MaxFetchedChunkBytesPerQuery(userID string) int {
-	return o.GetOverridesForUser(userID).MaxFetchedChunkBytesPerQuery
-}
-
-// MaxFetchedDataBytesPerQuery returns the maximum number of bytes for all data allowed per query when fetching
-// from ingesters and blocks storage.
-func (o *Overrides) MaxFetchedDataBytesPerQuery(userID string) int {
-	return o.GetOverridesForUser(userID).MaxFetchedDataBytesPerQuery
-}
-
-// MaxQueryLookback returns the max lookback period of queries.
-func (o *Overrides) MaxQueryLookback(userID string) time.Duration {
-	return time.Duration(o.GetOverridesForUser(userID).MaxQueryLookback)
-}
-
-// MaxQueryLength returns the limit of the length (in time) of a query.
-func (o *Overrides) MaxQueryLength(userID string) time.Duration {
-	return time.Duration(o.GetOverridesForUser(userID).MaxQueryLength)
-}
-
-// MaxCacheFreshness returns the period after which results are cacheable,
-// to prevent caching of very recent results.
-func (o *Overrides) MaxCacheFreshness(userID string) time.Duration {
-	return time.Duration(o.GetOverridesForUser(userID).MaxCacheFreshness)
-}
-
-// MaxQueriersPerUser returns the maximum number of queriers that can handle requests for this user.
-func (o *Overrides) MaxQueriersPerUser(userID string) int {
-	return o.GetOverridesForUser(userID).MaxQueriersPerTenant
-}
-
-// QueryVerticalShardSize returns the number of shards to use when distributing shardable PromQL queries.
-func (o *Overrides) QueryVerticalShardSize(userID string) int {
-	return o.GetOverridesForUser(userID).QueryVerticalShardSize
-}
-
-// MaxQueryParallelism returns the limit to the number of split queries the
-// frontend will process in parallel.
-func (o *Overrides) MaxQueryParallelism(userID string) int {
-	return o.GetOverridesForUser(userID).MaxQueryParallelism
-}
-
-// EnforceMetricName returns whether to enforce the presence of a metric name.
-func (o *Overrides) EnforceMetricName(userID string) bool {
-	return o.GetOverridesForUser(userID).EnforceMetricName
-}
-
-// EnforceMetadataMetricName returns whether to enforce the presence of a metric name on metadata.
-func (o *Overrides) EnforceMetadataMetricName(userID string) bool {
-	return o.GetOverridesForUser(userID).EnforceMetadataMetricName
-}
-
-// MaxLocalMetricsWithMetadataPerUser returns the maximum number of metrics with metadata a user is allowed to store in a single ingester.
-func (o *Overrides) MaxLocalMetricsWithMetadataPerUser(userID string) int {
-	return o.GetOverridesForUser(userID).MaxLocalMetricsWithMetadataPerUser
-}
-
-// MaxLocalMetadataPerMetric returns the maximum number of metadata allowed per metric in a single ingester.
-func (o *Overrides) MaxLocalMetadataPerMetric(userID string) int {
-	return o.GetOverridesForUser(userID).MaxLocalMetadataPerMetric
-}
-
-// MaxGlobalMetricsWithMetadataPerUser returns the maximum number of metrics with metadata a user is allowed to store across the cluster.
-func (o *Overrides) MaxGlobalMetricsWithMetadataPerUser(userID string) int {
-	return o.GetOverridesForUser(userID).MaxGlobalMetricsWithMetadataPerUser
-}
-
-// MaxGlobalMetadataPerMetric returns the maximum number of metadata allowed per metric across the cluster.
-func (o *Overrides) MaxGlobalMetadataPerMetric(userID string) int {
-	return o.GetOverridesForUser(userID).MaxGlobalMetadataPerMetric
-}
-
-// IngestionTenantShardSize returns the ingesters shard size for a given user.
-func (o *Overrides) IngestionTenantShardSize(userID string) int {
-	return o.GetOverridesForUser(userID).IngestionTenantShardSize
-}
-
-// EvaluationDelay returns the rules evaluation delay for a given user.
-func (o *Overrides) EvaluationDelay(userID string) time.Duration {
-	return time.Duration(o.GetOverridesForUser(userID).RulerEvaluationDelay)
-}
-
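All of these getters funnel through GetOverridesForUser further down: take the tenant's limits if the TenantLimits source has them, otherwise fall back to the defaults. A sketch of how a caller wires this up, using a fixed map as a stand-in for the runtime-config watcher that normally backs TenantLimits (hypothetical usage, not code from this repository):

```go
package main

import (
	"fmt"

	"github.com/cortexproject/cortex/pkg/util/validation"
)

// staticLimits implements validation.TenantLimits over a fixed map.
type staticLimits map[string]*validation.Limits

func (s staticLimits) ByUserID(userID string) *validation.Limits  { return s[userID] }
func (s staticLimits) AllByUserID() map[string]*validation.Limits { return s }

func main() {
	defaults := validation.Limits{IngestionRate: 10000}
	tenantA := validation.Limits{IngestionRate: 50000}

	ov, err := validation.NewOverrides(defaults, staticLimits{"tenant-a": &tenantA})
	if err != nil {
		panic(err)
	}

	fmt.Println(ov.IngestionRate("tenant-a")) // 50000: the per-tenant override wins
	fmt.Println(ov.IngestionRate("tenant-b")) // 10000: no override, falls back to defaults
}
```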
-// CompactorBlocksRetentionPeriod returns the retention period for a given user.
-func (o *Overrides) CompactorBlocksRetentionPeriod(userID string) time.Duration {
-	return time.Duration(o.GetOverridesForUser(userID).CompactorBlocksRetentionPeriod)
-}
-
-// CompactorTenantShardSize returns the shard size (number of compactors) used by this tenant when using shuffle-sharding strategy.
-func (o *Overrides) CompactorTenantShardSize(userID string) int {
-	return o.GetOverridesForUser(userID).CompactorTenantShardSize
-}
-
-// MetricRelabelConfigs returns the metric relabel configs for a given user.
-func (o *Overrides) MetricRelabelConfigs(userID string) []*relabel.Config {
-	return o.GetOverridesForUser(userID).MetricRelabelConfigs
-}
-
-// RulerTenantShardSize returns the shard size (number of rulers) used by this tenant when using shuffle-sharding strategy.
-func (o *Overrides) RulerTenantShardSize(userID string) int {
-	return o.GetOverridesForUser(userID).RulerTenantShardSize
-}
-
-// RulerMaxRulesPerRuleGroup returns the maximum number of rules per rule group for a given user.
-func (o *Overrides) RulerMaxRulesPerRuleGroup(userID string) int {
-	return o.GetOverridesForUser(userID).RulerMaxRulesPerRuleGroup
-}
-
-// RulerMaxRuleGroupsPerTenant returns the maximum number of rule groups for a given user.
-func (o *Overrides) RulerMaxRuleGroupsPerTenant(userID string) int {
-	return o.GetOverridesForUser(userID).RulerMaxRuleGroupsPerTenant
-}
-
-// StoreGatewayTenantShardSize returns the store-gateway shard size for a given user.
-func (o *Overrides) StoreGatewayTenantShardSize(userID string) int {
-	return o.GetOverridesForUser(userID).StoreGatewayTenantShardSize
-}
-
-// MaxHAClusters returns the maximum number of clusters that the HA tracker will track for a user.
-func (o *Overrides) MaxHAClusters(user string) int {
-	return o.GetOverridesForUser(user).HAMaxClusters
-}
-
-// S3SSEType returns the per-tenant S3 SSE type.
-func (o *Overrides) S3SSEType(user string) string {
-	return o.GetOverridesForUser(user).S3SSEType
-}
-
-// S3SSEKMSKeyID returns the per-tenant S3 KMS-SSE key id.
-func (o *Overrides) S3SSEKMSKeyID(user string) string {
-	return o.GetOverridesForUser(user).S3SSEKMSKeyID
-}
-
-// S3SSEKMSEncryptionContext returns the per-tenant S3 KMS-SSE encryption context.
-func (o *Overrides) S3SSEKMSEncryptionContext(user string) string {
-	return o.GetOverridesForUser(user).S3SSEKMSEncryptionContext
-}
-
-// AlertmanagerReceiversBlockCIDRNetworks returns the list of network CIDRs that should be blocked
-// in the Alertmanager receivers for the given user.
-func (o *Overrides) AlertmanagerReceiversBlockCIDRNetworks(user string) []flagext.CIDR {
-	return o.GetOverridesForUser(user).AlertmanagerReceiversBlockCIDRNetworks
-}
-
-// AlertmanagerReceiversBlockPrivateAddresses returns true if private addresses should be blocked
-// in the Alertmanager receivers for the given user.
-func (o *Overrides) AlertmanagerReceiversBlockPrivateAddresses(user string) bool {
-	return o.GetOverridesForUser(user).AlertmanagerReceiversBlockPrivateAddresses
-}
-
-// Notification limits are special. Limits are returned in the following order:
-// 1. per-tenant limits for the given integration
-// 2. default limits for the given integration
-// 3. per-tenant limits
-// 4. default limits
-func (o *Overrides) getNotificationLimitForUser(user, integration string) float64 {
-	u := o.GetOverridesForUser(user)
-	if n, ok := u.NotificationRateLimitPerIntegration[integration]; ok {
-		return n
-	}
-
-	return u.NotificationRateLimit
-}
-
-func (o *Overrides) NotificationRateLimit(user string, integration string) rate.Limit {
-	l := o.getNotificationLimitForUser(user, integration)
-	if l == 0 || math.IsInf(l, 1) {
-		return rate.Inf // No rate limit.
-	}
-
-	if l < 0 {
-		l = 0 // No notifications will be sent.
-	}
-	return rate.Limit(l)
-}
-
-const maxInt = int(^uint(0) >> 1)
-
-func (o *Overrides) NotificationBurstSize(user string, integration string) int {
-	// Burst size is computed from the rate limit. The rate limit is already normalized to [0, +inf), where 0 means disabled.
-	l := o.NotificationRateLimit(user, integration)
-	if l == 0 {
-		return 0
-	}
-
-	// Floats can be larger than max int. This also handles the case where l == rate.Inf.
-	if float64(l) >= float64(maxInt) {
-		return maxInt
-	}
-
-	// For values between (0, 1), allow a single notification per second (every 1/limit seconds).
-	if l < 1 {
-		return 1
-	}
-
-	return int(l)
-}
-
-func (o *Overrides) AlertmanagerMaxConfigSize(userID string) int {
-	return o.GetOverridesForUser(userID).AlertmanagerMaxConfigSizeBytes
-}
-
-func (o *Overrides) AlertmanagerMaxTemplatesCount(userID string) int {
-	return o.GetOverridesForUser(userID).AlertmanagerMaxTemplatesCount
-}
-
-func (o *Overrides) AlertmanagerMaxTemplateSize(userID string) int {
-	return o.GetOverridesForUser(userID).AlertmanagerMaxTemplateSizeBytes
-}
-
-func (o *Overrides) AlertmanagerMaxDispatcherAggregationGroups(userID string) int {
-	return o.GetOverridesForUser(userID).AlertmanagerMaxDispatcherAggregationGroups
-}
-
-func (o *Overrides) AlertmanagerMaxAlertsCount(userID string) int {
-	return o.GetOverridesForUser(userID).AlertmanagerMaxAlertsCount
-}
-
-func (o *Overrides) AlertmanagerMaxAlertsSizeBytes(userID string) int {
-	return o.GetOverridesForUser(userID).AlertmanagerMaxAlertsSizeBytes
-}
-
-// GetOverridesForUser returns the per-tenant limits with overrides.
-func (o *Overrides) GetOverridesForUser(userID string) *Limits {
-	if o.tenantLimits != nil {
-		l := o.tenantLimits.ByUserID(userID)
-		if l != nil {
-			return l
-		}
-	}
-	return o.defaultLimits
-}
-
-// SmallestPositiveIntPerTenant returns the minimal positive value of the
-// supplied limit function for all given tenants.
-func SmallestPositiveIntPerTenant(tenantIDs []string, f func(string) int) int {
-	var result *int
-	for _, tenantID := range tenantIDs {
-		v := f(tenantID)
-		if result == nil || v < *result {
-			result = &v
-		}
-	}
-	if result == nil {
-		return 0
-	}
-	return *result
-}
-
-// SmallestPositiveNonZeroIntPerTenant returns the minimal positive and
-// non-zero value of the supplied limit function for all given tenants. For
-// many limits a value of 0 means unlimited, so the method returns 0 only if
-// all inputs have a limit of 0 or an empty tenant list is given.
-func SmallestPositiveNonZeroIntPerTenant(tenantIDs []string, f func(string) int) int {
-	var result *int
-	for _, tenantID := range tenantIDs {
-		v := f(tenantID)
-		if v > 0 && (result == nil || v < *result) {
-			result = &v
-		}
-	}
-	if result == nil {
-		return 0
-	}
-	return *result
-}
-
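The distinction between these two helpers matters for cross-tenant queries, where 0 usually means "unlimited": the non-zero variant skips zeros so a single unlimited tenant cannot disable the limit for everyone. A small sketch with hypothetical values:

```go
package main

import (
	"fmt"

	"github.com/cortexproject/cortex/pkg/util/validation"
)

func main() {
	// Tenant "a" is unlimited (0); "b" and "c" carry real limits.
	limits := map[string]int{"a": 0, "b": 500, "c": 200}
	f := func(tenantID string) int { return limits[tenantID] }
	tenants := []string{"a", "b", "c"}

	// 0: the plain minimum treats tenant a's 0 as the smallest value.
	fmt.Println(validation.SmallestPositiveIntPerTenant(tenants, f))
	// 200: the non-zero variant ignores 0 and picks the tightest real limit.
	fmt.Println(validation.SmallestPositiveNonZeroIntPerTenant(tenants, f))
}
```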
-// SmallestPositiveNonZeroDurationPerTenant returns the minimal positive and
-// non-zero value of the supplied limit function for all given tenants. For
-// many limits a value of 0 means unlimited, so the method returns 0 only if
-// all inputs have a limit of 0 or an empty tenant list is given.
-func SmallestPositiveNonZeroDurationPerTenant(tenantIDs []string, f func(string) time.Duration) time.Duration {
-	var result *time.Duration
-	for _, tenantID := range tenantIDs {
-		v := f(tenantID)
-		if v > 0 && (result == nil || v < *result) {
-			result = &v
-		}
-	}
-	if result == nil {
-		return 0
-	}
-	return *result
-}
-
-// MaxDurationPerTenant returns the maximum duration per tenant. If no tenants
-// are given, it returns time.Duration(0).
-func MaxDurationPerTenant(tenantIDs []string, f func(string) time.Duration) time.Duration {
-	result := time.Duration(0)
-	for _, tenantID := range tenantIDs {
-		v := f(tenantID)
-		if v > result {
-			result = v
-		}
-	}
-	return result
-}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/validation/notifications_limit_flag.go b/vendor/github.com/cortexproject/cortex/pkg/util/validation/notifications_limit_flag.go
deleted file mode 100644
index 97f2901d5..000000000
--- a/vendor/github.com/cortexproject/cortex/pkg/util/validation/notifications_limit_flag.go
+++ /dev/null
@@ -1,56 +0,0 @@
-package validation
-
-import (
-	"encoding/json"
-	"fmt"
-
-	"github.com/pkg/errors"
-
-	"github.com/cortexproject/cortex/pkg/util"
-)
-
-var allowedIntegrationNames = []string{
-	"webhook", "email", "pagerduty", "opsgenie", "wechat", "slack", "victorops", "pushover", "sns",
-}
-
-type NotificationRateLimitMap map[string]float64
-
-// String implements flag.Value.
-func (m NotificationRateLimitMap) String() string {
-	out, err := json.Marshal(map[string]float64(m))
-	if err != nil {
-		return fmt.Sprintf("failed to marshal: %v", err)
-	}
-	return string(out)
-}
-
-// Set implements flag.Value.
-func (m NotificationRateLimitMap) Set(s string) error {
-	newMap := map[string]float64{}
-	return m.updateMap(json.Unmarshal([]byte(s), &newMap), newMap)
-}
-
-// UnmarshalYAML implements yaml.Unmarshaler.
-func (m NotificationRateLimitMap) UnmarshalYAML(unmarshal func(interface{}) error) error {
-	newMap := map[string]float64{}
-	return m.updateMap(unmarshal(newMap), newMap)
-}
-
-func (m NotificationRateLimitMap) updateMap(unmarshalErr error, newMap map[string]float64) error {
-	if unmarshalErr != nil {
-		return unmarshalErr
-	}
-
-	for k, v := range newMap {
-		if !util.StringsContain(allowedIntegrationNames, k) {
-			return errors.Errorf("unknown integration name: %s", k)
-		}
-		m[k] = v
-	}
-	return nil
-}
-
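updateMap above is shared by the flag and YAML paths, and merges new keys into the existing map rather than replacing it. The flag.Value half of that pattern, reduced to a standalone sketch (the rateLimits type and flag name are hypothetical):

```go
package main

import (
	"encoding/json"
	"flag"
	"fmt"
)

// rateLimits is a map-valued flag: the whole map arrives as a single
// JSON argument on the command line.
type rateLimits map[string]float64

func (m rateLimits) String() string {
	out, _ := json.Marshal(map[string]float64(m))
	return string(out)
}

func (m rateLimits) Set(s string) error {
	parsed := map[string]float64{}
	if err := json.Unmarshal([]byte(s), &parsed); err != nil {
		return err
	}
	for k, v := range parsed {
		m[k] = v // merge into the existing map, mirroring updateMap above
	}
	return nil
}

func main() {
	limits := rateLimits{}
	flag.Var(limits, "rate-limit-per-integration", "JSON map of integration name to rate limit")
	// e.g. -rate-limit-per-integration='{"slack": 5, "email": 1}'
	flag.Parse()
	fmt.Println(limits)
}
```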
-// MarshalYAML implements yaml.Marshaler.
-func (m NotificationRateLimitMap) MarshalYAML() (interface{}, error) {
-	return map[string]float64(m), nil
-}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/validation/validate.go b/vendor/github.com/cortexproject/cortex/pkg/util/validation/validate.go
deleted file mode 100644
index 0985e7db7..000000000
--- a/vendor/github.com/cortexproject/cortex/pkg/util/validation/validate.go
+++ /dev/null
@@ -1,262 +0,0 @@
-package validation
-
-import (
-	"net/http"
-	"strings"
-	"time"
-	"unicode/utf8"
-
-	"github.com/go-kit/log"
-	"github.com/go-kit/log/level"
-	"github.com/prometheus/client_golang/prometheus"
-	"github.com/prometheus/common/model"
-	"github.com/weaveworks/common/httpgrpc"
-
-	"github.com/cortexproject/cortex/pkg/cortexpb"
-	"github.com/cortexproject/cortex/pkg/util"
-	"github.com/cortexproject/cortex/pkg/util/extract"
-)
-
-const (
-	discardReasonLabel = "reason"
-
-	errMetadataMissingMetricName = "metadata missing metric name"
-	errMetadataTooLong           = "metadata '%s' value too long: %.200q metric %.200q"
-
-	typeMetricName = "METRIC_NAME"
-	typeHelp       = "HELP"
-	typeUnit       = "UNIT"
-
-	metricNameTooLong = "metric_name_too_long"
-	helpTooLong       = "help_too_long"
-	unitTooLong       = "unit_too_long"
-
-	// ErrQueryTooLong is used in chunk store, querier and query frontend.
-	ErrQueryTooLong = "the query time range exceeds the limit (query length: %s, limit: %s)"
-
-	missingMetricName       = "missing_metric_name"
-	invalidMetricName       = "metric_name_invalid"
-	greaterThanMaxSampleAge = "greater_than_max_sample_age"
-	maxLabelNamesPerSeries  = "max_label_names_per_series"
-	tooFarInFuture          = "too_far_in_future"
-	invalidLabel            = "label_invalid"
-	labelNameTooLong        = "label_name_too_long"
-	duplicateLabelNames     = "duplicate_label_names"
-	labelsNotSorted         = "labels_not_sorted"
-	labelValueTooLong       = "label_value_too_long"
-	labelsSizeBytesExceeded = "labels_size_bytes_exceeded"
-
-	// Exemplar-specific validation reasons.
-	exemplarLabelsMissing    = "exemplar_labels_missing"
-	exemplarLabelsTooLong    = "exemplar_labels_too_long"
-	exemplarTimestampInvalid = "exemplar_timestamp_invalid"
-
-	// RateLimited is one of the values for the reason to discard samples.
-	// Declared here to avoid duplication in ingester and distributor.
-	RateLimited = "rate_limited"
-
-	// TooManyHAClusters is one of the reasons for discarding samples.
-	TooManyHAClusters = "too_many_ha_clusters"
-
-	// DroppedByRelabelConfiguration is used when samples are discarded because of relabeling configuration.
-	DroppedByRelabelConfiguration = "relabel_configuration"
-	// DroppedByUserConfigurationOverride is used when samples are discarded because the user's configuration removed the __name__ label.
-	DroppedByUserConfigurationOverride = "user_label_removal_configuration"
-
-	// ExemplarMaxLabelSetLength: the combined length of the label names and values of an Exemplar's LabelSet MUST NOT exceed 128 UTF-8 characters.
-	// https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#exemplars
-	ExemplarMaxLabelSetLength = 128
-)
-
-// DiscardedSamples is a metric of the number of discarded samples, by reason.
-var DiscardedSamples = prometheus.NewCounterVec(
-	prometheus.CounterOpts{
-		Name: "cortex_discarded_samples_total",
-		Help: "The total number of samples that were discarded.",
-	},
-	[]string{discardReasonLabel, "user"},
-)
-
-// DiscardedExemplars is a metric of the number of discarded exemplars, by reason.
-var DiscardedExemplars = prometheus.NewCounterVec(
-	prometheus.CounterOpts{
-		Name: "cortex_discarded_exemplars_total",
-		Help: "The total number of exemplars that were discarded.",
-	},
-	[]string{discardReasonLabel, "user"},
-)
-
-// DiscardedMetadata is a metric of the number of discarded metadata, by reason.
-var DiscardedMetadata = prometheus.NewCounterVec(
-	prometheus.CounterOpts{
-		Name: "cortex_discarded_metadata_total",
-		Help: "The total number of metadata that were discarded.",
-	},
-	[]string{discardReasonLabel, "user"},
-)
-
-func init() {
-	prometheus.MustRegister(DiscardedSamples)
-	prometheus.MustRegister(DiscardedExemplars)
-	prometheus.MustRegister(DiscardedMetadata)
-}
-
-// ValidateSample returns an error if the sample is invalid.
-// The returned error may retain the provided series labels.
-func ValidateSample(limits *Limits, userID string, ls []cortexpb.LabelAdapter, s cortexpb.Sample) ValidationError {
-	unsafeMetricName, _ := extract.UnsafeMetricNameFromLabelAdapters(ls)
-
-	if limits.RejectOldSamples && model.Time(s.TimestampMs) < model.Now().Add(-time.Duration(limits.RejectOldSamplesMaxAge)) {
-		DiscardedSamples.WithLabelValues(greaterThanMaxSampleAge, userID).Inc()
-		return newSampleTimestampTooOldError(unsafeMetricName, s.TimestampMs)
-	}
-
-	if model.Time(s.TimestampMs) > model.Now().Add(time.Duration(limits.CreationGracePeriod)) {
-		DiscardedSamples.WithLabelValues(tooFarInFuture, userID).Inc()
-		return newSampleTimestampTooNewError(unsafeMetricName, s.TimestampMs)
-	}
-
-	return nil
-}
-
-// ValidateExemplar returns an error if the exemplar is invalid.
-// The returned error may retain the provided series labels.
-func ValidateExemplar(userID string, ls []cortexpb.LabelAdapter, e cortexpb.Exemplar) ValidationError {
-	if len(e.Labels) <= 0 {
-		DiscardedExemplars.WithLabelValues(exemplarLabelsMissing, userID).Inc()
-		return newExemplarEmtpyLabelsError(ls, []cortexpb.LabelAdapter{}, e.TimestampMs)
-	}
-
-	if e.TimestampMs == 0 {
-		DiscardedExemplars.WithLabelValues(exemplarTimestampInvalid, userID).Inc()
-		return newExemplarMissingTimestampError(
-			ls,
-			e.Labels,
-			e.TimestampMs,
-		)
-	}
-
-	// Exemplar label length does not include chars involved in text
-	// rendering such as quotes, commas, etc. See spec and const definition.
-	labelSetLen := 0
-	for _, l := range e.Labels {
-		labelSetLen += utf8.RuneCountInString(l.Name)
-		labelSetLen += utf8.RuneCountInString(l.Value)
-	}
-
-	if labelSetLen > ExemplarMaxLabelSetLength {
-		DiscardedExemplars.WithLabelValues(exemplarLabelsTooLong, userID).Inc()
-		return newExemplarLabelLengthError(
-			ls,
-			e.Labels,
-			e.TimestampMs,
-		)
-	}
-
-	return nil
-}
-
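Note the length check above counts only the runes of label names and values, not the quotes, commas or braces of the rendered form, per the OpenMetrics exemplar rule it links to. A rough standalone equivalent of that measurement (hypothetical helper, not part of the deleted file):

```go
package main

import (
	"fmt"
	"unicode/utf8"
)

const exemplarMaxLabelSetLength = 128 // the OpenMetrics budget, as above

// labelSetLength counts runes in names and values only, mirroring the
// loop inside ValidateExemplar.
func labelSetLength(labels map[string]string) int {
	n := 0
	for name, value := range labels {
		n += utf8.RuneCountInString(name) + utf8.RuneCountInString(value)
	}
	return n
}

func main() {
	ls := map[string]string{"trace_id": "abc123"}
	fmt.Println(labelSetLength(ls) <= exemplarMaxLabelSetLength) // true: 8+6 = 14 runes
}
```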
-// ValidateLabels returns an error if the labels are invalid.
-// The returned error may retain the provided series labels.
-func ValidateLabels(limits *Limits, userID string, ls []cortexpb.LabelAdapter, skipLabelNameValidation bool) ValidationError {
-	if limits.EnforceMetricName {
-		unsafeMetricName, err := extract.UnsafeMetricNameFromLabelAdapters(ls)
-		if err != nil {
-			DiscardedSamples.WithLabelValues(missingMetricName, userID).Inc()
-			return newNoMetricNameError()
-		}
-
-		if !model.IsValidMetricName(model.LabelValue(unsafeMetricName)) {
-			DiscardedSamples.WithLabelValues(invalidMetricName, userID).Inc()
-			return newInvalidMetricNameError(unsafeMetricName)
-		}
-	}
-
-	numLabelNames := len(ls)
-	if numLabelNames > limits.MaxLabelNamesPerSeries {
-		DiscardedSamples.WithLabelValues(maxLabelNamesPerSeries, userID).Inc()
-		return newTooManyLabelsError(ls, limits.MaxLabelNamesPerSeries)
-	}
-
-	maxLabelNameLength := limits.MaxLabelNameLength
-	maxLabelValueLength := limits.MaxLabelValueLength
-	lastLabelName := ""
-	maxLabelsSizeBytes := limits.MaxLabelsSizeBytes
-	labelsSizeBytes := 0
-
-	for _, l := range ls {
-		if !skipLabelNameValidation && !model.LabelName(l.Name).IsValid() {
-			DiscardedSamples.WithLabelValues(invalidLabel, userID).Inc()
-			return newInvalidLabelError(ls, l.Name)
-		} else if len(l.Name) > maxLabelNameLength {
-			DiscardedSamples.WithLabelValues(labelNameTooLong, userID).Inc()
-			return newLabelNameTooLongError(ls, l.Name, maxLabelNameLength)
-		} else if len(l.Value) > maxLabelValueLength {
-			DiscardedSamples.WithLabelValues(labelValueTooLong, userID).Inc()
-			return newLabelValueTooLongError(ls, l.Name, l.Value, maxLabelValueLength)
-		} else if cmp := strings.Compare(lastLabelName, l.Name); cmp >= 0 {
-			if cmp == 0 {
-				DiscardedSamples.WithLabelValues(duplicateLabelNames, userID).Inc()
-				return newDuplicatedLabelError(ls, l.Name)
-			}
-
-			DiscardedSamples.WithLabelValues(labelsNotSorted, userID).Inc()
-			return newLabelsNotSortedError(ls, l.Name)
-		}
-
-		lastLabelName = l.Name
-		labelsSizeBytes += l.Size()
-	}
-	if maxLabelsSizeBytes > 0 && labelsSizeBytes > maxLabelsSizeBytes {
-		DiscardedSamples.WithLabelValues(labelsSizeBytesExceeded, userID).Inc()
-		return labelSizeBytesExceededError(ls, labelsSizeBytes, maxLabelsSizeBytes)
-	}
-	return nil
-}
-
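The loop above leans on the ingest contract that labels arrive sorted by name: a single strings.Compare against the previous name catches both duplicates (cmp == 0) and out-of-order input (cmp > 0) in one pass. The trick in isolation (hypothetical helper):

```go
package main

import (
	"fmt"
	"strings"
)

// checkSorted reports the first duplicate or ordering violation, the same
// way the cmp branch in ValidateLabels distinguishes the two cases.
func checkSorted(names []string) error {
	last := ""
	for _, name := range names {
		switch cmp := strings.Compare(last, name); {
		case cmp == 0:
			return fmt.Errorf("duplicate label name: %s", name)
		case cmp > 0:
			return fmt.Errorf("labels not sorted: %s after %s", name, last)
		}
		last = name
	}
	return nil
}

func main() {
	fmt.Println(checkSorted([]string{"__name__", "instance", "job"})) // <nil>
	fmt.Println(checkSorted([]string{"job", "instance"}))             // labels not sorted
}
```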
-// ValidateMetadata returns an error if metric metadata is invalid.
-func ValidateMetadata(cfg *Limits, userID string, metadata *cortexpb.MetricMetadata) error {
-	if cfg.EnforceMetadataMetricName && metadata.GetMetricFamilyName() == "" {
-		DiscardedMetadata.WithLabelValues(missingMetricName, userID).Inc()
-		return httpgrpc.Errorf(http.StatusBadRequest, errMetadataMissingMetricName)
-	}
-
-	maxMetadataValueLength := cfg.MaxMetadataLength
-	var reason string
-	var cause string
-	var metadataType string
-	if len(metadata.GetMetricFamilyName()) > maxMetadataValueLength {
-		metadataType = typeMetricName
-		reason = metricNameTooLong
-		cause = metadata.GetMetricFamilyName()
-	} else if len(metadata.Help) > maxMetadataValueLength {
-		metadataType = typeHelp
-		reason = helpTooLong
-		cause = metadata.Help
-	} else if len(metadata.Unit) > maxMetadataValueLength {
-		metadataType = typeUnit
-		reason = unitTooLong
-		cause = metadata.Unit
-	}
-
-	if reason != "" {
-		DiscardedMetadata.WithLabelValues(reason, userID).Inc()
-		return httpgrpc.Errorf(http.StatusBadRequest, errMetadataTooLong, metadataType, cause, metadata.GetMetricFamilyName())
-	}
-
-	return nil
-}
-
-func DeletePerUserValidationMetrics(userID string, log log.Logger) {
-	filter := map[string]string{"user": userID}
-
-	if err := util.DeleteMatchingLabels(DiscardedSamples, filter); err != nil {
-		level.Warn(log).Log("msg", "failed to remove cortex_discarded_samples_total metric for user", "user", userID, "err", err)
-	}
-	if err := util.DeleteMatchingLabels(DiscardedExemplars, filter); err != nil {
-		level.Warn(log).Log("msg", "failed to remove cortex_discarded_exemplars_total metric for user", "user", userID, "err", err)
-	}
-	if err := util.DeleteMatchingLabels(DiscardedMetadata, filter); err != nil {
-		level.Warn(log).Log("msg", "failed to remove cortex_discarded_metadata_total metric for user", "user", userID, "err", err)
-	}
-}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 7d6137069..5ca2eef6e 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -279,7 +279,6 @@ github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex
 github.com/cortexproject/cortex/pkg/tenant
 github.com/cortexproject/cortex/pkg/util
 github.com/cortexproject/cortex/pkg/util/backoff
-github.com/cortexproject/cortex/pkg/util/extract
 github.com/cortexproject/cortex/pkg/util/flagext
 github.com/cortexproject/cortex/pkg/util/grpcclient
 github.com/cortexproject/cortex/pkg/util/grpcencoding/snappy
@@ -292,7 +291,6 @@ github.com/cortexproject/cortex/pkg/util/runutil
 github.com/cortexproject/cortex/pkg/util/services
 github.com/cortexproject/cortex/pkg/util/spanlogger
 github.com/cortexproject/cortex/pkg/util/tls
-github.com/cortexproject/cortex/pkg/util/validation
 # github.com/danwakefield/fnmatch v0.0.0-20160403171240-cbb64ac3d964
 ## explicit
 github.com/danwakefield/fnmatch