prometheus.exporter.*: validate scrape configs while generating (#5576)
captncraig authored Oct 24, 2023
1 parent 21c8cd7 commit 85c6f92
Showing 8 changed files with 91 additions and 21 deletions.
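For orientation (an illustration, not part of the commit): each config generator now passes the scrape config it builds through Prometheus' own validation, using a global config derived from the component's ScrapeOptions, so problems such as a scrape timeout greater than the scrape interval are reported when the config is generated rather than later. A minimal Go sketch of that pattern, assuming the Prometheus config package as vendored by the agent at the time (where (*ScrapeConfig).Validate takes a GlobalConfig value, as the diff below shows) and a made-up job name:

package main

import (
	"fmt"
	"time"

	"github.com/prometheus/common/model"
	promconfig "github.com/prometheus/prometheus/config"
)

func main() {
	// Global defaults, analogous to what ScrapeOptions.GlobalConfig() returns below.
	global := promconfig.DefaultGlobalConfig
	global.ScrapeInterval = model.Duration(time.Hour)
	global.ScrapeTimeout = model.Duration(42 * time.Second)

	// A generated scrape config with timeout > interval, like the invalid
	// case exercised by the updated tests.
	cfg := promconfig.DefaultScrapeConfig
	cfg.JobName = "serviceMonitor/operator/example/0" // hypothetical job name
	cfg.ScrapeInterval = model.Duration(time.Second)
	cfg.ScrapeTimeout = model.Duration(17 * time.Minute)

	// Validate fills unset interval/timeout from the global config and
	// rejects a timeout larger than the interval.
	if err := cfg.Validate(global); err != nil {
		fmt.Println("invalid scrape config:", err)
	}
}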
2 changes: 2 additions & 0 deletions CHANGELOG.md
@@ -63,6 +63,8 @@ Main (unreleased)
- Fix an issue with static mode and `promtail` converters, where static targets
did not correctly default to `localhost` when not provided. (@thampiotr)

+ - Fix validation issue with ServiceMonitors when scrape timeout is greater than interval. (@captncraig)

### Enhancements

- The `loki.write` WAL now has snappy compression enabled by default. (@thepalbi)
6 changes: 2 additions & 4 deletions component/prometheus/operator/common/crdmanager.go
@@ -494,16 +494,14 @@ func (c *crdManager) addProbe(p *promopv1.Probe) {
if err != nil {
// TODO(jcreixell): Generate Kubernetes event to inform of this error when running `kubectl get <probe>`.
level.Error(c.logger).Log("name", p.Name, "err", err, "msg", "error generating scrapeconfig from probe")
+ c.addDebugInfo(p.Namespace, p.Name, err)
+ return
}
c.mut.Lock()
c.discoveryConfigs[pmc.JobName] = pmc.ServiceDiscoveryConfigs
c.scrapeConfigs[pmc.JobName] = pmc
c.mut.Unlock()

- if err != nil {
- c.addDebugInfo(p.Namespace, p.Name, err)
- return
- }
if err = c.apply(); err != nil {
level.Error(c.logger).Log("name", p.Name, "err", err, "msg", "error applying scrape configs from "+c.kind)
}
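Note on the hunk above: with the generators now returning a validation error, addProbe records the failure via addDebugInfo and returns before the discovery and scrape configs are stored or applied. The separate error check that previously ran only after the configs had already been stored becomes redundant and is removed.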
@@ -277,5 +277,5 @@ func (cg *ConfigGenerator) GeneratePodMonitorConfig(m *promopv1.PodMonitor, ep p
cfg.LabelNameLengthLimit = uint(m.Spec.LabelNameLengthLimit)
cfg.LabelValueLengthLimit = uint(m.Spec.LabelValueLengthLimit)

- return cfg, nil
+ return cfg, cfg.Validate(cg.ScrapeOptions.GlobalConfig())
}
@@ -9,6 +9,7 @@ import (

"github.com/grafana/agent/component/common/kubernetes"
flow_relabel "github.com/grafana/agent/component/common/relabel"
+ "github.com/grafana/agent/component/prometheus/operator"
"github.com/grafana/agent/pkg/util"
promopv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
commonConfig "github.com/prometheus/common/config"
@@ -65,8 +66,8 @@ func TestGeneratePodMonitorConfig(t *testing.T) {
expected: &config.ScrapeConfig{
JobName: "podMonitor/operator/podmonitor/1",
HonorTimestamps: true,
- ScrapeInterval: model.Duration(time.Minute),
- ScrapeTimeout: model.Duration(10 * time.Second),
+ ScrapeInterval: model.Duration(time.Hour),
+ ScrapeTimeout: model.Duration(42 * time.Second),
MetricsPath: "/metrics",
Scheme: "http",
HTTPClientConfig: commonConfig.HTTPClientConfig{
@@ -121,8 +122,8 @@ func TestGeneratePodMonitorConfig(t *testing.T) {
expected: &config.ScrapeConfig{
JobName: "podMonitor/operator/podmonitor/1",
HonorTimestamps: true,
- ScrapeInterval: model.Duration(time.Minute),
- ScrapeTimeout: model.Duration(10 * time.Second),
+ ScrapeInterval: model.Duration(time.Hour),
+ ScrapeTimeout: model.Duration(42 * time.Second),
MetricsPath: "/metrics",
Scheme: "http",
HTTPClientConfig: commonConfig.HTTPClientConfig{
@@ -177,8 +178,64 @@ func TestGeneratePodMonitorConfig(t *testing.T) {
expected: &config.ScrapeConfig{
JobName: "podMonitor/operator/podmonitor/1",
HonorTimestamps: true,
- ScrapeInterval: model.Duration(time.Minute),
- ScrapeTimeout: model.Duration(10 * time.Second),
+ ScrapeInterval: model.Duration(time.Hour),
+ ScrapeTimeout: model.Duration(42 * time.Second),
+ MetricsPath: "/metrics",
+ Scheme: "http",
+ HTTPClientConfig: commonConfig.HTTPClientConfig{
+ FollowRedirects: true,
+ EnableHTTP2: true,
+ },
+ ServiceDiscoveryConfigs: discovery.Configs{
+ &promk8s.SDConfig{
+ Role: "pod",
+
+ NamespaceDiscovery: promk8s.NamespaceDiscovery{
+ IncludeOwnNamespace: false,
+ Names: []string{"operator"},
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "defaults_from_scrapeoptions",
+ m: &promopv1.PodMonitor{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: "operator",
+ Name: "podmonitor",
+ },
+ },
+ ep: promopv1.PodMetricsEndpoint{
+ TargetPort: &intstr.IntOrString{IntVal: 8080, Type: intstr.Int},
+ },
+ expectedRelabels: util.Untab(`
+ - target_label: __meta_foo
+ replacement: bar
+ - source_labels: [job]
+ target_label: __tmp_prometheus_job_name
+ - source_labels: [__meta_kubernetes_pod_phase]
+ regex: (Failed|Succeeded)
+ action: drop
+ - source_labels: ["__meta_kubernetes_pod_container_port_number"]
+ regex: "8080"
+ action: "keep"
+ - source_labels: [__meta_kubernetes_namespace]
+ target_label: namespace
+ - source_labels: [__meta_kubernetes_pod_container_name]
+ target_label: container
+ - source_labels: [__meta_kubernetes_pod_name]
+ target_label: pod
+ - target_label: job
+ replacement: operator/podmonitor
+ - target_label: endpoint
+ replacement: "8080"
+ `),
+ expected: &config.ScrapeConfig{
+ JobName: "podMonitor/operator/podmonitor/1",
+ HonorTimestamps: true,
+ ScrapeInterval: model.Duration(time.Hour),
+ ScrapeTimeout: model.Duration(42 * time.Second),
MetricsPath: "/metrics",
Scheme: "http",
HTTPClientConfig: commonConfig.HTTPClientConfig{
@@ -247,8 +304,8 @@ func TestGeneratePodMonitorConfig(t *testing.T) {
FollowRedirects: &falseVal,
ProxyURL: &proxyURL,
Scheme: "https",
- ScrapeTimeout: "17m",
- Interval: "1s",
+ ScrapeTimeout: "17s",
+ Interval: "12m",
HonorLabels: true,
HonorTimestamps: &falseVal,
FilterRunning: &falseVal,
@@ -325,8 +382,8 @@ func TestGeneratePodMonitorConfig(t *testing.T) {
JobName: "podMonitor/operator/podmonitor/1",
HonorTimestamps: false,
HonorLabels: true,
- ScrapeInterval: model.Duration(time.Second),
- ScrapeTimeout: model.Duration(17 * time.Minute),
+ ScrapeInterval: model.Duration(12 * time.Minute),
+ ScrapeTimeout: model.Duration(17 * time.Second),
MetricsPath: "/foo",
Scheme: "https",
Params: url.Values{
@@ -368,6 +425,10 @@ func TestGeneratePodMonitorConfig(t *testing.T) {
AdditionalRelabelConfigs: []*flow_relabel.Config{
{TargetLabel: "__meta_foo", Replacement: "bar"},
},
+ ScrapeOptions: operator.ScrapeOptions{
+ DefaultScrapeInterval: time.Hour,
+ DefaultScrapeTimeout: 42 * time.Second,
+ },
}
cfg, err := cg.GeneratePodMonitorConfig(tc.m, tc.ep, 1)
require.NoError(t, err)
@@ -230,5 +230,5 @@ func (cg *ConfigGenerator) GenerateProbeConfig(m *promopv1.Probe) (cfg *config.S
}
cfg.MetricRelabelConfigs = metricRelabels.configs

- return cfg, nil
+ return cfg, cfg.Validate(cg.ScrapeOptions.GlobalConfig())
}
@@ -301,5 +301,5 @@ func (cg *ConfigGenerator) GenerateServiceMonitorConfig(m *promopv1.ServiceMonit
cfg.LabelNameLengthLimit = uint(m.Spec.LabelNameLengthLimit)
cfg.LabelValueLengthLimit = uint(m.Spec.LabelValueLengthLimit)

- return cfg, nil
+ return cfg, cfg.Validate(cg.ScrapeOptions.GlobalConfig())
}
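The same one-line change is applied in all three generators (GeneratePodMonitorConfig, GenerateProbeConfig, and GenerateServiceMonitorConfig above), so every scrape config produced from a PodMonitor, Probe, or ServiceMonitor is validated against the component's global defaults before it is stored and applied.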
@@ -283,8 +283,8 @@ func TestGenerateServiceMonitorConfig(t *testing.T) {
FollowRedirects: &falseVal,
ProxyURL: &proxyURL,
Scheme: "https",
- ScrapeTimeout: "17m",
- Interval: "1s",
+ ScrapeTimeout: "17s",
+ Interval: "12m",
HonorLabels: true,
HonorTimestamps: &falseVal,
FilterRunning: &falseVal,
@@ -377,8 +377,8 @@ func TestGenerateServiceMonitorConfig(t *testing.T) {
Params: url.Values{
"a": []string{"b"},
},
- ScrapeInterval: model.Duration(time.Second),
- ScrapeTimeout: model.Duration(17 * time.Minute),
+ ScrapeInterval: model.Duration(12 * time.Minute),
+ ScrapeTimeout: model.Duration(17 * time.Second),
MetricsPath: "/foo",
Scheme: "https",
HTTPClientConfig: commonConfig.HTTPClientConfig{
9 changes: 9 additions & 0 deletions component/prometheus/operator/types.go
@@ -7,6 +7,8 @@ import (
"github.com/grafana/agent/component/common/kubernetes"
flow_relabel "github.com/grafana/agent/component/common/relabel"
"github.com/grafana/agent/component/prometheus/scrape"
+ "github.com/prometheus/common/model"
+ promconfig "github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/storage"
apiv1 "k8s.io/api/core/v1"
)
@@ -46,6 +48,13 @@ type ScrapeOptions struct {
DefaultScrapeTimeout time.Duration `river:"default_scrape_timeout,attr,optional"`
}

+ func (s *ScrapeOptions) GlobalConfig() promconfig.GlobalConfig {
+ cfg := promconfig.DefaultGlobalConfig
+ cfg.ScrapeInterval = model.Duration(s.DefaultScrapeInterval)
+ cfg.ScrapeTimeout = model.Duration(s.DefaultScrapeTimeout)
+ return cfg
+ }

var DefaultArguments = Arguments{
Client: kubernetes.ClientArguments{
HTTPClientConfig: config.DefaultHTTPClientConfig,
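To close the loop (illustration only, not part of the diff): the new ScrapeOptions.GlobalConfig helper supplies the defaults that Validate falls back to when a generated config leaves the interval or timeout unset, which is what the new defaults_from_scrapeoptions test case checks. A sketch, assuming the grafana/agent module and its vendored Prometheus config package are available:

package main

import (
	"fmt"
	"time"

	"github.com/grafana/agent/component/prometheus/operator"
	promconfig "github.com/prometheus/prometheus/config"
)

func main() {
	opts := operator.ScrapeOptions{
		DefaultScrapeInterval: time.Hour,
		DefaultScrapeTimeout:  42 * time.Second,
	}

	// GlobalConfig copies Prometheus' global defaults and overrides the
	// scrape interval and timeout with the component-level defaults.
	global := opts.GlobalConfig()

	// A generated config that leaves interval and timeout unset: Validate
	// fills both from the global defaults instead of Prometheus' built-ins.
	cfg := promconfig.DefaultScrapeConfig
	cfg.JobName = "podMonitor/operator/podmonitor/1"

	if err := cfg.Validate(global); err != nil {
		fmt.Println("unexpected error:", err)
		return
	}
	fmt.Println(cfg.ScrapeInterval, cfg.ScrapeTimeout) // expect 1h and 42s
}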
