From 39c4dadd4c922d4eedd793146e4b4794e8be4431 Mon Sep 17 00:00:00 2001 From: sakulali Date: Sat, 18 Nov 2023 14:52:04 +0800 Subject: [PATCH] chore: keep the original implementation if the feature is disabled Signed-off-by: sakulali --- receiver/mongodbreceiver/go.mod | 2 +- receiver/mongodbreceiver/integration_test.go | 6 + .../metadata/metrics_database_attr_version.go | 260 ++++++++++++++++++ receiver/mongodbreceiver/metrics.go | 150 +++++++--- receiver/mongodbreceiver/scraper.go | 54 ++-- receiver/mongodbreceiver/scraper_test.go | 37 +-- 6 files changed, 421 insertions(+), 88 deletions(-) create mode 100644 receiver/mongodbreceiver/internal/metadata/metrics_database_attr_version.go diff --git a/receiver/mongodbreceiver/go.mod b/receiver/mongodbreceiver/go.mod index 60c26a0fa411..36812d5b9c29 100644 --- a/receiver/mongodbreceiver/go.mod +++ b/receiver/mongodbreceiver/go.mod @@ -17,6 +17,7 @@ require ( go.opentelemetry.io/collector/config/configtls v0.89.0 go.opentelemetry.io/collector/confmap v0.89.0 go.opentelemetry.io/collector/consumer v0.89.0 + go.opentelemetry.io/collector/featuregate v1.0.0-rcv0018 go.opentelemetry.io/collector/pdata v1.0.0-rcv0018 go.opentelemetry.io/collector/receiver v0.89.0 go.uber.org/multierr v1.11.0 @@ -82,7 +83,6 @@ require ( go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/collector v0.89.0 // indirect go.opentelemetry.io/collector/config/configtelemetry v0.89.0 // indirect - go.opentelemetry.io/collector/featuregate v1.0.0-rcv0018 // indirect go.opentelemetry.io/otel v1.21.0 // indirect go.opentelemetry.io/otel/metric v1.21.0 // indirect go.opentelemetry.io/otel/trace v1.21.0 // indirect diff --git a/receiver/mongodbreceiver/integration_test.go b/receiver/mongodbreceiver/integration_test.go index d23013cd64f3..51abdd4fb90d 100644 --- a/receiver/mongodbreceiver/integration_test.go +++ b/receiver/mongodbreceiver/integration_test.go @@ -12,10 +12,12 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" 
"github.com/testcontainers/testcontainers-go" "github.com/testcontainers/testcontainers-go/wait" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config/confignet" + "go.opentelemetry.io/collector/featuregate" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/scraperinttest" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest/pmetrictest" @@ -24,6 +26,10 @@ import ( const mongoPort = "27017" func TestIntegration(t *testing.T) { + // Simulate enabling removeDatabaseAttrFeatureGate + err := featuregate.GlobalRegistry().Set(removeDatabaseAttrID, true) + require.NoError(t, err) + t.Run("4.0", integrationTest("4_0", []string{"/setup.sh"}, func(*Config) {})) t.Run("5.0", integrationTest("5_0", []string{"/setup.sh"}, func(*Config) {})) t.Run("4.4lpu", integrationTest("4_4lpu", []string{"/lpu.sh"}, func(cfg *Config) { diff --git a/receiver/mongodbreceiver/internal/metadata/metrics_database_attr_version.go b/receiver/mongodbreceiver/internal/metadata/metrics_database_attr_version.go new file mode 100644 index 000000000000..038c051919e6 --- /dev/null +++ b/receiver/mongodbreceiver/internal/metadata/metrics_database_attr_version.go @@ -0,0 +1,260 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metadata // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbreceiver/internal/metadata" + +import ( + "go.opentelemetry.io/collector/pdata/pcommon" +) + +func (m *metricMongodbCollectionCount) recordDataPointDatabaseAttr(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, databaseAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("database", databaseAttributeValue) +} + +func (m *metricMongodbConnectionCount) recordDataPointDatabaseAttr(start pcommon.Timestamp, ts 
pcommon.Timestamp, val int64, databaseAttributeValue string, connectionTypeAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("database", databaseAttributeValue) + dp.Attributes().PutStr("type", connectionTypeAttributeValue) +} + +func (m *metricMongodbDataSize) recordDataPointDatabaseAttr(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, databaseAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("database", databaseAttributeValue) +} + +func (m *metricMongodbDocumentOperationCount) recordDataPointDatabaseAttr(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, databaseAttributeValue string, operationAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("database", databaseAttributeValue) + dp.Attributes().PutStr("operation", operationAttributeValue) +} + +func (m *metricMongodbExtentCount) recordDataPointDatabaseAttr(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, databaseAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("database", databaseAttributeValue) +} + +func (m *metricMongodbIndexAccessCount) recordDataPointDatabaseAttr(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, databaseAttributeValue string, collectionAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + 
dp.Attributes().PutStr("database", databaseAttributeValue) + dp.Attributes().PutStr("collection", collectionAttributeValue) +} + +func (m *metricMongodbIndexCount) recordDataPointDatabaseAttr(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, databaseAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("database", databaseAttributeValue) +} + +func (m *metricMongodbIndexSize) recordDataPointDatabaseAttr(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, databaseAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("database", databaseAttributeValue) +} + +func (m *metricMongodbLockAcquireCount) recordDataPointDatabaseAttr(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, databaseAttributeValue string, lockTypeAttributeValue string, lockModeAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("database", databaseAttributeValue) + dp.Attributes().PutStr("lock_type", lockTypeAttributeValue) + dp.Attributes().PutStr("lock_mode", lockModeAttributeValue) +} + +func (m *metricMongodbLockAcquireTime) recordDataPointDatabaseAttr(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, databaseAttributeValue string, lockTypeAttributeValue string, lockModeAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("database", databaseAttributeValue) + dp.Attributes().PutStr("lock_type", lockTypeAttributeValue) + 
dp.Attributes().PutStr("lock_mode", lockModeAttributeValue) +} + +func (m *metricMongodbLockAcquireWaitCount) recordDataPointDatabaseAttr(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, databaseAttributeValue string, lockTypeAttributeValue string, lockModeAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("database", databaseAttributeValue) + dp.Attributes().PutStr("lock_type", lockTypeAttributeValue) + dp.Attributes().PutStr("lock_mode", lockModeAttributeValue) +} + +func (m *metricMongodbLockDeadlockCount) recordDataPointDatabaseAttr(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, databaseAttributeValue string, lockTypeAttributeValue string, lockModeAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("database", databaseAttributeValue) + dp.Attributes().PutStr("lock_type", lockTypeAttributeValue) + dp.Attributes().PutStr("lock_mode", lockModeAttributeValue) +} + +func (m *metricMongodbMemoryUsage) recordDataPointDatabaseAttr(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, databaseAttributeValue string, memoryTypeAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("database", databaseAttributeValue) + dp.Attributes().PutStr("type", memoryTypeAttributeValue) +} + +func (m *metricMongodbObjectCount) recordDataPointDatabaseAttr(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, databaseAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + 
dp.SetIntValue(val) + dp.Attributes().PutStr("database", databaseAttributeValue) +} + +func (m *metricMongodbStorageSize) recordDataPointDatabaseAttr(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, databaseAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("database", databaseAttributeValue) +} + +// RecordMongodbCollectionCountDataPointDatabaseAttr adds a data point to mongodb.collection.count metric. +func (mb *MetricsBuilder) RecordMongodbCollectionCountDataPointDatabaseAttr(ts pcommon.Timestamp, val int64, databaseAttributeValue string) { + mb.metricMongodbCollectionCount.recordDataPointDatabaseAttr(mb.startTime, ts, val, databaseAttributeValue) +} + +// RecordMongodbDataSizeDataPointDatabaseAttr adds a data point to mongodb.data.size metric. +func (mb *MetricsBuilder) RecordMongodbDataSizeDataPointDatabaseAttr(ts pcommon.Timestamp, val int64, databaseAttributeValue string) { + mb.metricMongodbDataSize.recordDataPointDatabaseAttr(mb.startTime, ts, val, databaseAttributeValue) +} + +// RecordMongodbDocumentOperationCountDataPointDatabaseAttr adds a data point to mongodb.document.operation.count metric. +func (mb *MetricsBuilder) RecordMongodbDocumentOperationCountDataPointDatabaseAttr(ts pcommon.Timestamp, val int64, databaseAttributeValue string, operationAttributeValue AttributeOperation) { + mb.metricMongodbDocumentOperationCount.recordDataPointDatabaseAttr(mb.startTime, ts, val, databaseAttributeValue, operationAttributeValue.String()) +} + +// RecordMongodbConnectionCountDataPointDatabaseAttr adds a data point to mongodb.connection.count metric. 
+func (mb *MetricsBuilder) RecordMongodbConnectionCountDataPointDatabaseAttr(ts pcommon.Timestamp, val int64, databaseAttributeValue string, connectionTypeAttributeValue AttributeConnectionType) { + mb.metricMongodbConnectionCount.recordDataPointDatabaseAttr(mb.startTime, ts, val, databaseAttributeValue, connectionTypeAttributeValue.String()) +} + +// RecordMongodbExtentCountDataPointDatabaseAttr adds a data point to mongodb.extent.count metric. +func (mb *MetricsBuilder) RecordMongodbExtentCountDataPointDatabaseAttr(ts pcommon.Timestamp, val int64, databaseAttributeValue string) { + mb.metricMongodbExtentCount.recordDataPointDatabaseAttr(mb.startTime, ts, val, databaseAttributeValue) +} + +// RecordMongodbIndexAccessCountDataPointDatabaseAttr adds a data point to mongodb.index.access.count metric. +func (mb *MetricsBuilder) RecordMongodbIndexAccessCountDataPointDatabaseAttr(ts pcommon.Timestamp, val int64, databaseAttributeValue string, collectionAttributeValue string) { + mb.metricMongodbIndexAccessCount.recordDataPointDatabaseAttr(mb.startTime, ts, val, databaseAttributeValue, collectionAttributeValue) +} + +// RecordMongodbIndexCountDataPointDatabaseAttr adds a data point to mongodb.index.count metric. +func (mb *MetricsBuilder) RecordMongodbIndexCountDataPointDatabaseAttr(ts pcommon.Timestamp, val int64, databaseAttributeValue string) { + mb.metricMongodbIndexCount.recordDataPointDatabaseAttr(mb.startTime, ts, val, databaseAttributeValue) +} + +// RecordMongodbIndexSizeDataPointDatabaseAttr adds a data point to mongodb.index.size metric. +func (mb *MetricsBuilder) RecordMongodbIndexSizeDataPointDatabaseAttr(ts pcommon.Timestamp, val int64, databaseAttributeValue string) { + mb.metricMongodbIndexSize.recordDataPointDatabaseAttr(mb.startTime, ts, val, databaseAttributeValue) +} + +// RecordMongodbLockAcquireCountDataPointDatabaseAttr adds a data point to mongodb.lock.acquire.count metric. 
+func (mb *MetricsBuilder) RecordMongodbLockAcquireCountDataPointDatabaseAttr(ts pcommon.Timestamp, val int64, databaseAttributeValue string, lockTypeAttributeValue AttributeLockType, lockModeAttributeValue AttributeLockMode) { + mb.metricMongodbLockAcquireCount.recordDataPointDatabaseAttr(mb.startTime, ts, val, databaseAttributeValue, lockTypeAttributeValue.String(), lockModeAttributeValue.String()) +} + +// RecordMongodbLockAcquireTimeDataPointDatabaseAttr adds a data point to mongodb.lock.acquire.time metric. +func (mb *MetricsBuilder) RecordMongodbLockAcquireTimeDataPointDatabaseAttr(ts pcommon.Timestamp, val int64, databaseAttributeValue string, lockTypeAttributeValue AttributeLockType, lockModeAttributeValue AttributeLockMode) { + mb.metricMongodbLockAcquireTime.recordDataPointDatabaseAttr(mb.startTime, ts, val, databaseAttributeValue, lockTypeAttributeValue.String(), lockModeAttributeValue.String()) +} + +// RecordMongodbLockAcquireWaitCountDataPointDatabaseAttr adds a data point to mongodb.lock.acquire.wait_count metric. +func (mb *MetricsBuilder) RecordMongodbLockAcquireWaitCountDataPointDatabaseAttr(ts pcommon.Timestamp, val int64, databaseAttributeValue string, lockTypeAttributeValue AttributeLockType, lockModeAttributeValue AttributeLockMode) { + mb.metricMongodbLockAcquireWaitCount.recordDataPointDatabaseAttr(mb.startTime, ts, val, databaseAttributeValue, lockTypeAttributeValue.String(), lockModeAttributeValue.String()) +} + +// RecordMongodbLockDeadlockCountDataPointDatabaseAttr adds a data point to mongodb.lock.deadlock.count metric. 
+func (mb *MetricsBuilder) RecordMongodbLockDeadlockCountDataPointDatabaseAttr(ts pcommon.Timestamp, val int64, databaseAttributeValue string, lockTypeAttributeValue AttributeLockType, lockModeAttributeValue AttributeLockMode) { + mb.metricMongodbLockDeadlockCount.recordDataPointDatabaseAttr(mb.startTime, ts, val, databaseAttributeValue, lockTypeAttributeValue.String(), lockModeAttributeValue.String()) +} + +// RecordMongodbMemoryUsageDataPointDatabaseAttr adds a data point to mongodb.memory.usage metric. +func (mb *MetricsBuilder) RecordMongodbMemoryUsageDataPointDatabaseAttr(ts pcommon.Timestamp, val int64, databaseAttributeValue string, memoryTypeAttributeValue AttributeMemoryType) { + mb.metricMongodbMemoryUsage.recordDataPointDatabaseAttr(mb.startTime, ts, val, databaseAttributeValue, memoryTypeAttributeValue.String()) +} + +// RecordMongodbObjectCountDataPointDatabaseAttr adds a data point to mongodb.object.count metric. +func (mb *MetricsBuilder) RecordMongodbObjectCountDataPointDatabaseAttr(ts pcommon.Timestamp, val int64, databaseAttributeValue string) { + mb.metricMongodbObjectCount.recordDataPointDatabaseAttr(mb.startTime, ts, val, databaseAttributeValue) +} + +// RecordMongodbStorageSizeDataPointDatabaseAttr adds a data point to mongodb.storage.size metric. 
+func (mb *MetricsBuilder) RecordMongodbStorageSizeDataPointDatabaseAttr(ts pcommon.Timestamp, val int64, databaseAttributeValue string) { + mb.metricMongodbStorageSize.recordDataPointDatabaseAttr(mb.startTime, ts, val, databaseAttributeValue) +} diff --git a/receiver/mongodbreceiver/metrics.go b/receiver/mongodbreceiver/metrics.go index 8d78aec0f42a..807a0a27b9ff 100644 --- a/receiver/mongodbreceiver/metrics.go +++ b/receiver/mongodbreceiver/metrics.go @@ -57,73 +57,97 @@ const ( ) // DBStats -func (s *mongodbScraper) recordCollections(now pcommon.Timestamp, doc bson.M, errs *scrapererror.ScrapeErrors) { +func (s *mongodbScraper) recordCollections(now pcommon.Timestamp, doc bson.M, dbName string, errs *scrapererror.ScrapeErrors) { metricPath := []string{"collections"} metricName := "mongodb.collection.count" val, err := collectMetric(doc, metricPath) if err != nil { - errs.AddPartial(1, fmt.Errorf(collectMetricError, metricName, err)) + errs.AddPartial(1, fmt.Errorf(collectMetricWithAttributes, metricName, dbName, err)) return } - s.mb.RecordMongodbCollectionCountDataPoint(now, val) + if s.removeDatabaseAttr { + s.mb.RecordMongodbCollectionCountDataPoint(now, val) + } else { + s.mb.RecordMongodbCollectionCountDataPointDatabaseAttr(now, val, dbName) + } } -func (s *mongodbScraper) recordDataSize(now pcommon.Timestamp, doc bson.M, errs *scrapererror.ScrapeErrors) { +func (s *mongodbScraper) recordDataSize(now pcommon.Timestamp, doc bson.M, dbName string, errs *scrapererror.ScrapeErrors) { metricPath := []string{"dataSize"} metricName := "mongodb.data.size" val, err := collectMetric(doc, metricPath) if err != nil { - errs.AddPartial(1, fmt.Errorf(collectMetricError, metricName, err)) + errs.AddPartial(1, fmt.Errorf(collectMetricWithAttributes, metricName, dbName, err)) return } - s.mb.RecordMongodbDataSizeDataPoint(now, val) + if s.removeDatabaseAttr { + s.mb.RecordMongodbDataSizeDataPoint(now, val) + } else { + s.mb.RecordMongodbDataSizeDataPointDatabaseAttr(now, 
val, dbName) + } } -func (s *mongodbScraper) recordStorageSize(now pcommon.Timestamp, doc bson.M, errs *scrapererror.ScrapeErrors) { +func (s *mongodbScraper) recordStorageSize(now pcommon.Timestamp, doc bson.M, dbName string, errs *scrapererror.ScrapeErrors) { metricPath := []string{"storageSize"} metricName := "mongodb.storage.size" val, err := collectMetric(doc, metricPath) if err != nil { - errs.AddPartial(1, fmt.Errorf(collectMetricError, metricName, err)) + errs.AddPartial(1, fmt.Errorf(collectMetricWithAttributes, metricName, dbName, err)) return } - s.mb.RecordMongodbStorageSizeDataPoint(now, val) + if s.removeDatabaseAttr { + s.mb.RecordMongodbStorageSizeDataPoint(now, val) + } else { + s.mb.RecordMongodbStorageSizeDataPointDatabaseAttr(now, val, dbName) + } } -func (s *mongodbScraper) recordObjectCount(now pcommon.Timestamp, doc bson.M, errs *scrapererror.ScrapeErrors) { +func (s *mongodbScraper) recordObjectCount(now pcommon.Timestamp, doc bson.M, dbName string, errs *scrapererror.ScrapeErrors) { metricPath := []string{"objects"} metricName := "mongodb.object.count" val, err := collectMetric(doc, metricPath) if err != nil { - errs.AddPartial(1, fmt.Errorf(collectMetricError, metricName, err)) + errs.AddPartial(1, fmt.Errorf(collectMetricWithAttributes, metricName, dbName, err)) return } - s.mb.RecordMongodbObjectCountDataPoint(now, val) + if s.removeDatabaseAttr { + s.mb.RecordMongodbObjectCountDataPoint(now, val) + } else { + s.mb.RecordMongodbObjectCountDataPointDatabaseAttr(now, val, dbName) + } } -func (s *mongodbScraper) recordIndexCount(now pcommon.Timestamp, doc bson.M, errs *scrapererror.ScrapeErrors) { +func (s *mongodbScraper) recordIndexCount(now pcommon.Timestamp, doc bson.M, dbName string, errs *scrapererror.ScrapeErrors) { metricPath := []string{"indexes"} metricName := "mongodb.index.count" val, err := collectMetric(doc, metricPath) if err != nil { - errs.AddPartial(1, fmt.Errorf(collectMetricError, metricName, err)) + errs.AddPartial(1, 
fmt.Errorf(collectMetricWithAttributes, metricName, dbName, err)) return } - s.mb.RecordMongodbIndexCountDataPoint(now, val) + if s.removeDatabaseAttr { + s.mb.RecordMongodbIndexCountDataPoint(now, val) + } else { + s.mb.RecordMongodbIndexCountDataPointDatabaseAttr(now, val, dbName) + } } -func (s *mongodbScraper) recordIndexSize(now pcommon.Timestamp, doc bson.M, errs *scrapererror.ScrapeErrors) { +func (s *mongodbScraper) recordIndexSize(now pcommon.Timestamp, doc bson.M, dbName string, errs *scrapererror.ScrapeErrors) { metricPath := []string{"indexSize"} metricName := "mongodb.index.size" val, err := collectMetric(doc, metricPath) if err != nil { - errs.AddPartial(1, fmt.Errorf(collectMetricError, metricName, err)) + errs.AddPartial(1, fmt.Errorf(collectMetricWithAttributes, metricName, dbName, err)) return } - s.mb.RecordMongodbIndexSizeDataPoint(now, val) + if s.removeDatabaseAttr { + s.mb.RecordMongodbIndexSizeDataPoint(now, val) + } else { + s.mb.RecordMongodbIndexSizeDataPointDatabaseAttr(now, val, dbName) + } } -func (s *mongodbScraper) recordExtentCount(now pcommon.Timestamp, doc bson.M, errs *scrapererror.ScrapeErrors) { +func (s *mongodbScraper) recordExtentCount(now pcommon.Timestamp, doc bson.M, dbName string, errs *scrapererror.ScrapeErrors) { // Mongo version 4.4+ no longer returns numExtents since it is part of the obsolete MMAPv1 // https://www.mongodb.com/docs/manual/release-notes/4.4-compatibility/#mmapv1-cleanup mongo44, _ := version.NewVersion("4.4") @@ -132,33 +156,41 @@ func (s *mongodbScraper) recordExtentCount(now pcommon.Timestamp, doc bson.M, er metricName := "mongodb.extent.count" val, err := collectMetric(doc, metricPath) if err != nil { - errs.AddPartial(1, fmt.Errorf(collectMetricError, metricName, err)) + errs.AddPartial(1, fmt.Errorf(collectMetricWithAttributes, metricName, dbName, err)) return } - s.mb.RecordMongodbExtentCountDataPoint(now, val) + if s.removeDatabaseAttr { + s.mb.RecordMongodbExtentCountDataPoint(now, val) + } 
else { + s.mb.RecordMongodbExtentCountDataPointDatabaseAttr(now, val, dbName) + } } } // ServerStatus -func (s *mongodbScraper) recordConnections(now pcommon.Timestamp, doc bson.M, errs *scrapererror.ScrapeErrors) { +func (s *mongodbScraper) recordConnections(now pcommon.Timestamp, doc bson.M, dbName string, errs *scrapererror.ScrapeErrors) { for ctVal, ct := range metadata.MapAttributeConnectionType { metricPath := []string{"connections", ctVal} metricName := "mongodb.connection.count" - metricAttributes := ctVal + metricAttributes := fmt.Sprintf("%s, %s", ctVal, dbName) val, err := collectMetric(doc, metricPath) if err != nil { errs.AddPartial(1, fmt.Errorf(collectMetricWithAttributes, metricName, metricAttributes, err)) continue } - s.mb.RecordMongodbConnectionCountDataPoint(now, val, ct) + if s.removeDatabaseAttr { + s.mb.RecordMongodbConnectionCountDataPoint(now, val, ct) + } else { + s.mb.RecordMongodbConnectionCountDataPointDatabaseAttr(now, val, dbName, ct) + } } } -func (s *mongodbScraper) recordMemoryUsage(now pcommon.Timestamp, doc bson.M, errs *scrapererror.ScrapeErrors) { +func (s *mongodbScraper) recordMemoryUsage(now pcommon.Timestamp, doc bson.M, dbName string, errs *scrapererror.ScrapeErrors) { for mtVal, mt := range metadata.MapAttributeMemoryType { metricPath := []string{"mem", mtVal} metricName := "mongodb.memory.usage" - metricAttributes := mtVal + metricAttributes := fmt.Sprintf("%s, %s", mtVal, dbName) val, err := collectMetric(doc, metricPath) if err != nil { errs.AddPartial(1, fmt.Errorf(collectMetricWithAttributes, metricName, metricAttributes, err)) @@ -166,21 +198,29 @@ func (s *mongodbScraper) recordMemoryUsage(now pcommon.Timestamp, doc bson.M, er } // convert from mebibytes to bytes memUsageBytes := val * int64(1048576) - s.mb.RecordMongodbMemoryUsageDataPoint(now, memUsageBytes, mt) + if s.removeDatabaseAttr { + s.mb.RecordMongodbMemoryUsageDataPoint(now, memUsageBytes, mt) + } else { + 
s.mb.RecordMongodbMemoryUsageDataPointDatabaseAttr(now, memUsageBytes, dbName, mt) + } } } -func (s *mongodbScraper) recordDocumentOperations(now pcommon.Timestamp, doc bson.M, errs *scrapererror.ScrapeErrors) { +func (s *mongodbScraper) recordDocumentOperations(now pcommon.Timestamp, doc bson.M, dbName string, errs *scrapererror.ScrapeErrors) { for operationKey, metadataKey := range documentMap { metricPath := []string{"metrics", "document", operationKey} metricName := "mongodb.document.operation.count" - metricAttributes := operationKey + metricAttributes := fmt.Sprintf("%s, %s", operationKey, dbName) val, err := collectMetric(doc, metricPath) if err != nil { errs.AddPartial(1, fmt.Errorf(collectMetricWithAttributes, metricName, metricAttributes, err)) continue } - s.mb.RecordMongodbDocumentOperationCountDataPoint(now, val, metadataKey) + if s.removeDatabaseAttr { + s.mb.RecordMongodbDocumentOperationCountDataPoint(now, val, metadataKey) + } else { + s.mb.RecordMongodbDocumentOperationCountDataPointDatabaseAttr(now, val, dbName, metadataKey) + } } } @@ -351,7 +391,7 @@ func (s *mongodbScraper) recordHealth(now pcommon.Timestamp, doc bson.M, errs *s } // Lock Metrics are only supported by MongoDB v3.2+ -func (s *mongodbScraper) recordLockAcquireCounts(now pcommon.Timestamp, doc bson.M, errs *scrapererror.ScrapeErrors) { +func (s *mongodbScraper) recordLockAcquireCounts(now pcommon.Timestamp, doc bson.M, dBName string, errs *scrapererror.ScrapeErrors) { mongo32, _ := version.NewVersion("3.2") if s.mongoVersion.LessThan(mongo32) { return @@ -365,7 +405,7 @@ func (s *mongodbScraper) recordLockAcquireCounts(now pcommon.Timestamp, doc bson } metricPath := []string{"locks", lockTypeKey, "acquireCount", lockModeKey} metricName := "mongodb.lock.acquire.count" - metricAttributes := fmt.Sprintf("%s, %s", lockTypeAttribute.String(), lockModeAttribute.String()) + metricAttributes := fmt.Sprintf("%s, %s, %s", dBName, lockTypeAttribute.String(), lockModeAttribute.String()) val, 
err := collectMetric(doc, metricPath) // MongoDB only publishes this lock metric is it is available. // Do not raise error when key is not found @@ -376,12 +416,16 @@ func (s *mongodbScraper) recordLockAcquireCounts(now pcommon.Timestamp, doc bson errs.AddPartial(1, fmt.Errorf(collectMetricWithAttributes, metricName, metricAttributes, err)) continue } - s.mb.RecordMongodbLockAcquireCountDataPoint(now, val, lockTypeAttribute, lockModeAttribute) + if s.removeDatabaseAttr { + s.mb.RecordMongodbLockAcquireCountDataPoint(now, val, lockTypeAttribute, lockModeAttribute) + } else { + s.mb.RecordMongodbLockAcquireCountDataPointDatabaseAttr(now, val, dBName, lockTypeAttribute, lockModeAttribute) + } } } } -func (s *mongodbScraper) recordLockAcquireWaitCounts(now pcommon.Timestamp, doc bson.M, errs *scrapererror.ScrapeErrors) { +func (s *mongodbScraper) recordLockAcquireWaitCounts(now pcommon.Timestamp, doc bson.M, dBName string, errs *scrapererror.ScrapeErrors) { mongo32, _ := version.NewVersion("3.2") if s.mongoVersion.LessThan(mongo32) { return @@ -395,7 +439,7 @@ func (s *mongodbScraper) recordLockAcquireWaitCounts(now pcommon.Timestamp, doc } metricPath := []string{"locks", lockTypeKey, "acquireWaitCount", lockModeKey} metricName := "mongodb.lock.acquire.wait_count" - metricAttributes := fmt.Sprintf("%s, %s", lockTypeAttribute.String(), lockModeAttribute.String()) + metricAttributes := fmt.Sprintf("%s, %s, %s", dBName, lockTypeAttribute.String(), lockModeAttribute.String()) val, err := collectMetric(doc, metricPath) // MongoDB only publishes this lock metric is it is available. 
// Do not raise error when key is not found @@ -406,12 +450,16 @@ func (s *mongodbScraper) recordLockAcquireWaitCounts(now pcommon.Timestamp, doc errs.AddPartial(1, fmt.Errorf(collectMetricWithAttributes, metricName, metricAttributes, err)) continue } - s.mb.RecordMongodbLockAcquireWaitCountDataPoint(now, val, lockTypeAttribute, lockModeAttribute) + if s.removeDatabaseAttr { + s.mb.RecordMongodbLockAcquireWaitCountDataPoint(now, val, lockTypeAttribute, lockModeAttribute) + } else { + s.mb.RecordMongodbLockAcquireWaitCountDataPointDatabaseAttr(now, val, dBName, lockTypeAttribute, lockModeAttribute) + } } } } -func (s *mongodbScraper) recordLockTimeAcquiringMicros(now pcommon.Timestamp, doc bson.M, errs *scrapererror.ScrapeErrors) { +func (s *mongodbScraper) recordLockTimeAcquiringMicros(now pcommon.Timestamp, doc bson.M, dBName string, errs *scrapererror.ScrapeErrors) { mongo32, _ := version.NewVersion("3.2") if s.mongoVersion.LessThan(mongo32) { return @@ -425,7 +473,7 @@ func (s *mongodbScraper) recordLockTimeAcquiringMicros(now pcommon.Timestamp, do } metricPath := []string{"locks", lockTypeKey, "timeAcquiringMicros", lockModeKey} metricName := "mongodb.lock.acquire.time" - metricAttributes := fmt.Sprintf("%s, %s", lockTypeAttribute.String(), lockModeAttribute.String()) + metricAttributes := fmt.Sprintf("%s, %s, %s", dBName, lockTypeAttribute.String(), lockModeAttribute.String()) val, err := collectMetric(doc, metricPath) // MongoDB only publishes this lock metric is it is available. 
// Do not raise error when key is not found @@ -436,12 +484,16 @@ func (s *mongodbScraper) recordLockTimeAcquiringMicros(now pcommon.Timestamp, do errs.AddPartial(1, fmt.Errorf(collectMetricWithAttributes, metricName, metricAttributes, err)) continue } - s.mb.RecordMongodbLockAcquireTimeDataPoint(now, val, lockTypeAttribute, lockModeAttribute) + if s.removeDatabaseAttr { + s.mb.RecordMongodbLockAcquireTimeDataPoint(now, val, lockTypeAttribute, lockModeAttribute) + } else { + s.mb.RecordMongodbLockAcquireTimeDataPointDatabaseAttr(now, val, dBName, lockTypeAttribute, lockModeAttribute) + } } } } -func (s *mongodbScraper) recordLockDeadlockCount(now pcommon.Timestamp, doc bson.M, errs *scrapererror.ScrapeErrors) { +func (s *mongodbScraper) recordLockDeadlockCount(now pcommon.Timestamp, doc bson.M, dBName string, errs *scrapererror.ScrapeErrors) { mongo32, _ := version.NewVersion("3.2") if s.mongoVersion.LessThan(mongo32) { return @@ -455,7 +507,7 @@ func (s *mongodbScraper) recordLockDeadlockCount(now pcommon.Timestamp, doc bson } metricPath := []string{"locks", lockTypeKey, "deadlockCount", lockModeKey} metricName := "mongodb.lock.deadlock.count" - metricAttributes := fmt.Sprintf("%s, %s", lockTypeAttribute.String(), lockModeAttribute.String()) + metricAttributes := fmt.Sprintf("%s, %s, %s", dBName, lockTypeAttribute.String(), lockModeAttribute.String()) val, err := collectMetric(doc, metricPath) // MongoDB only publishes this lock metric is it is available. 
// Do not raise error when key is not found @@ -466,17 +518,21 @@ func (s *mongodbScraper) recordLockDeadlockCount(now pcommon.Timestamp, doc bson errs.AddPartial(1, fmt.Errorf(collectMetricWithAttributes, metricName, metricAttributes, err)) continue } - s.mb.RecordMongodbLockDeadlockCountDataPoint(now, val, lockTypeAttribute, lockModeAttribute) + if s.removeDatabaseAttr { + s.mb.RecordMongodbLockDeadlockCountDataPoint(now, val, lockTypeAttribute, lockModeAttribute) + } else { + s.mb.RecordMongodbLockDeadlockCountDataPointDatabaseAttr(now, val, dBName, lockTypeAttribute, lockModeAttribute) + } } } } // Index Stats -func (s *mongodbScraper) recordIndexAccess(now pcommon.Timestamp, documents []bson.M, collectionName string, errs *scrapererror.ScrapeErrors) { +func (s *mongodbScraper) recordIndexAccess(now pcommon.Timestamp, documents []bson.M, dbName string, collectionName string, errs *scrapererror.ScrapeErrors) { metricName := "mongodb.index.access.count" var indexAccessTotal int64 for _, doc := range documents { - metricAttributes := collectionName + metricAttributes := fmt.Sprintf("%s, %s", dbName, collectionName) indexAccess, ok := doc["accesses"].(bson.M)["ops"] if !ok { err := errors.New("could not find key for index access metric") @@ -490,7 +546,11 @@ func (s *mongodbScraper) recordIndexAccess(now pcommon.Timestamp, documents []bs } indexAccessTotal += indexAccessValue } - s.mb.RecordMongodbIndexAccessCountDataPoint(now, indexAccessTotal, collectionName) + if s.removeDatabaseAttr { + s.mb.RecordMongodbIndexAccessCountDataPoint(now, indexAccessTotal, collectionName) + } else { + s.mb.RecordMongodbIndexAccessCountDataPointDatabaseAttr(now, indexAccessTotal, dbName, collectionName) + } } // Top Stats diff --git a/receiver/mongodbreceiver/scraper.go b/receiver/mongodbreceiver/scraper.go index 4672360cce4f..8ad682c16acf 100644 --- a/receiver/mongodbreceiver/scraper.go +++ b/receiver/mongodbreceiver/scraper.go @@ -35,7 +35,7 @@ var ( featuregate.StageAlpha, 
featuregate.WithRegisterDescription("Remove duplicate database name attribute"), featuregate.WithRegisterReferenceURL("https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/24972"), - featuregate.WithRegisterFromVersion("v0.89.0")) + featuregate.WithRegisterFromVersion("v0.90.0")) ) type mongodbScraper struct { @@ -134,7 +134,7 @@ func (s *mongodbScraper) collectDatabase(ctx context.Context, now pcommon.Timest if err != nil { errs.AddPartial(1, fmt.Errorf("failed to fetch database stats metrics: %w", err)) } else { - s.recordDBStats(now, dbStats, errs) + s.recordDBStats(now, dbStats, databaseName, errs) } serverStatus, err := s.client.ServerStatus(ctx, databaseName) @@ -142,7 +142,7 @@ func (s *mongodbScraper) collectDatabase(ctx context.Context, now pcommon.Timest errs.AddPartial(1, fmt.Errorf("failed to fetch server status metrics: %w", err)) return } - s.recordNormalServerStats(now, serverStatus, errs) + s.recordNormalServerStats(now, serverStatus, databaseName, errs) rb := s.mb.NewResourceBuilder() rb.SetDatabase(databaseName) @@ -178,31 +178,35 @@ func (s *mongodbScraper) collectIndexStats(ctx context.Context, now pcommon.Time errs.AddPartial(1, fmt.Errorf("failed to fetch index stats metrics: %w", err)) return } - s.recordIndexStats(now, indexStats, collectionName, errs) + s.recordIndexStats(now, indexStats, databaseName, collectionName, errs) - rb := s.mb.NewResourceBuilder() - rb.SetDatabase(databaseName) - s.mb.EmitForResource(metadata.WithResource(rb.Emit())) + if s.removeDatabaseAttr { + rb := s.mb.NewResourceBuilder() + rb.SetDatabase(databaseName) + s.mb.EmitForResource(metadata.WithResource(rb.Emit())) + } else { + s.mb.EmitForResource() + } } -func (s *mongodbScraper) recordDBStats(now pcommon.Timestamp, doc bson.M, errs *scrapererror.ScrapeErrors) { - s.recordCollections(now, doc, errs) - s.recordDataSize(now, doc, errs) - s.recordExtentCount(now, doc, errs) - s.recordIndexSize(now, doc, errs) - s.recordIndexCount(now, doc, errs) 
- s.recordObjectCount(now, doc, errs) - s.recordStorageSize(now, doc, errs) +func (s *mongodbScraper) recordDBStats(now pcommon.Timestamp, doc bson.M, dbName string, errs *scrapererror.ScrapeErrors) { + s.recordCollections(now, doc, dbName, errs) + s.recordDataSize(now, doc, dbName, errs) + s.recordExtentCount(now, doc, dbName, errs) + s.recordIndexSize(now, doc, dbName, errs) + s.recordIndexCount(now, doc, dbName, errs) + s.recordObjectCount(now, doc, dbName, errs) + s.recordStorageSize(now, doc, dbName, errs) } -func (s *mongodbScraper) recordNormalServerStats(now pcommon.Timestamp, doc bson.M, errs *scrapererror.ScrapeErrors) { - s.recordConnections(now, doc, errs) - s.recordDocumentOperations(now, doc, errs) - s.recordMemoryUsage(now, doc, errs) - s.recordLockAcquireCounts(now, doc, errs) - s.recordLockAcquireWaitCounts(now, doc, errs) - s.recordLockTimeAcquiringMicros(now, doc, errs) - s.recordLockDeadlockCount(now, doc, errs) +func (s *mongodbScraper) recordNormalServerStats(now pcommon.Timestamp, doc bson.M, dbName string, errs *scrapererror.ScrapeErrors) { + s.recordConnections(now, doc, dbName, errs) + s.recordDocumentOperations(now, doc, dbName, errs) + s.recordMemoryUsage(now, doc, dbName, errs) + s.recordLockAcquireCounts(now, doc, dbName, errs) + s.recordLockAcquireWaitCounts(now, doc, dbName, errs) + s.recordLockTimeAcquiringMicros(now, doc, dbName, errs) + s.recordLockDeadlockCount(now, doc, dbName, errs) } func (s *mongodbScraper) recordAdminStats(now pcommon.Timestamp, document bson.M, errs *scrapererror.ScrapeErrors) { @@ -219,6 +223,6 @@ func (s *mongodbScraper) recordAdminStats(now pcommon.Timestamp, document bson.M s.recordHealth(now, document, errs) } -func (s *mongodbScraper) recordIndexStats(now pcommon.Timestamp, indexStats []bson.M, collectionName string, errs *scrapererror.ScrapeErrors) { - s.recordIndexAccess(now, indexStats, collectionName, errs) +func (s *mongodbScraper) recordIndexStats(now pcommon.Timestamp, indexStats []bson.M, 
databaseName string, collectionName string, errs *scrapererror.ScrapeErrors) { + s.recordIndexAccess(now, indexStats, databaseName, collectionName, errs) } diff --git a/receiver/mongodbreceiver/scraper_test.go b/receiver/mongodbreceiver/scraper_test.go index bcb7f94b57d9..bcfa156fd2f6 100644 --- a/receiver/mongodbreceiver/scraper_test.go +++ b/receiver/mongodbreceiver/scraper_test.go @@ -66,23 +66,23 @@ var ( "failed to collect metric mongodb.operation.count with attribute(s) update: could not find key for metric", "failed to collect metric mongodb.session.count: could not find key for metric", "failed to collect metric mongodb.operation.time: could not find key for metric", - "failed to collect metric mongodb.collection.count: could not find key for metric", - "failed to collect metric mongodb.data.size: could not find key for metric", - "failed to collect metric mongodb.extent.count: could not find key for metric", - "failed to collect metric mongodb.index.size: could not find key for metric", - "failed to collect metric mongodb.index.count: could not find key for metric", - "failed to collect metric mongodb.object.count: could not find key for metric", - "failed to collect metric mongodb.storage.size: could not find key for metric", - "failed to collect metric mongodb.connection.count with attribute(s) available: could not find key for metric", - "failed to collect metric mongodb.connection.count with attribute(s) current: could not find key for metric", - "failed to collect metric mongodb.connection.count with attribute(s) active: could not find key for metric", - "failed to collect metric mongodb.document.operation.count with attribute(s) inserted: could not find key for metric", - "failed to collect metric mongodb.document.operation.count with attribute(s) updated: could not find key for metric", - "failed to collect metric mongodb.document.operation.count with attribute(s) deleted: could not find key for metric", - "failed to collect metric 
mongodb.memory.usage with attribute(s) resident: could not find key for metric", - "failed to collect metric mongodb.memory.usage with attribute(s) virtual: could not find key for metric", - "failed to collect metric mongodb.index.access.count with attribute(s) orders: could not find key for index access metric", - "failed to collect metric mongodb.index.access.count with attribute(s) products: could not find key for index access metric", + "failed to collect metric mongodb.collection.count with attribute(s) fakedatabase: could not find key for metric", + "failed to collect metric mongodb.data.size with attribute(s) fakedatabase: could not find key for metric", + "failed to collect metric mongodb.extent.count with attribute(s) fakedatabase: could not find key for metric", + "failed to collect metric mongodb.index.size with attribute(s) fakedatabase: could not find key for metric", + "failed to collect metric mongodb.index.count with attribute(s) fakedatabase: could not find key for metric", + "failed to collect metric mongodb.object.count with attribute(s) fakedatabase: could not find key for metric", + "failed to collect metric mongodb.storage.size with attribute(s) fakedatabase: could not find key for metric", + "failed to collect metric mongodb.connection.count with attribute(s) available, fakedatabase: could not find key for metric", + "failed to collect metric mongodb.connection.count with attribute(s) current, fakedatabase: could not find key for metric", + "failed to collect metric mongodb.connection.count with attribute(s) active, fakedatabase: could not find key for metric", + "failed to collect metric mongodb.document.operation.count with attribute(s) inserted, fakedatabase: could not find key for metric", + "failed to collect metric mongodb.document.operation.count with attribute(s) updated, fakedatabase: could not find key for metric", + "failed to collect metric mongodb.document.operation.count with attribute(s) deleted, fakedatabase: could not find 
 key for metric", + "failed to collect metric mongodb.memory.usage with attribute(s) resident, fakedatabase: could not find key for metric", + "failed to collect metric mongodb.memory.usage with attribute(s) virtual, fakedatabase: could not find key for metric", + "failed to collect metric mongodb.index.access.count with attribute(s) fakedatabase, orders: could not find key for index access metric", + "failed to collect metric mongodb.index.access.count with attribute(s) fakedatabase, products: could not find key for index access metric", "failed to collect metric mongodb.operation.latency.time with attribute(s) command: could not find key for metric", "failed to collect metric mongodb.operation.latency.time with attribute(s) read: could not find key for metric", "failed to collect metric mongodb.operation.latency.time with attribute(s) write: could not find key for metric", @@ -289,6 +289,9 @@ func TestScraperScrape(t *testing.T) { scraper := newMongodbScraper(receivertest.NewNopCreateSettings(), scraperCfg) + // Set removeDatabaseAttr to true to simulate enabling removeDatabaseAttrFeatureGate + scraper.removeDatabaseAttr = true + mc := tc.setupMockClient(t) if mc != nil { scraper.client = mc