From 625c3a603cce4f43526d0ee0fa7fdae26fcc1413 Mon Sep 17 00:00:00 2001 From: jackgopack4 Date: Tue, 30 Jul 2024 11:06:38 -0400 Subject: [PATCH] squash jackgopack4 commits --- ...odeboten_update-units-processorhelper.yaml | 25 ++ .chloggen/componenttest-extra-attributes.yaml | 18 ++ .../confmap-increase-recursive-count.yaml | 25 ++ .chloggen/document_factories.yaml | 25 ++ .chloggen/document_fields.yaml | 25 ++ .chloggen/experimental_include_metadata.yaml | 25 ++ ...per-report-data-type-in-queue-metrics.yaml | 20 ++ .chloggen/exporterhelper_metric_units.yaml | 25 ++ .chloggen/fix-env-var-double-escaping.yaml | 27 ++ .chloggen/jpkroehling-grpc-statuscode.yaml | 12 + ...mx-psi_string-value-for-string-fields.yaml | 27 ++ .chloggen/mx-psi_validate-uris.yaml | 25 ++ .chloggen/ocb-migration.yaml | 25 ++ .chloggen/profiles-consumertest.yaml | 25 ++ .chloggen/receiverhelper_metric_units.yaml | 25 ++ .chloggen/scraperhelper_metric_units.yaml | 25 ++ .chloggen/service-remove-ballast-deps-2.yaml | 25 ++ .chloggen/service-remove-ballast-deps-3.yaml | 25 ++ .chloggen/service-remove-ballast-deps.yaml | 25 ++ .github/workflows/codeql-analysis.yml | 6 +- .github/workflows/scorecard.yml | 2 +- ...r-release.yaml => sourcecode-release.yaml} | 17 +- Makefile | 4 + cmd/builder/.goreleaser.yml | 39 --- cmd/builder/internal/builder/main_test.go | 2 + cmd/builder/test/core.builder.yaml | 1 + cmd/mdatagen/go.mod | 7 + cmd/otelcorecol/builder-config.yaml | 2 + cmd/otelcorecol/go.mod | 7 + component/componentprofiles/go.mod | 2 +- component/componenttest/obsreporttest.go | 7 +- .../componenttest/otelprometheuschecker.go | 6 +- component/status.go | 22 +- config/configgrpc/configgrpc.go | 7 +- config/configgrpc/configgrpc_test.go | 8 +- config/configgrpc/go.mod | 4 + config/confighttp/confighttp.go | 1 - config/confighttp/go.mod | 4 + config/internal/go.mod | 4 + confmap/confmap.go | 73 ++++- confmap/confmap_test.go | 32 +++ confmap/converter/expandconverter/expand.go | 1 + confmap/expand.go | 59 +++- confmap/expand_test.go | 43 --- confmap/internal/e2e/expand_test.go | 94 +++++++ confmap/internal/e2e/fuzz_test.go | 88 ++++++ confmap/internal/e2e/go.mod | 6 + .../e2e/testdata/expand-escaped-env.yaml | 29 ++ confmap/internal/e2e/types_test.go | 259 +++++++++++++----- confmap/provider.go | 8 +- .../configurablehttpprovider/provider.go | 5 + .../configurablehttpprovider/provider_test.go | 30 +- confmap/provider_test.go | 4 +- confmap/resolver.go | 12 +- confmap/testdata/expand-escaped-env.yaml | 31 --- connector/forwardconnector/go.mod | 7 + connector/go.mod | 6 + consumer/consumertest/Makefile | 1 + consumer/consumertest/consumer.go | 7 + consumer/consumertest/err.go | 8 +- consumer/consumertest/err_test.go | 2 + consumer/consumertest/go.mod | 40 +++ consumer/consumertest/go.sum | 77 ++++++ consumer/consumertest/nop.go | 8 +- consumer/consumertest/nop_test.go | 2 + consumer/consumertest/sink.go | 40 +++ consumer/consumertest/sink_test.go | 14 + docs/release.md | 17 +- docs/rfcs/env-vars.md | 13 +- docs/security-best-practices.md | 103 +++++++ exporter/debugexporter/go.mod | 6 + exporter/exporterhelper/common.go | 6 +- exporter/exporterhelper/documentation.md | 22 +- .../internal/metadata/generated_telemetry.go | 22 +- exporter/exporterhelper/metadata.yaml | 22 +- exporter/exporterhelper/obsexporter.go | 4 +- exporter/exporterhelper/obsreport_test.go | 1 - exporter/exporterhelper/queue_sender.go | 25 +- exporter/exporterhelper/queue_sender_test.go | 53 ++-- exporter/go.mod | 6 + exporter/internal/factory.go | 16 +- 
exporter/loggingexporter/go.mod | 7 + exporter/nopexporter/go.mod | 9 +- exporter/otlpexporter/go.mod | 6 + exporter/otlphttpexporter/go.mod | 7 + extension/ballastextension/go.mod | 4 + extension/memorylimiterextension/go.mod | 4 + extension/zpagesextension/go.mod | 4 + go.mod | 6 + internal/e2e/go.mod | 6 + internal/memorylimiter/memorylimiter.go | 23 +- internal/memorylimiter/memorylimiter_test.go | 54 ---- .../obsmetrics/obs_exporter.go | 3 + otelcol/go.mod | 6 + otelcol/otelcoltest/go.mod | 6 + processor/batchprocessor/go.mod | 6 + processor/go.mod | 6 + processor/internal/factory.go | 15 +- processor/memorylimiterprocessor/go.mod | 6 + .../memorylimiter_test.go | 105 +------ processor/processorhelper/documentation.md | 24 +- .../internal/metadata/generated_telemetry.go | 24 +- processor/processorhelper/metadata.yaml | 24 +- processor/processorhelper/obsreport.go | 58 ++-- receiver/go.mod | 7 + receiver/internal/factory.go | 15 +- receiver/nopreceiver/go.mod | 7 + receiver/otlpreceiver/go.mod | 6 + receiver/receiverhelper/documentation.md | 12 +- .../internal/metadata/generated_telemetry.go | 12 +- receiver/receiverhelper/metadata.yaml | 12 +- receiver/scraperhelper/documentation.md | 4 +- .../internal/metadata/generated_telemetry.go | 4 +- receiver/scraperhelper/metadata.yaml | 4 +- service/go.mod | 6 + service/internal/graph/graph_test.go | 2 +- .../proctelemetry/process_telemetry.go | 10 +- .../process_telemetry_linux_test.go | 2 +- .../proctelemetry/process_telemetry_test.go | 2 +- service/service.go | 11 +- service/telemetry/tracer.go | 10 +- service/telemetry/tracer_test.go | 61 +++++ versions.yaml | 1 + 123 files changed, 1849 insertions(+), 660 deletions(-) create mode 100644 .chloggen/codeboten_update-units-processorhelper.yaml create mode 100644 .chloggen/componenttest-extra-attributes.yaml create mode 100644 .chloggen/confmap-increase-recursive-count.yaml create mode 100644 .chloggen/document_factories.yaml create mode 100644 .chloggen/document_fields.yaml create mode 100644 .chloggen/experimental_include_metadata.yaml create mode 100644 .chloggen/exporterhelper-report-data-type-in-queue-metrics.yaml create mode 100644 .chloggen/exporterhelper_metric_units.yaml create mode 100644 .chloggen/fix-env-var-double-escaping.yaml create mode 100644 .chloggen/jpkroehling-grpc-statuscode.yaml create mode 100644 .chloggen/mx-psi_string-value-for-string-fields.yaml create mode 100644 .chloggen/mx-psi_validate-uris.yaml create mode 100644 .chloggen/ocb-migration.yaml create mode 100644 .chloggen/profiles-consumertest.yaml create mode 100644 .chloggen/receiverhelper_metric_units.yaml create mode 100644 .chloggen/scraperhelper_metric_units.yaml create mode 100644 .chloggen/service-remove-ballast-deps-2.yaml create mode 100644 .chloggen/service-remove-ballast-deps-3.yaml create mode 100644 .chloggen/service-remove-ballast-deps.yaml rename .github/workflows/{builder-release.yaml => sourcecode-release.yaml} (72%) delete mode 100644 cmd/builder/.goreleaser.yml create mode 100644 confmap/internal/e2e/expand_test.go create mode 100644 confmap/internal/e2e/fuzz_test.go create mode 100644 confmap/internal/e2e/testdata/expand-escaped-env.yaml delete mode 100644 confmap/testdata/expand-escaped-env.yaml create mode 100644 consumer/consumertest/Makefile create mode 100644 consumer/consumertest/go.mod create mode 100644 consumer/consumertest/go.sum create mode 100644 service/telemetry/tracer_test.go diff --git a/.chloggen/codeboten_update-units-processorhelper.yaml 
b/.chloggen/codeboten_update-units-processorhelper.yaml new file mode 100644 index 00000000000..c352ec40509 --- /dev/null +++ b/.chloggen/codeboten_update-units-processorhelper.yaml @@ -0,0 +1,25 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: bug_fix + +# The name of the component, or a single word describing the area of concern, (e.g. otlpreceiver) +component: processorhelper + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: update units for internal telemetry + +# One or more tracking issues or pull requests related to the change +issues: [10647] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [] diff --git a/.chloggen/componenttest-extra-attributes.yaml b/.chloggen/componenttest-extra-attributes.yaml new file mode 100644 index 00000000000..3683ca634b2 --- /dev/null +++ b/.chloggen/componenttest-extra-attributes.yaml @@ -0,0 +1,18 @@ +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: breaking + +# The name of the component, or a single word describing the area of concern, (e.g. otlpreceiver) +component: component/componenttest + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Add optional ...attribute.KeyValue argument to TestTelemetry.CheckExporterMetricGauge. + +# One or more tracking issues or pull requests related to the change +issues: [10593] + +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [api] diff --git a/.chloggen/confmap-increase-recursive-count.yaml b/.chloggen/confmap-increase-recursive-count.yaml new file mode 100644 index 00000000000..cf8a8fe479a --- /dev/null +++ b/.chloggen/confmap-increase-recursive-count.yaml @@ -0,0 +1,25 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: bug_fix + +# The name of the component, or a single word describing the area of concern, (e.g. otlpreceiver) +component: confmap + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Increase the amount of recursion and URI expansions allowed in a single line + +# One or more tracking issues or pull requests related to the change +issues: [10712] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. 
+# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [] diff --git a/.chloggen/document_factories.yaml b/.chloggen/document_factories.yaml new file mode 100644 index 00000000000..fffb842e1b6 --- /dev/null +++ b/.chloggen/document_factories.yaml @@ -0,0 +1,25 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: enhancement + +# The name of the component, or a single word describing the area of concern, (e.g. otlpreceiver) +component: exporter, processor, receiver + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Document factory functions. + +# One or more tracking issues or pull requests related to the change +issues: [9323] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [api] diff --git a/.chloggen/document_fields.yaml b/.chloggen/document_fields.yaml new file mode 100644 index 00000000000..cd2af1bcb6c --- /dev/null +++ b/.chloggen/document_fields.yaml @@ -0,0 +1,25 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: enhancement + +# The name of the component, or a single word describing the area of concern, (e.g. otlpreceiver) +component: component + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Document status enums and New constructors + +# One or more tracking issues or pull requests related to the change +issues: [9822] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [api] diff --git a/.chloggen/experimental_include_metadata.yaml b/.chloggen/experimental_include_metadata.yaml new file mode 100644 index 00000000000..11a936f5b8d --- /dev/null +++ b/.chloggen/experimental_include_metadata.yaml @@ -0,0 +1,25 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: enhancement + +# The name of the component, or a single word describing the area of concern, (e.g. otlpreceiver) +component: confighttp, configgrpc + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Remove the experimental comment on `IncludeMetadata` in confighttp and configgrpc + +# One or more tracking issues or pull requests related to the change +issues: [9381] + +# (Optional) One or more lines of additional information to render under the primary note. 
+# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [api] diff --git a/.chloggen/exporterhelper-report-data-type-in-queue-metrics.yaml b/.chloggen/exporterhelper-report-data-type-in-queue-metrics.yaml new file mode 100644 index 00000000000..e4065d6e4bc --- /dev/null +++ b/.chloggen/exporterhelper-report-data-type-in-queue-metrics.yaml @@ -0,0 +1,20 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: enhancement + +# The name of the component, or a single word describing the area of concern, (e.g. otlpreceiver) +component: exporterhelper + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Add data_type attribute to `otelcol_exporter_queue_size` metric to report the type of data being processed. + +# One or more tracking issues or pull requests related to the change +issues: [9943] + +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [user] diff --git a/.chloggen/exporterhelper_metric_units.yaml b/.chloggen/exporterhelper_metric_units.yaml new file mode 100644 index 00000000000..de6a645ffe3 --- /dev/null +++ b/.chloggen/exporterhelper_metric_units.yaml @@ -0,0 +1,25 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: bug_fix + +# The name of the component, or a single word describing the area of concern, (e.g. otlpreceiver) +component: exporterhelper + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Update units for internal telemetry + +# One or more tracking issues or pull requests related to the change +issues: [10648] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [] diff --git a/.chloggen/fix-env-var-double-escaping.yaml b/.chloggen/fix-env-var-double-escaping.yaml new file mode 100644 index 00000000000..5ea730a1965 --- /dev/null +++ b/.chloggen/fix-env-var-double-escaping.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: bug_fix + +# The name of the component, or a single word describing the area of concern, (e.g. otlpreceiver) +component: confmap + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). 
+note: Fix wrong expansion of environment variables escaped with `$$`, e.g. `$${ENV_VAR}` and `$$ENV_VAR`. + +# One or more tracking issues or pull requests related to the change +issues: [10713] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: | + This change fixes the issue where environment variables escaped with $$ were expanded. + The collector now converts `$${ENV_VAR}` to `${ENV_VAR}` and `$$ENV_VAR` to `$ENV_VAR` without further expansion. + +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [api] diff --git a/.chloggen/jpkroehling-grpc-statuscode.yaml b/.chloggen/jpkroehling-grpc-statuscode.yaml new file mode 100644 index 00000000000..d78ef4c7a9a --- /dev/null +++ b/.chloggen/jpkroehling-grpc-statuscode.yaml @@ -0,0 +1,12 @@ +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: 'enhancement' + +# The name of the component, or a single word describing the area of concern, (e.g. otlpreceiver) +component: configgrpc + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: gRPC auth errors now return gRPC status code UNAUTHENTICATED (16) + +# One or more tracking issues or pull requests related to the change +issues: [7646] + diff --git a/.chloggen/mx-psi_string-value-for-string-fields.yaml b/.chloggen/mx-psi_string-value-for-string-fields.yaml new file mode 100644 index 00000000000..0a7a49175c4 --- /dev/null +++ b/.chloggen/mx-psi_string-value-for-string-fields.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: breaking + +# The name of the component, or a single word describing the area of concern, (e.g. otlpreceiver) +component: confmap + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: When passing configuration for a string field using any provider, use the verbatim string representation as the value. + +# One or more tracking issues or pull requests related to the change +issues: [10605, 10405] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: | + This matches the behavior of `${ENV}` syntax prior to the promotion of the `confmap.unifyEnvVarExpansion` feature gate + to beta. It changes the behavior of the `${env:ENV}` syntax with escaped strings. + +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [] diff --git a/.chloggen/mx-psi_validate-uris.yaml b/.chloggen/mx-psi_validate-uris.yaml new file mode 100644 index 00000000000..69af525d77d --- /dev/null +++ b/.chloggen/mx-psi_validate-uris.yaml @@ -0,0 +1,25 @@ +# Use this changelog template to create an entry for release notes. 
+ +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: enhancement + +# The name of the component, or a single word describing the area of concern, (e.g. otlpreceiver) +component: httpprovider, httpsprovider + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Validate URIs in HTTP and HTTPS providers before fetching. + +# One or more tracking issues or pull requests related to the change +issues: [10468] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [] diff --git a/.chloggen/ocb-migration.yaml b/.chloggen/ocb-migration.yaml new file mode 100644 index 00000000000..1c59d893b3a --- /dev/null +++ b/.chloggen/ocb-migration.yaml @@ -0,0 +1,25 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: enhancement + +# The name of the component, or a single word describing the area of concern, (e.g. otlpreceiver) +component: ocb + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: migrate build and release of ocb binaries to opentelemetry-collector-releases repository + +# One or more tracking issues or pull requests related to the change +issues: [10710] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: ocb will be released under open-telemetry/opentelemetry-collector-releases tagged as "cmd/builder/vX.XXX.X" + +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [user] diff --git a/.chloggen/profiles-consumertest.yaml b/.chloggen/profiles-consumertest.yaml new file mode 100644 index 00000000000..b211551f461 --- /dev/null +++ b/.chloggen/profiles-consumertest.yaml @@ -0,0 +1,25 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: enhancement + +# The name of the component, or a single word describing the area of concern, (e.g. otlpreceiver) +component: consumer/consumertest + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Allow testing profiles with consumertest. + +# One or more tracking issues or pull requests related to the change +issues: [10692] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# Optional: The change log or logs in which this entry should be included. +# e.g. 
'[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [api] diff --git a/.chloggen/receiverhelper_metric_units.yaml b/.chloggen/receiverhelper_metric_units.yaml new file mode 100644 index 00000000000..3f06210459e --- /dev/null +++ b/.chloggen/receiverhelper_metric_units.yaml @@ -0,0 +1,25 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: bug_fix + +# The name of the component, or a single word describing the area of concern, (e.g. otlpreceiver) +component: receiverhelper + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Update units for internal telemetry + +# One or more tracking issues or pull requests related to the change +issues: [10650] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [] diff --git a/.chloggen/scraperhelper_metric_units.yaml b/.chloggen/scraperhelper_metric_units.yaml new file mode 100644 index 00000000000..3e754f6a991 --- /dev/null +++ b/.chloggen/scraperhelper_metric_units.yaml @@ -0,0 +1,25 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: bug_fix + +# The name of the component, or a single word describing the area of concern, (e.g. otlpreceiver) +component: scraperhelper + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Update units for internal telemetry + +# One or more tracking issues or pull requests related to the change +issues: [10649] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [] diff --git a/.chloggen/service-remove-ballast-deps-2.yaml b/.chloggen/service-remove-ballast-deps-2.yaml new file mode 100644 index 00000000000..e6377b387de --- /dev/null +++ b/.chloggen/service-remove-ballast-deps-2.yaml @@ -0,0 +1,25 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: breaking + +# The name of the component, or a single word describing the area of concern, (e.g. otlpreceiver) +component: processor/memorylimiter + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: The memory limiter processor will no longer account for ballast size. 
+ +# One or more tracking issues or pull requests related to the change +issues: [10696] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: If you are already using GOMEMLIMIT instead of the ballast extension this does not affect you. + +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [] diff --git a/.chloggen/service-remove-ballast-deps-3.yaml b/.chloggen/service-remove-ballast-deps-3.yaml new file mode 100644 index 00000000000..c07f90439f6 --- /dev/null +++ b/.chloggen/service-remove-ballast-deps-3.yaml @@ -0,0 +1,25 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: breaking + +# The name of the component, or a single word describing the area of concern, (e.g. otlpreceiver) +component: extension/memorylimiter + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: The memory limiter extension will no longer account for ballast size. + +# One or more tracking issues or pull requests related to the change +issues: [10696] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: If you are already using GOMEMLIMIT instead of the ballast extension this does not affect you. + +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [] diff --git a/.chloggen/service-remove-ballast-deps.yaml b/.chloggen/service-remove-ballast-deps.yaml new file mode 100644 index 00000000000..31ffa90c3ac --- /dev/null +++ b/.chloggen/service-remove-ballast-deps.yaml @@ -0,0 +1,25 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: breaking + +# The name of the component, or a single word describing the area of concern, (e.g. otlpreceiver) +component: service + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: The service will no longer be able to get a ballast size from the deprecated ballast extension. + +# One or more tracking issues or pull requests related to the change +issues: [10696] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: If you are already using GOMEMLIMIT instead of the ballast extension this does not affect you. + +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. 
+# Default: '[user]' +change_logs: [] diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 94ffea52618..895006c3f3a 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -30,12 +30,12 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@4fa2a7953630fd2f3fb380f21be14ede0169dd4f # v3.25.12 + uses: github/codeql-action/init@2d790406f505036ef40ecba973cc774a50395aac # v3.25.13 with: languages: go - name: Autobuild - uses: github/codeql-action/autobuild@4fa2a7953630fd2f3fb380f21be14ede0169dd4f # v3.25.12 + uses: github/codeql-action/autobuild@2d790406f505036ef40ecba973cc774a50395aac # v3.25.13 - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@4fa2a7953630fd2f3fb380f21be14ede0169dd4f # v3.25.12 + uses: github/codeql-action/analyze@2d790406f505036ef40ecba973cc774a50395aac # v3.25.13 diff --git a/.github/workflows/scorecard.yml b/.github/workflows/scorecard.yml index eeb02e0464c..9b418a3ec9f 100644 --- a/.github/workflows/scorecard.yml +++ b/.github/workflows/scorecard.yml @@ -64,6 +64,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@4fa2a7953630fd2f3fb380f21be14ede0169dd4f # v3.25.12 + uses: github/codeql-action/upload-sarif@2d790406f505036ef40ecba973cc774a50395aac # v3.25.13 with: sarif_file: results.sarif diff --git a/.github/workflows/builder-release.yaml b/.github/workflows/sourcecode-release.yaml similarity index 72% rename from .github/workflows/builder-release.yaml rename to .github/workflows/sourcecode-release.yaml index b6ae4795adb..7e767539fc9 100644 --- a/.github/workflows/builder-release.yaml +++ b/.github/workflows/sourcecode-release.yaml @@ -1,9 +1,9 @@ -name: Builder - Release +name: Source Code - Release on: push: tags: - - 'v*' + - "v*" jobs: goreleaser: @@ -13,19 +13,6 @@ jobs: uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 with: fetch-depth: 0 - - name: Setup Go - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 - with: - go-version: ~1.21.5 - - name: Run GoReleaser - uses: goreleaser/goreleaser-action@286f3b13b1b49da4ac219696163fb8c1c93e1200 # v6.0.0 - with: - distribution: goreleaser-pro - version: latest - args: release --clean -f cmd/builder/.goreleaser.yml - env: - GORELEASER_KEY: ${{ secrets.GORELEASER_KEY }} - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - name: Create Github Release run: | gh release create ${{ github.ref_name }} -t ${{ github.ref_name }} -n "### Images and binaries here: https://github.com/open-telemetry/opentelemetry-collector-releases/releases/tag/${{ github.ref_name }}" diff --git a/Makefile b/Makefile index 46b5cffb5dc..a89ad8e23f4 100644 --- a/Makefile +++ b/Makefile @@ -278,6 +278,8 @@ check-contrib: -replace go.opentelemetry.io/collector/connector=$(CURDIR)/connector \ -replace go.opentelemetry.io/collector/connector/forwardconnector=$(CURDIR)/connector/forwardconnector \ -replace go.opentelemetry.io/collector/consumer=$(CURDIR)/consumer \ + -replace go.opentelemetry.io/collector/consumer/consumerprofiles=$(CURDIR)/consumer/consumerprofiles \ + -replace go.opentelemetry.io/collector/consumer/consumertest=$(CURDIR)/consumer/consumertest \ -replace go.opentelemetry.io/collector/exporter=$(CURDIR)/exporter \ -replace go.opentelemetry.io/collector/exporter/debugexporter=$(CURDIR)/exporter/debugexporter \ -replace 
go.opentelemetry.io/collector/exporter/loggingexporter=$(CURDIR)/exporter/loggingexporter \ @@ -337,6 +339,8 @@ restore-contrib: -dropreplace go.opentelemetry.io/collector/connector \ -dropreplace go.opentelemetry.io/collector/connector/forwardconnector \ -dropreplace go.opentelemetry.io/collector/consumer \ + -dropreplace go.opentelemetry.io/collector/consumer/consumerprofiles \ + -dropreplace go.opentelemetry.io/collector/consumer/consumertest \ -dropreplace go.opentelemetry.io/collector/exporter \ -dropreplace go.opentelemetry.io/collector/exporter/debugexporter \ -dropreplace go.opentelemetry.io/collector/exporter/loggingexporter \ diff --git a/cmd/builder/.goreleaser.yml b/cmd/builder/.goreleaser.yml deleted file mode 100644 index a964ac6c6ee..00000000000 --- a/cmd/builder/.goreleaser.yml +++ /dev/null @@ -1,39 +0,0 @@ -before: - hooks: - - go mod download -monorepo: - tag_prefix: cmd/builder/ - dir: cmd/builder -builds: - - flags: - - -trimpath - ldflags: - - -s -w -X go.opentelemetry.io/collector/cmd/builder/internal.version={{.Version}} -X go.opentelemetry.io/collector/cmd/builder/internal.date={{.Date}} - env: - - CGO_ENABLED=0 - goos: - - linux - - windows - - darwin - goarch: - - amd64 - - arm64 - - ppc64le - ignore: - - goos: windows - goarch: arm64 - binary: ocb -release: - github: - owner: open-telemetry - name: opentelemetry-collector - header: | - ### Images and binaries here: https://github.com/open-telemetry/opentelemetry-collector-releases/releases/tag/{{ .Tag }} -archives: - - format: binary -checksum: - name_template: "checksums.txt" -snapshot: - name_template: "{{ .Tag }}-next" -changelog: - disable: true diff --git a/cmd/builder/internal/builder/main_test.go b/cmd/builder/internal/builder/main_test.go index d2c06279eb1..44e1b37513e 100644 --- a/cmd/builder/internal/builder/main_test.go +++ b/cmd/builder/internal/builder/main_test.go @@ -59,6 +59,8 @@ var ( "/confmap/provider/httpsprovider", "/confmap/provider/yamlprovider", "/consumer", + "/consumer/consumerprofiles", + "/consumer/consumertest", "/connector", "/exporter", "/exporter/debugexporter", diff --git a/cmd/builder/test/core.builder.yaml b/cmd/builder/test/core.builder.yaml index 03311f9c3fe..1b2ba855916 100644 --- a/cmd/builder/test/core.builder.yaml +++ b/cmd/builder/test/core.builder.yaml @@ -38,6 +38,7 @@ replaces: - go.opentelemetry.io/collector/confmap/provider/httpsprovider => ${WORKSPACE_DIR}/confmap/provider/httpsprovider - go.opentelemetry.io/collector/confmap/provider/yamlprovider => ${WORKSPACE_DIR}/confmap/provider/yamlprovider - go.opentelemetry.io/collector/consumer => ${WORKSPACE_DIR}/consumer + - go.opentelemetry.io/collector/consumer/consumertest => ${WORKSPACE_DIR}/consumer/consumertest - go.opentelemetry.io/collector/connector => ${WORKSPACE_DIR}/connector - go.opentelemetry.io/collector/exporter => ${WORKSPACE_DIR}/exporter - go.opentelemetry.io/collector/exporter/debugexporter => ${WORKSPACE_DIR}/exporter/debugexporter diff --git a/cmd/mdatagen/go.mod b/cmd/mdatagen/go.mod index 61486b04514..f52462b4e55 100644 --- a/cmd/mdatagen/go.mod +++ b/cmd/mdatagen/go.mod @@ -10,6 +10,7 @@ require ( go.opentelemetry.io/collector/confmap v0.105.0 go.opentelemetry.io/collector/confmap/provider/fileprovider v0.105.0 go.opentelemetry.io/collector/consumer v0.105.0 + go.opentelemetry.io/collector/consumer/consumertest v0.105.0 go.opentelemetry.io/collector/filter v0.105.0 go.opentelemetry.io/collector/pdata v1.12.0 go.opentelemetry.io/collector/receiver v0.105.0 @@ -46,8 +47,10 @@ require ( 
github.com/prometheus/client_model v0.6.1 // indirect github.com/prometheus/common v0.55.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect + go.opentelemetry.io/collector/consumer/consumerprofiles v0.105.0 // indirect go.opentelemetry.io/collector/featuregate v1.12.0 // indirect go.opentelemetry.io/collector/internal/globalgates v0.105.0 // indirect + go.opentelemetry.io/collector/pdata/pprofile v0.105.0 // indirect go.opentelemetry.io/otel v1.28.0 // indirect go.opentelemetry.io/otel/exporters/prometheus v0.50.0 // indirect go.opentelemetry.io/otel/sdk v1.28.0 // indirect @@ -93,3 +96,7 @@ retract ( replace go.opentelemetry.io/collector/pdata/pprofile => ../../pdata/pprofile replace go.opentelemetry.io/collector/internal/globalgates => ../../internal/globalgates + +replace go.opentelemetry.io/collector/consumer/consumerprofiles => ../../consumer/consumerprofiles + +replace go.opentelemetry.io/collector/consumer/consumertest => ../../consumer/consumertest diff --git a/cmd/otelcorecol/builder-config.yaml b/cmd/otelcorecol/builder-config.yaml index 5c9055848f0..e63f908086e 100644 --- a/cmd/otelcorecol/builder-config.yaml +++ b/cmd/otelcorecol/builder-config.yaml @@ -62,6 +62,8 @@ replaces: - go.opentelemetry.io/collector/confmap/provider/httpsprovider => ../../confmap/provider/httpsprovider - go.opentelemetry.io/collector/confmap/provider/yamlprovider => ../../confmap/provider/yamlprovider - go.opentelemetry.io/collector/consumer => ../../consumer + - go.opentelemetry.io/collector/consumer/consumerprofiles => ../../consumer/consumerprofiles + - go.opentelemetry.io/collector/consumer/consumertest => ../../consumer/consumertest - go.opentelemetry.io/collector/connector => ../../connector - go.opentelemetry.io/collector/connector/forwardconnector => ../../connector/forwardconnector - go.opentelemetry.io/collector/exporter => ../../exporter diff --git a/cmd/otelcorecol/go.mod b/cmd/otelcorecol/go.mod index acda68e7df2..9eec0fe1d67 100644 --- a/cmd/otelcorecol/go.mod +++ b/cmd/otelcorecol/go.mod @@ -92,10 +92,13 @@ require ( go.opentelemetry.io/collector/config/configtls v1.12.0 // indirect go.opentelemetry.io/collector/config/internal v0.105.0 // indirect go.opentelemetry.io/collector/consumer v0.105.0 // indirect + go.opentelemetry.io/collector/consumer/consumerprofiles v0.105.0 // indirect + go.opentelemetry.io/collector/consumer/consumertest v0.105.0 // indirect go.opentelemetry.io/collector/extension/auth v0.105.0 // indirect go.opentelemetry.io/collector/featuregate v1.12.0 // indirect go.opentelemetry.io/collector/internal/globalgates v0.105.0 // indirect go.opentelemetry.io/collector/pdata v1.12.0 // indirect + go.opentelemetry.io/collector/pdata/pprofile v0.105.0 // indirect go.opentelemetry.io/collector/semconv v0.105.0 // indirect go.opentelemetry.io/collector/service v0.105.0 // indirect go.opentelemetry.io/contrib/config v0.8.0 // indirect @@ -178,6 +181,10 @@ replace go.opentelemetry.io/collector/confmap/provider/yamlprovider => ../../con replace go.opentelemetry.io/collector/consumer => ../../consumer +replace go.opentelemetry.io/collector/consumer/consumerprofiles => ../../consumer/consumerprofiles + +replace go.opentelemetry.io/collector/consumer/consumertest => ../../consumer/consumertest + replace go.opentelemetry.io/collector/connector => ../../connector replace go.opentelemetry.io/collector/connector/forwardconnector => ../../connector/forwardconnector diff --git a/component/componentprofiles/go.mod b/component/componentprofiles/go.mod index 
397fce1d779..7d1f8fa6103 100644 --- a/component/componentprofiles/go.mod +++ b/component/componentprofiles/go.mod @@ -2,7 +2,7 @@ module go.opentelemetry.io/collector/component/componentprofiles go 1.21.0 -require go.opentelemetry.io/collector/component v0.104.0 +require go.opentelemetry.io/collector/component v0.105.0 require ( github.com/gogo/protobuf v1.3.2 // indirect diff --git a/component/componenttest/obsreporttest.go b/component/componenttest/obsreporttest.go index ba076c65905..652db62529f 100644 --- a/component/componenttest/obsreporttest.go +++ b/component/componenttest/obsreporttest.go @@ -8,6 +8,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" + "go.opentelemetry.io/otel/attribute" otelprom "go.opentelemetry.io/otel/exporters/prometheus" sdkmetric "go.opentelemetry.io/otel/sdk/metric" "go.opentelemetry.io/otel/sdk/resource" @@ -72,8 +73,10 @@ func (tts *TestTelemetry) CheckExporterLogs(sentLogRecords, sendFailedLogRecords return tts.prometheusChecker.checkExporterLogs(tts.id, sentLogRecords, sendFailedLogRecords) } -func (tts *TestTelemetry) CheckExporterMetricGauge(metric string, val int64) error { - return tts.prometheusChecker.checkExporterMetricGauge(tts.id, metric, val) +func (tts *TestTelemetry) CheckExporterMetricGauge(metric string, val int64, extraAttrs ...attribute.KeyValue) error { + attrs := attributesForExporterMetrics(tts.id) + attrs = append(attrs, extraAttrs...) + return tts.prometheusChecker.checkGauge(metric, val, attrs) } // CheckProcessorTraces checks that for the current exported values for trace exporter metrics match given values. diff --git a/component/componenttest/otelprometheuschecker.go b/component/componenttest/otelprometheuschecker.go index f7a0e548dfe..6a63617c206 100644 --- a/component/componenttest/otelprometheuschecker.go +++ b/component/componenttest/otelprometheuschecker.go @@ -100,10 +100,8 @@ func (pc *prometheusChecker) checkExporterEnqueueFailed(exporter component.ID, d return pc.checkCounter(fmt.Sprintf("exporter_enqueue_failed_%s", datatype), enqueueFailed, exporterAttrs) } -func (pc *prometheusChecker) checkExporterMetricGauge(exporter component.ID, metric string, val int64) error { - exporterAttrs := attributesForExporterMetrics(exporter) - - ts, err := pc.getMetric(metric, io_prometheus_client.MetricType_GAUGE, exporterAttrs) +func (pc *prometheusChecker) checkGauge(metric string, val int64, attrs []attribute.KeyValue) error { + ts, err := pc.getMetric(metric, io_prometheus_client.MetricType_GAUGE, attrs) if err != nil { return err } diff --git a/component/status.go b/component/status.go index 8cd4d802644..60894a217ca 100644 --- a/component/status.go +++ b/component/status.go @@ -11,13 +11,21 @@ type Status int32 // Enumeration of possible component statuses const ( + // StatusNone indicates absence of component status. StatusNone Status = iota + // StatusStarting indicates the component is starting. StatusStarting + // StatusOK indicates the component is running without issues. StatusOK + // StatusRecoverableError indicates that the component has experienced a transient error and may recover. StatusRecoverableError + // StatusPermanentError indicates that the component has detected a condition at runtime that will need human intervention to fix. The collector will continue to run in a degraded mode. StatusPermanentError + // StatusFatalError indicates that the collector has experienced a fatal runtime error and will shut down. 
StatusFatalError + // StatusStopping indicates that the component is in the process of shutting down. StatusStopping + // StatusStopped indicates that the component has completed shutdown. StatusStopped ) @@ -74,24 +82,26 @@ func NewStatusEvent(status Status) *StatusEvent { } } -// NewRecoverableErrorEvent creates and returns a StatusEvent with StatusRecoverableError, the -// specified error, and a timestamp set to time.Now(). +// NewRecoverableErrorEvent wraps a transient error +// passed as argument as a StatusEvent with a status StatusRecoverableError +// and a timestamp set to time.Now(). func NewRecoverableErrorEvent(err error) *StatusEvent { ev := NewStatusEvent(StatusRecoverableError) ev.err = err return ev } -// NewPermanentErrorEvent creates and returns a StatusEvent with StatusPermanentError, the -// specified error, and a timestamp set to time.Now(). +// NewPermanentErrorEvent wraps an error requiring human intervention to fix +// passed as argument as a StatusEvent with a status StatusPermanentError +// and a timestamp set to time.Now(). func NewPermanentErrorEvent(err error) *StatusEvent { ev := NewStatusEvent(StatusPermanentError) ev.err = err return ev } -// NewFatalErrorEvent creates and returns a StatusEvent with StatusFatalError, the -// specified error, and a timestamp set to time.Now(). +// NewFatalErrorEvent wraps the fatal runtime error passed as argument as a StatusEvent +// with a status StatusFatalError and a timestamp set to time.Now(). func NewFatalErrorEvent(err error) *StatusEvent { ev := NewStatusEvent(StatusFatalError) ev.err = err diff --git a/config/configgrpc/configgrpc.go b/config/configgrpc/configgrpc.go index 2bffbe4993f..951aa93a87c 100644 --- a/config/configgrpc/configgrpc.go +++ b/config/configgrpc/configgrpc.go @@ -17,12 +17,14 @@ import ( "go.opentelemetry.io/otel" "google.golang.org/grpc" "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/encoding/gzip" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" + "google.golang.org/grpc/status" "go.opentelemetry.io/collector/client" "go.opentelemetry.io/collector/component" @@ -188,7 +190,6 @@ type ServerConfig struct { Auth *configauth.Authentication `mapstructure:"auth"` // Include propagates the incoming connection's metadata to downstream consumers. - // Experimental: *NOTE* this option is subject to change or removal in the future. 
IncludeMetadata bool `mapstructure:"include_metadata"` } @@ -479,7 +480,7 @@ func authUnaryServerInterceptor(ctx context.Context, req any, _ *grpc.UnaryServe ctx, err := server.Authenticate(ctx, headers) if err != nil { - return nil, err + return nil, status.Error(codes.Unauthenticated, err.Error()) } return handler(ctx, req) @@ -494,7 +495,7 @@ func authStreamServerInterceptor(srv any, stream grpc.ServerStream, _ *grpc.Stre ctx, err := server.Authenticate(ctx, headers) if err != nil { - return err + return status.Error(codes.Unauthenticated, err.Error()) } return handler(srv, wrapServerStream(ctx, stream)) diff --git a/config/configgrpc/configgrpc_test.go b/config/configgrpc/configgrpc_test.go index 0b2b1cf4cd2..1769733dea6 100644 --- a/config/configgrpc/configgrpc_test.go +++ b/config/configgrpc/configgrpc_test.go @@ -19,8 +19,10 @@ import ( "go.uber.org/zap/zaptest/observer" "google.golang.org/grpc" "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" + "google.golang.org/grpc/status" "go.opentelemetry.io/collector/client" "go.opentelemetry.io/collector/component" @@ -1022,7 +1024,8 @@ func TestDefaultUnaryInterceptorAuthFailure(t *testing.T) { // verify assert.Nil(t, res) - assert.Equal(t, expectedErr, err) + assert.ErrorContains(t, err, expectedErr.Error()) + assert.Equal(t, codes.Unauthenticated, status.Code(err)) assert.True(t, authCalled) } @@ -1098,7 +1101,8 @@ func TestDefaultStreamInterceptorAuthFailure(t *testing.T) { err := authStreamServerInterceptor(nil, streamServer, &grpc.StreamServerInfo{}, handler, auth.NewServer(auth.WithServerAuthenticate(authFunc))) // verify - assert.Equal(t, expectedErr, err) + assert.ErrorContains(t, err, expectedErr.Error()) // unfortunately, grpc errors don't wrap the original ones + assert.Equal(t, codes.Unauthenticated, status.Code(err)) assert.True(t, authCalled) } diff --git a/config/configgrpc/go.mod b/config/configgrpc/go.mod index 6db35d3bebc..99db60086ae 100644 --- a/config/configgrpc/go.mod +++ b/config/configgrpc/go.mod @@ -105,3 +105,7 @@ replace go.opentelemetry.io/collector/pdata/pprofile => ../../pdata/pprofile replace go.opentelemetry.io/collector/component => ../../component replace go.opentelemetry.io/collector/consumer => ../../consumer + +replace go.opentelemetry.io/collector/consumer/consumerprofiles => ../../consumer/consumerprofiles + +replace go.opentelemetry.io/collector/consumer/consumertest => ../../consumer/consumertest diff --git a/config/confighttp/confighttp.go b/config/confighttp/confighttp.go index 2a1960e05fd..5421ddab708 100644 --- a/config/confighttp/confighttp.go +++ b/config/confighttp/confighttp.go @@ -297,7 +297,6 @@ type ServerConfig struct { MaxRequestBodySize int64 `mapstructure:"max_request_body_size"` // IncludeMetadata propagates the client metadata from the incoming requests to the downstream consumers - // Experimental: *NOTE* this option is subject to change or removal in the future. IncludeMetadata bool `mapstructure:"include_metadata"` // Additional headers attached to each HTTP response sent to the client. 
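A note on the configgrpc change above: authentication failures from the unary and stream server interceptors are now wrapped with status.Error(codes.Unauthenticated, ...) instead of being returned verbatim, so gRPC clients can branch on the status code rather than matching error strings (the updated tests assert codes.Unauthenticated via status.Code). A minimal, illustrative sketch of caller-side handling follows; it is not part of this patch, and the isAuthFailure helper is hypothetical:

	package main

	import (
		"errors"
		"fmt"

		"google.golang.org/grpc/codes"
		"google.golang.org/grpc/status"
	)

	// isAuthFailure reports whether err carries the UNAUTHENTICATED (16) status that
	// configgrpc's auth interceptors now return when the server authenticator rejects a request.
	func isAuthFailure(err error) bool {
		return status.Code(err) == codes.Unauthenticated
	}

	func main() {
		// Simulate the error shape produced by authUnaryServerInterceptor above.
		err := status.Error(codes.Unauthenticated, "missing or invalid token")
		fmt.Println(isAuthFailure(err))                 // true
		fmt.Println(isAuthFailure(errors.New("other"))) // false: non-status errors map to codes.Unknown
	}
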
diff --git a/config/confighttp/go.mod b/config/confighttp/go.mod index f8ea2713bef..ca5d8ac8b63 100644 --- a/config/confighttp/go.mod +++ b/config/confighttp/go.mod @@ -98,3 +98,7 @@ replace go.opentelemetry.io/collector/consumer => ../../consumer replace go.opentelemetry.io/collector/pdata/testdata => ../../pdata/testdata replace go.opentelemetry.io/collector/pdata/pprofile => ../../pdata/pprofile + +replace go.opentelemetry.io/collector/consumer/consumerprofiles => ../../consumer/consumerprofiles + +replace go.opentelemetry.io/collector/consumer/consumertest => ../../consumer/consumertest diff --git a/config/internal/go.mod b/config/internal/go.mod index 048cc57e806..757d6fff8ff 100644 --- a/config/internal/go.mod +++ b/config/internal/go.mod @@ -37,3 +37,7 @@ replace go.opentelemetry.io/collector/pdata/testdata => ../../pdata/testdata replace go.opentelemetry.io/collector/pdata/pprofile => ../../pdata/pprofile replace go.opentelemetry.io/collector/internal/globalgates => ../../internal/globalgates + +replace go.opentelemetry.io/collector/consumer/consumerprofiles => ../../consumer/consumerprofiles + +replace go.opentelemetry.io/collector/consumer/consumertest => ../../consumer/consumertest diff --git a/confmap/confmap.go b/confmap/confmap.go index f22d3f3f242..59524527b06 100644 --- a/confmap/confmap.go +++ b/confmap/confmap.go @@ -108,9 +108,34 @@ func (l *Conf) Marshal(rawVal any, _ ...MarshalOption) error { return l.Merge(NewFromStringMap(out)) } +func (l *Conf) unsanitizedGet(key string) any { + return l.k.Get(key) +} + +func sanitize(a any) any { + switch m := a.(type) { + case map[string]any: + c := maps.Copy(m) + for k, v := range m { + c[k] = sanitize(v) + } + return c + case []any: + var newSlice []any + for _, e := range m { + newSlice = append(newSlice, sanitize(e)) + } + return newSlice + case expandedValue: + return m.Value + } + return a +} + // Get can retrieve any value given the key to use. func (l *Conf) Get(key string) any { - return l.k.Get(key) + val := l.unsanitizedGet(key) + return sanitize(val) } // IsSet checks to see if the key has been set in any of the data locations. @@ -128,7 +153,7 @@ func (l *Conf) Merge(in *Conf) error { // It returns an error is the sub-config is not a map[string]any (use Get()), and an empty Map if none exists. func (l *Conf) Sub(key string) (*Conf, error) { // Code inspired by the koanf "Cut" func, but returns an error instead of empty map for unsupported sub-config type. - data := l.Get(key) + data := l.unsanitizedGet(key) if data == nil { return New(), nil } @@ -140,9 +165,14 @@ func (l *Conf) Sub(key string) (*Conf, error) { return nil, fmt.Errorf("unexpected sub-config value kind for key:%s value:%v kind:%v", key, data, reflect.TypeOf(data).Kind()) } +func (l *Conf) toStringMapWithExpand() map[string]any { + m := maps.Unflatten(l.k.All(), KeyDelimiter) + return m +} + // ToStringMap creates a map[string]any from a Parser. 
func (l *Conf) ToStringMap() map[string]any { - return maps.Unflatten(l.k.All(), KeyDelimiter) + return sanitize(l.toStringMapWithExpand()).(map[string]any) } // decodeConfig decodes the contents of the Conf into the result argument, using a @@ -160,6 +190,7 @@ func decodeConfig(m *Conf, result any, errorUnused bool, skipTopLevelUnmarshaler WeaklyTypedInput: !globalgates.StrictlyTypedInputGate.IsEnabled(), MatchName: caseSensitiveMatchName, DecodeHook: mapstructure.ComposeDecodeHookFunc( + useExpandValue(), expandNilStructPointersHookFunc(), mapstructure.StringToSliceHookFunc(","), mapKeyStringToMapKeyTextUnmarshalerHookFunc(), @@ -177,7 +208,7 @@ func decodeConfig(m *Conf, result any, errorUnused bool, skipTopLevelUnmarshaler if err != nil { return err } - if err = decoder.Decode(m.ToStringMap()); err != nil { + if err = decoder.Decode(m.toStringMapWithExpand()); err != nil { if strings.HasPrefix(err.Error(), "error decoding ''") { return errors.Unwrap(err) } @@ -206,6 +237,40 @@ func caseSensitiveMatchName(a, b string) bool { return a == b } +func castTo(exp expandedValue, useOriginal bool) (any, error) { + // If the target field is a string, use `exp.Original` or fail if not available. + if globalgates.StrictlyTypedInputGate.IsEnabled() && useOriginal { + if !exp.HasOriginal { + return nil, fmt.Errorf("cannot expand value to string: original value not set") + } + return exp.Original, nil + } + // Otherwise, use the parsed value (previous behavior). + return exp.Value, nil +} + +// When a value has been loaded from an external source via a provider, we keep both the +// parsed value and the original string value. This allows us to expand the value to its +// original string representation when decoding into a string field, and use the original otherwise. +func useExpandValue() mapstructure.DecodeHookFuncType { + return func( + _ reflect.Type, + to reflect.Type, + data any) (any, error) { + if exp, ok := data.(expandedValue); ok { + return castTo(exp, to.Kind() == reflect.String) + } + + // If the target field is a map or slice, sanitize input to remove expandedValue references. + switch to.Kind() { + case reflect.Array, reflect.Slice, reflect.Map: + // This does not handle map[string]string and []string explicitly. 
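+			// Sanitizing here recursively unwraps any nested expandedValue back to its parsed Value,
+			// so collection elements decode as plain YAML-typed values rather than wrapper structs.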
+ return sanitize(data), nil + } + return data, nil + } +} + // In cases where a config has a mapping of something to a struct pointers // we want nil values to resolve to a pointer to the zero value of the // underlying struct just as we want nil values of a mapping of something diff --git a/confmap/confmap_test.go b/confmap/confmap_test.go index 5a93975ae2a..713583a7115 100644 --- a/confmap/confmap_test.go +++ b/confmap/confmap_test.go @@ -845,3 +845,35 @@ func TestRecursiveUnmarshaling(t *testing.T) { require.NoError(t, conf.Unmarshal(r)) require.Equal(t, "something", r.Foo) } + +func TestExpandedValue(t *testing.T) { + cm := NewFromStringMap(map[string]any{ + "key": expandedValue{ + Value: 0xdeadbeef, + HasOriginal: true, + Original: "original", + }}) + assert.Equal(t, 0xdeadbeef, cm.Get("key")) + assert.Equal(t, map[string]any{"key": 0xdeadbeef}, cm.ToStringMap()) + + type ConfigStr struct { + Key string `mapstructure:"key"` + } + + cfgStr := ConfigStr{} + assert.NoError(t, cm.Unmarshal(&cfgStr)) + assert.Equal(t, "original", cfgStr.Key) + + type ConfigInt struct { + Key int `mapstructure:"key"` + } + cfgInt := ConfigInt{} + assert.NoError(t, cm.Unmarshal(&cfgInt)) + assert.Equal(t, 0xdeadbeef, cfgInt.Key) + + type ConfigBool struct { + Key bool `mapstructure:"key"` + } + cfgBool := ConfigBool{} + assert.Error(t, cm.Unmarshal(&cfgBool)) +} diff --git a/confmap/converter/expandconverter/expand.go b/confmap/converter/expandconverter/expand.go index e603ca9ce04..2c4af613499 100644 --- a/confmap/converter/expandconverter/expand.go +++ b/confmap/converter/expandconverter/expand.go @@ -85,6 +85,7 @@ func (c converter) expandEnv(s string) (string, error) { // - $FOO will be substituted with env var FOO // - $$FOO will be replaced with $FOO // - $$$FOO will be replaced with $ + substituted env var FOO + // TODO: Move the escaping of $$ out from the expand converter to the resolver. if str == "$" { return "$" } diff --git a/confmap/expand.go b/confmap/expand.go index d006686bc2a..56dc512382b 100644 --- a/confmap/expand.go +++ b/confmap/expand.go @@ -28,7 +28,7 @@ var ( ) func (mr *Resolver) expandValueRecursively(ctx context.Context, value any) (any, error) { - for i := 0; i < 100; i++ { + for i := 0; i < 1000; i++ { val, changed, err := mr.expandValue(ctx, value) if err != nil { return nil, err @@ -43,6 +43,40 @@ func (mr *Resolver) expandValueRecursively(ctx context.Context, value any) (any, func (mr *Resolver) expandValue(ctx context.Context, value any) (any, bool, error) { switch v := value.(type) { + case expandedValue: + expanded, changed, err := mr.expandValue(ctx, v.Value) + if err != nil { + return nil, false, err + } + + switch exp := expanded.(type) { + case expandedValue, string: + // Return expanded values or strings verbatim. + return exp, changed, nil + } + + // At this point we don't know the target field type, so we need to expand the original representation as well. + originalExpanded, originalChanged, err := mr.expandValue(ctx, v.Original) + if err != nil { + return nil, false, err + } + + if originalExpanded, ok := originalExpanded.(string); ok { + // If the original representation is a string, return the expanded value with the original representation. 
+ return expandedValue{ + Value: expanded, + Original: originalExpanded, + HasOriginal: true, + }, changed || originalChanged, nil + } + + result := expandedValue{ + Value: expanded, + Original: v.Original, + HasOriginal: v.HasOriginal, + } + + return result, changed || originalChanged, nil case string: if !strings.Contains(v, "${") || !strings.Contains(v, "}") { // No URIs to expand. @@ -117,6 +151,20 @@ func (mr *Resolver) findURI(input string) string { return input[openIndex : closeIndex+1] } +// expandedValue holds the YAML parsed value and original representation of a value. +// It keeps track of the original representation to be used by the 'useExpandValue' hook +// if the target field is a string. We need to keep both representations because we don't know +// what the target field type is until `Unmarshal` is called. +type expandedValue struct { + // Value is the expanded value. + Value any + // HasOriginal is true if the original representation is set. + HasOriginal bool + // Original is the original representation of the value. + // It is only valid if HasOriginal is true. + Original string +} + // findAndExpandURI attempts to find and expand the first occurrence of an expandable URI in input. If an expandable URI is found it // returns the input with the URI expanded, true and nil. Otherwise, it returns the unchanged input, false and the expanding error. // This method expects input to start with ${ and end with } @@ -134,10 +182,17 @@ func (mr *Resolver) findAndExpandURI(ctx context.Context, input string) (any, bo return input, false, err } - expanded, err := ret.AsRaw() + expanded := expandedValue{} + expanded.Value, err = ret.AsRaw() if err != nil { return input, false, err } + + if asStr, err2 := ret.AsString(); err2 == nil { + expanded.HasOriginal = true + expanded.Original = asStr + } + return expanded, true, err } expanded, err := mr.expandURI(ctx, uri) diff --git a/confmap/expand_test.go b/confmap/expand_test.go index dd406922948..53b244374b1 100644 --- a/confmap/expand_test.go +++ b/confmap/expand_test.go @@ -577,46 +577,3 @@ func TestResolverDefaultProviderExpand(t *testing.T) { require.NoError(t, err) assert.Equal(t, map[string]any{"foo": "localhost"}, cfgMap.ToStringMap()) } - -func Test_EscapedEnvVars(t *testing.T) { - const mapValue2 = "some map value" - - expectedMap := map[string]any{ - "test_map": map[string]any{ - "recv.1": "$MAP_VALUE_1", - "recv.2": "$$MAP_VALUE_2", - "recv.3": "$$MAP_VALUE_3", - "recv.4": "$" + mapValue2, - "recv.5": "some${MAP_VALUE_4}text", - "recv.6": "${ONE}${TWO}", - "recv.7": "text$", - "recv.8": "$", - "recv.9": "${1}${env:2}", - "recv.10": "some${env:MAP_VALUE_4}text", - "recv.11": "${env:" + mapValue2 + "}", - "recv.12": "${env:${MAP_VALUE_2}}", - "recv.13": "env:MAP_VALUE_2}${MAP_VALUE_2}{", - "recv.14": "${env:MAP_VALUE_2${MAP_VALUE_2}", - "recv.15": "$" + mapValue2, - }} - - fileProvider := newFakeProvider("file", func(_ context.Context, uri string, _ WatcherFunc) (*Retrieved, error) { - return NewRetrieved(newConfFromFile(t, uri[5:])) - }) - envProvider := newFakeProvider("env", func(_ context.Context, uri string, _ WatcherFunc) (*Retrieved, error) { - if uri == "env:MAP_VALUE_2" { - return NewRetrieved(mapValue2) - } - return nil, errors.New("should not be expanding any other env vars") - }) - - resolver, err := NewResolver(ResolverSettings{URIs: []string{filepath.Join("testdata", "expand-escaped-env.yaml")}, ProviderFactories: []ProviderFactory{fileProvider, envProvider}, ConverterFactories: nil, DefaultScheme: "env"}) - 
require.NoError(t, err) - - // Test that expanded configs are the same with the simple config with no env vars. - cfgMap, err := resolver.Resolve(context.Background()) - require.NoError(t, err) - m := cfgMap.ToStringMap() - assert.Equal(t, expectedMap, m) - -} diff --git a/confmap/internal/e2e/expand_test.go b/confmap/internal/e2e/expand_test.go new file mode 100644 index 00000000000..018725d30d5 --- /dev/null +++ b/confmap/internal/e2e/expand_test.go @@ -0,0 +1,94 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package e2etest + +import ( + "context" + "fmt" + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/collector/confmap" + "go.opentelemetry.io/collector/confmap/converter/expandconverter" + "go.opentelemetry.io/collector/confmap/provider/envprovider" + "go.opentelemetry.io/collector/confmap/provider/fileprovider" + "go.opentelemetry.io/collector/confmap/provider/yamlprovider" +) + +// Test_EscapedEnvVars tests that the resolver supports escaped env vars working together with expand converter. +func Test_EscapedEnvVars(t *testing.T) { + tests := []struct { + name string + scheme string + }{ + { + name: "no_default_scheme", + scheme: "", + }, + { + name: "env", + scheme: "env", + }, + } + + const expandedValue = "some expanded value" + t.Setenv("ENV_VALUE", expandedValue) + + expectedFailures := map[string]string{ + "$ENV_VALUE": "variable substitution using $VAR has been deprecated in favor of ${VAR} and ${env:VAR}", + "$$$ENV_VALUE": "variable substitution using $VAR has been deprecated in favor of ${VAR} and ${env:VAR}", + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + expectedMap := map[string]any{ + "test_map": map[string]any{ + "key1": "$ENV_VALUE", + "key2": "$$ENV_VALUE", + "key3": "$" + expandedValue, + "key4": "some" + expandedValue + "text", + "key5": "some${ENV_VALUE}text", + "key6": "${ONE}${TWO}", + "key7": "text$", + "key8": "$", + "key9": "${1}${env:2}", + "key10": "some${env:ENV_VALUE}text", + "key11": "${env:" + expandedValue + "}", + "key12": "${env:${ENV_VALUE}}", + "key13": "env:MAP_VALUE_2}${ENV_VALUE}{", + "key14": "$" + expandedValue, + }, + } + + resolver, err := confmap.NewResolver(confmap.ResolverSettings{ + URIs: []string{filepath.Join("testdata", "expand-escaped-env.yaml")}, + ProviderFactories: []confmap.ProviderFactory{fileprovider.NewFactory(), envprovider.NewFactory()}, + ConverterFactories: []confmap.ConverterFactory{expandconverter.NewFactory()}, + DefaultScheme: tt.scheme, + }) + require.NoError(t, err) + + // Test that expanded configs are the same with the simple config with no env vars. 
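+ // Escaped $$ sequences must collapse to a literal $, while unescaped ${ENV_VALUE} and ${env:ENV_VALUE} references are substituted.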
+ cfgMap, err := resolver.Resolve(context.Background()) + require.NoError(t, err) + m := cfgMap.ToStringMap() + assert.Equal(t, expectedMap, m) + + for val, expectedErr := range expectedFailures { + resolver, err = confmap.NewResolver(confmap.ResolverSettings{ + URIs: []string{fmt.Sprintf("yaml: test: %s", val)}, + ProviderFactories: []confmap.ProviderFactory{yamlprovider.NewFactory(), envprovider.NewFactory()}, + ConverterFactories: []confmap.ConverterFactory{expandconverter.NewFactory()}, + DefaultScheme: tt.scheme, + }) + require.NoError(t, err) + _, err := resolver.Resolve(context.Background()) + require.ErrorContains(t, err, expectedErr) + } + }) + } +} diff --git a/confmap/internal/e2e/fuzz_test.go b/confmap/internal/e2e/fuzz_test.go new file mode 100644 index 00000000000..462f66a484e --- /dev/null +++ b/confmap/internal/e2e/fuzz_test.go @@ -0,0 +1,88 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package e2etest + +import ( + "context" + "os" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// targetNested tests the following property: +// > Passing a value of type T directly through an environment variable +// > should be equivalent to passing it through a nested environment variable. +func targetNested[T any](t *testing.T, value string) { + resolver := NewResolver(t, "types_expand.yaml") + + // Use os.Setenv so we can check the error and return instead of failing the fuzzing. + os.Setenv("ENV", "${env:ENV2}") // nolint:tenv + defer os.Unsetenv("ENV") + err := os.Setenv("ENV2", value) // nolint:tenv + defer os.Unsetenv("ENV2") + if err != nil { + return + } + confNested, errResolveNested := resolver.Resolve(context.Background()) + + err = os.Setenv("ENV", value) // nolint:tenv + if err != nil { + return + } + confSimple, errResolveSimple := resolver.Resolve(context.Background()) + require.Equal(t, errResolveNested, errResolveSimple) + if errResolveNested != nil { + return + } + + var cfgNested TargetConfig[T] + errNested := confNested.Unmarshal(cfgNested) + + var cfgSimple TargetConfig[T] + errSimple := confSimple.Unmarshal(cfgSimple) + + require.Equal(t, errNested, errSimple) + if errNested != nil { + return + } + assert.Equal(t, cfgNested, cfgSimple) +} + +// testStrings for fuzzing targets +var testStrings = []string{ + "123", + "opentelemetry", + "!!str 123", + "\"0123\"", + "\"", + "1111:1111:1111:1111:1111::", + "{field: value}", + "0xdeadbeef", + "0b101", + "field:", + "2006-01-02T15:04:05Z07:00", +} + +func FuzzNestedString(f *testing.F) { + for _, value := range testStrings { + f.Add(value) + } + f.Fuzz(targetNested[string]) +} + +func FuzzNestedInt(f *testing.F) { + for _, value := range testStrings { + f.Add(value) + } + f.Fuzz(targetNested[int]) +} + +func FuzzNestedMap(f *testing.F) { + for _, value := range testStrings { + f.Add(value) + } + f.Fuzz(targetNested[map[string]any]) +} diff --git a/confmap/internal/e2e/go.mod b/confmap/internal/e2e/go.mod index 705f96795f8..4e27927b911 100644 --- a/confmap/internal/e2e/go.mod +++ b/confmap/internal/e2e/go.mod @@ -5,8 +5,10 @@ go 1.21.0 require ( github.com/stretchr/testify v1.9.0 go.opentelemetry.io/collector/confmap v0.105.0 + go.opentelemetry.io/collector/confmap/converter/expandconverter v0.105.0 go.opentelemetry.io/collector/confmap/provider/envprovider v0.105.0 go.opentelemetry.io/collector/confmap/provider/fileprovider v0.105.0 + go.opentelemetry.io/collector/confmap/provider/yamlprovider v0.105.0 
go.opentelemetry.io/collector/featuregate v1.12.0 go.opentelemetry.io/collector/internal/globalgates v0.105.0 ) @@ -32,6 +34,10 @@ replace go.opentelemetry.io/collector/confmap/provider/fileprovider => ../../pro replace go.opentelemetry.io/collector/confmap/provider/envprovider => ../../provider/envprovider +replace go.opentelemetry.io/collector/confmap/provider/yamlprovider => ../../provider/yamlprovider + replace go.opentelemetry.io/collector/featuregate => ../../../featuregate replace go.opentelemetry.io/collector/internal/globalgates => ../../../internal/globalgates + +replace go.opentelemetry.io/collector/confmap/converter/expandconverter => ../../converter/expandconverter diff --git a/confmap/internal/e2e/testdata/expand-escaped-env.yaml b/confmap/internal/e2e/testdata/expand-escaped-env.yaml new file mode 100644 index 00000000000..ae8cb280ede --- /dev/null +++ b/confmap/internal/e2e/testdata/expand-escaped-env.yaml @@ -0,0 +1,29 @@ +test_map: + # $$ -> escaped $ + key1: "$$ENV_VALUE" + # $$$$ -> two escaped $ + key2: "$$$$ENV_VALUE" + # $$ -> escaped $ + ${ENV_VALUE} expanded + key3: "$$${ENV_VALUE}" + # expanded in the middle + key4: "some${ENV_VALUE}text" + # escaped $ in the middle + key5: "some$${ENV_VALUE}text" + # two escaped $ + key6: "$${ONE}$${TWO}" + # trailing escaped $ + key7: "text$$" + # escaped $ alone + key8: "$$" + # escaped number and uri + key9: "$${1}$${env:2}" + # escape provider + key10: "some$${env:ENV_VALUE}text" + # can escape outer when nested + key11: "$${env:${ENV_VALUE}}" + # can escape inner and outer when nested + key12: "$${env:$${ENV_VALUE}}" + # can escape partial + key13: "env:MAP_VALUE_2}$${ENV_VALUE}{" + # $$$ -> escaped $ + expanded env var + key14: "$$${env:ENV_VALUE}" diff --git a/confmap/internal/e2e/types_test.go b/confmap/internal/e2e/types_test.go index aee111de7c3..30b763d8be1 100644 --- a/confmap/internal/e2e/types_test.go +++ b/confmap/internal/e2e/types_test.go @@ -38,6 +38,18 @@ type TargetConfig[T any] struct { Field T `mapstructure:"field"` } +func NewResolver(t testing.TB, path string) *confmap.Resolver { + resolver, err := confmap.NewResolver(confmap.ResolverSettings{ + URIs: []string{filepath.Join("testdata", path)}, + ProviderFactories: []confmap.ProviderFactory{ + fileprovider.NewFactory(), + envprovider.NewFactory(), + }, + }) + require.NoError(t, err) + return resolver +} + func AssertExpectedMatch[T any](t *testing.T, tt Test, conf *confmap.Conf, cfg *TargetConfig[T]) { err := conf.Unmarshal(cfg) if tt.unmarshalErr != "" { @@ -48,6 +60,29 @@ func AssertExpectedMatch[T any](t *testing.T, tt Test, conf *confmap.Conf, cfg * require.Equal(t, tt.expected, cfg.Field) } +func AssertResolvesTo(t *testing.T, resolver *confmap.Resolver, tt Test) { + conf, err := resolver.Resolve(context.Background()) + if tt.resolveErr != "" { + require.ErrorContains(t, err, tt.resolveErr) + return + } + require.NoError(t, err) + + switch tt.targetField { + case TargetFieldInt: + var cfg TargetConfig[int] + AssertExpectedMatch(t, tt, conf, &cfg) + case TargetFieldString, TargetFieldInlineString: + var cfg TargetConfig[string] + AssertExpectedMatch(t, tt, conf, &cfg) + case TargetFieldBool: + var cfg TargetConfig[bool] + AssertExpectedMatch(t, tt, conf, &cfg) + default: + t.Fatalf("unexpected target field %q", tt.targetField) + } +} + func TestTypeCasting(t *testing.T) { values := []Test{ { @@ -170,6 +205,16 @@ func TestTypeCasting(t *testing.T) { targetField: TargetFieldInlineString, expected: "inline field with 1111:1111:1111:1111:1111:: expansion", 
}, + { + value: "2006-01-02T15:04:05Z07:00", + targetField: TargetFieldString, + expected: "2006-01-02T15:04:05Z07:00", + }, + { + value: "2006-01-02T15:04:05Z07:00", + targetField: TargetFieldInlineString, + expected: "inline field with 2006-01-02T15:04:05Z07:00 expansion", + }, } previousValue := globalgates.StrictlyTypedInputGate.IsEnabled() @@ -186,34 +231,9 @@ func TestTypeCasting(t *testing.T) { if tt.targetField == TargetFieldInlineString { testFile = "types_expand_inline.yaml" } - - resolver, err := confmap.NewResolver(confmap.ResolverSettings{ - URIs: []string{filepath.Join("testdata", testFile)}, - ProviderFactories: []confmap.ProviderFactory{ - fileprovider.NewFactory(), - envprovider.NewFactory(), - }, - }) - require.NoError(t, err) + resolver := NewResolver(t, testFile) t.Setenv("ENV", tt.value) - - conf, err := resolver.Resolve(context.Background()) - require.NoError(t, err) - - switch tt.targetField { - case TargetFieldInt: - var cfg TargetConfig[int] - AssertExpectedMatch(t, tt, conf, &cfg) - case TargetFieldString, TargetFieldInlineString: - var cfg TargetConfig[string] - AssertExpectedMatch(t, tt, conf, &cfg) - case TargetFieldBool: - var cfg TargetConfig[bool] - AssertExpectedMatch(t, tt, conf, &cfg) - default: - t.Fatalf("unexpected target field %q", tt.targetField) - } - + AssertResolvesTo(t, resolver, tt) }) } } @@ -226,9 +246,9 @@ func TestStrictTypeCasting(t *testing.T) { expected: 123, }, { - value: "123", - targetField: TargetFieldString, - unmarshalErr: "'field' expected type 'string', got unconvertible type 'int', value: '123'", + value: "123", + targetField: TargetFieldString, + expected: "123", }, { value: "123", @@ -241,9 +261,9 @@ func TestStrictTypeCasting(t *testing.T) { expected: 83, }, { - value: "0123", - targetField: TargetFieldString, - unmarshalErr: "'field' expected type 'string', got unconvertible type 'int', value: '83'", + value: "0123", + targetField: TargetFieldString, + expected: "0123", }, { value: "0123", @@ -256,9 +276,9 @@ func TestStrictTypeCasting(t *testing.T) { expected: 3735928559, }, { - value: "0xdeadbeef", - targetField: TargetFieldString, - unmarshalErr: "'field' expected type 'string', got unconvertible type 'int', value: '3735928559'", + value: "0xdeadbeef", + targetField: TargetFieldString, + expected: "0xdeadbeef", }, { value: "0xdeadbeef", @@ -268,27 +288,27 @@ func TestStrictTypeCasting(t *testing.T) { { value: "\"0123\"", targetField: TargetFieldString, - expected: "0123", + expected: "\"0123\"", }, { value: "\"0123\"", targetField: TargetFieldInt, - unmarshalErr: "'field' expected type 'int', got unconvertible type 'string', value: '0123'", + unmarshalErr: "'field' expected type 'int', got unconvertible type 'string', value: '\"0123\"'", }, { value: "\"0123\"", targetField: TargetFieldInlineString, - expected: "inline field with 0123 expansion", + expected: "inline field with \"0123\" expansion", }, { value: "!!str 0123", targetField: TargetFieldString, - expected: "0123", + expected: "!!str 0123", }, { value: "!!str 0123", targetField: TargetFieldInlineString, - expected: "inline field with 0123 expansion", + expected: "inline field with !!str 0123 expansion", }, { value: "t", @@ -311,9 +331,19 @@ func TestStrictTypeCasting(t *testing.T) { expected: "inline field with 1111:1111:1111:1111:1111:: expansion", }, { - value: "1111:1111:1111:1111:1111::", - targetField: TargetFieldString, - unmarshalErr: "'field' expected type 'string', got unconvertible type 'map[string]interface {}', value: 'map[1111:1111:1111:1111:1111::]'", 
+ value: "1111:1111:1111:1111:1111::", + targetField: TargetFieldString, + expected: "1111:1111:1111:1111:1111::", + }, + { + value: "2006-01-02T15:04:05Z07:00", + targetField: TargetFieldString, + expected: "2006-01-02T15:04:05Z07:00", + }, + { + value: "2006-01-02T15:04:05Z07:00", + targetField: TargetFieldInlineString, + expected: "inline field with 2006-01-02T15:04:05Z07:00 expansion", }, } @@ -326,43 +356,130 @@ func TestStrictTypeCasting(t *testing.T) { }() for _, tt := range values { - t.Run(tt.value+"/"+string(tt.targetField), func(t *testing.T) { + t.Run(tt.value+"/"+string(tt.targetField)+"/"+"direct", func(t *testing.T) { testFile := "types_expand.yaml" if tt.targetField == TargetFieldInlineString { testFile = "types_expand_inline.yaml" } - - resolver, err := confmap.NewResolver(confmap.ResolverSettings{ - URIs: []string{filepath.Join("testdata", testFile)}, - ProviderFactories: []confmap.ProviderFactory{ - fileprovider.NewFactory(), - envprovider.NewFactory(), - }, - }) - require.NoError(t, err) + resolver := NewResolver(t, testFile) t.Setenv("ENV", tt.value) + AssertResolvesTo(t, resolver, tt) + }) - conf, err := resolver.Resolve(context.Background()) - if tt.resolveErr != "" { - require.ErrorContains(t, err, tt.resolveErr) - return + t.Run(tt.value+"/"+string(tt.targetField)+"/"+"indirect", func(t *testing.T) { + testFile := "types_expand.yaml" + if tt.targetField == TargetFieldInlineString { + testFile = "types_expand_inline.yaml" } - require.NoError(t, err) - switch tt.targetField { - case TargetFieldInt: - var cfg TargetConfig[int] - AssertExpectedMatch(t, tt, conf, &cfg) - case TargetFieldString, TargetFieldInlineString: - var cfg TargetConfig[string] - AssertExpectedMatch(t, tt, conf, &cfg) - case TargetFieldBool: - var cfg TargetConfig[bool] - AssertExpectedMatch(t, tt, conf, &cfg) - default: - t.Fatalf("unexpected target field %q", tt.targetField) + resolver := NewResolver(t, testFile) + t.Setenv("ENV", "${env:ENV2}") + t.Setenv("ENV2", tt.value) + AssertResolvesTo(t, resolver, tt) + }) + } +} + +func TestRecursiveInlineString(t *testing.T) { + values := []Test{ + { + value: "123", + targetField: TargetFieldString, + expected: "The value The value 123 is wrapped is wrapped", + }, + { + value: "123", + targetField: TargetFieldInlineString, + expected: "inline field with The value The value 123 is wrapped is wrapped expansion", + }, + { + value: "opentelemetry", + targetField: TargetFieldString, + expected: "The value The value opentelemetry is wrapped is wrapped", + }, + { + value: "opentelemetry", + targetField: TargetFieldInlineString, + expected: "inline field with The value The value opentelemetry is wrapped is wrapped expansion", + }, + } + + previousValue := globalgates.StrictlyTypedInputGate.IsEnabled() + err := featuregate.GlobalRegistry().Set(globalgates.StrictlyTypedInputID, true) + require.NoError(t, err) + defer func() { + err := featuregate.GlobalRegistry().Set(globalgates.StrictlyTypedInputID, previousValue) + require.NoError(t, err) + }() + + for _, tt := range values { + t.Run(tt.value+"/"+string(tt.targetField), func(t *testing.T) { + testFile := "types_expand.yaml" + if tt.targetField == TargetFieldInlineString { + testFile = "types_expand_inline.yaml" } + resolver := NewResolver(t, testFile) + t.Setenv("ENV", "The value ${env:ENV2} is wrapped") + t.Setenv("ENV2", "The value ${env:ENV3} is wrapped") + t.Setenv("ENV3", tt.value) + AssertResolvesTo(t, resolver, tt) }) } } + +func TestRecursiveMaps(t *testing.T) { + value := "{value: 123}" + + 
previousValue := globalgates.StrictlyTypedInputGate.IsEnabled() + err := featuregate.GlobalRegistry().Set(globalgates.StrictlyTypedInputID, true) + require.NoError(t, err) + defer func() { + seterr := featuregate.GlobalRegistry().Set(globalgates.StrictlyTypedInputID, previousValue) + require.NoError(t, seterr) + }() + + resolver := NewResolver(t, "types_expand.yaml") + t.Setenv("ENV", `{env: "${env:ENV2}", inline: "inline ${env:ENV2}"}`) + t.Setenv("ENV2", `{env2: "${env:ENV3}"}`) + t.Setenv("ENV3", value) + conf, err := resolver.Resolve(context.Background()) + require.NoError(t, err) + + type Value struct { + Value int `mapstructure:"value"` + } + type ENV2 struct { + Env2 Value `mapstructure:"env2"` + } + type ENV struct { + Env ENV2 `mapstructure:"env"` + Inline string `mapstructure:"inline"` + } + type Target struct { + Field ENV `mapstructure:"field"` + } + + var cfg Target + err = conf.Unmarshal(&cfg) + require.NoError(t, err) + require.Equal(t, + Target{Field: ENV{ + Env: ENV2{ + Env2: Value{ + Value: 123, + }}, + Inline: "inline {env2: \"{value: 123}\"}", + }}, + cfg, + ) + + confStr, err := resolver.Resolve(context.Background()) + require.NoError(t, err) + var cfgStr TargetConfig[string] + err = confStr.Unmarshal(&cfgStr) + require.NoError(t, err) + require.Equal(t, `{env: "{env2: "{value: 123}"}", inline: "inline {env2: "{value: 123}"}"}`, + cfgStr.Field, + ) +} diff --git a/confmap/provider.go b/confmap/provider.go index 161e2473971..3338d72bddf 100644 --- a/confmap/provider.go +++ b/confmap/provider.go @@ -9,6 +9,8 @@ import ( "go.uber.org/zap" "gopkg.in/yaml.v3" + + "go.opentelemetry.io/collector/internal/globalgates" ) // ProviderSettings are the settings to initialize a Provider. @@ -140,7 +142,11 @@ func NewRetrievedFromYAML(yamlBytes []byte, opts ...RetrievedOption) (*Retrieved switch v := rawConf.(type) { case string: - opts = append(opts, withStringRepresentation(v)) + val := v + if globalgates.StrictlyTypedInputGate.IsEnabled() { + val = string(yamlBytes) + } + return NewRetrieved(val, append(opts, withStringRepresentation(val))...) 
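+ // For the remaining scalar types and maps, keep the parsed value and record the raw YAML bytes as the string representation.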
case int, int32, int64, float32, float64, bool, map[string]any: opts = append(opts, withStringRepresentation(string(yamlBytes))) } diff --git a/confmap/provider/internal/configurablehttpprovider/provider.go b/confmap/provider/internal/configurablehttpprovider/provider.go index f5bac2c6d51..2d968e3bafe 100644 --- a/confmap/provider/internal/configurablehttpprovider/provider.go +++ b/confmap/provider/internal/configurablehttpprovider/provider.go @@ -10,6 +10,7 @@ import ( "fmt" "io" "net/http" + "net/url" "os" "path/filepath" "strings" @@ -84,6 +85,10 @@ func (fmp *provider) Retrieve(_ context.Context, uri string, _ confmap.WatcherFu return nil, fmt.Errorf("%q uri is not supported by %q provider", uri, string(fmp.scheme)) } + if _, err := url.ParseRequestURI(uri); err != nil { + return nil, fmt.Errorf("invalid uri %q: %w", uri, err) + } + client, err := fmp.createClient() if err != nil { diff --git a/confmap/provider/internal/configurablehttpprovider/provider_test.go b/confmap/provider/internal/configurablehttpprovider/provider_test.go index 125c1cbdd80..0561d51030a 100644 --- a/confmap/provider/internal/configurablehttpprovider/provider_test.go +++ b/confmap/provider/internal/configurablehttpprovider/provider_test.go @@ -297,9 +297,31 @@ func TestValidateProviderScheme(t *testing.T) { assert.NoError(t, confmaptest.ValidateProviderScheme(New(HTTPScheme, confmaptest.NewNopProviderSettings()))) } -func TestInvalidTransport(t *testing.T) { - fp := New("foo", confmaptest.NewNopProviderSettings()) +func TestInvalidURI(t *testing.T) { + fp := New(HTTPScheme, confmaptest.NewNopProviderSettings()) - _, err := fp.Retrieve(context.Background(), "foo://..", nil) - assert.Error(t, err) + tests := []struct { + uri string + err string + }{ + { + uri: "foo://..", + err: "uri is not supported by \"http\" provider", + }, + { + uri: "http://", + err: "no Host in request URL", + }, + { + uri: "http://{}", + err: "invalid character \"{\" in host name", + }, + } + + for _, tt := range tests { + t.Run(tt.uri, func(t *testing.T) { + _, err := fp.Retrieve(context.Background(), tt.uri, nil) + assert.ErrorContains(t, err, tt.err) + }) + } } diff --git a/confmap/provider_test.go b/confmap/provider_test.go index ebbd5562ec3..e168b49fa6a 100644 --- a/confmap/provider_test.go +++ b/confmap/provider_test.go @@ -85,8 +85,8 @@ func TestNewRetrievedFromYAMLString(t *testing.T) { }, { yaml: "\"string\"", - value: "string", - altStrRepr: "string", + value: "\"string\"", + altStrRepr: "\"string\"", }, { yaml: "123", diff --git a/confmap/resolver.go b/confmap/resolver.go index ca197076327..7b7de3d5092 100644 --- a/confmap/resolver.go +++ b/confmap/resolver.go @@ -12,8 +12,6 @@ import ( "go.uber.org/multierr" "go.uber.org/zap" - - "go.opentelemetry.io/collector/internal/globalgates" ) // follows drive-letter specification: @@ -169,17 +167,11 @@ func (mr *Resolver) Resolve(ctx context.Context) (*Conf, error) { cfgMap := make(map[string]any) for _, k := range retMap.AllKeys() { - val, err := mr.expandValueRecursively(ctx, retMap.Get(k)) + val, err := mr.expandValueRecursively(ctx, retMap.unsanitizedGet(k)) if err != nil { return nil, err } - - if v, ok := val.(string); ok && globalgates.UseUnifiedEnvVarExpansionRules.IsEnabled() { - cfgMap[k] = strings.ReplaceAll(v, "$$", "$") - } else { - cfgMap[k] = val - } - + cfgMap[k] = val } retMap = NewFromStringMap(cfgMap) diff --git a/confmap/testdata/expand-escaped-env.yaml b/confmap/testdata/expand-escaped-env.yaml deleted file mode 100644 index 6b2cd162831..00000000000 --- 
a/confmap/testdata/expand-escaped-env.yaml +++ /dev/null @@ -1,31 +0,0 @@ -test_map: - # $$ -> escaped $ - recv.1: "$$MAP_VALUE_1" - # $$$ -> escaped $ + $MAP_VALUE_2 - recv.2: "$$$MAP_VALUE_2" - # $$$$ -> two escaped $ - recv.3: "$$$$MAP_VALUE_3" - # $$$ -> escaped $ + substituted env var - recv.4: "$$${MAP_VALUE_2}" - # escaped $ in the middle - recv.5: "some$${MAP_VALUE_4}text" - # two escaped $ - recv.6: "$${ONE}$${TWO}" - # trailing escaped $ - recv.7: "text$$" - # escaped $ alone - recv.8: "$$" - # Escape numbers - recv.9: "$${1}$${env:2}" - # can escape provider - recv.10: "some$${env:MAP_VALUE_4}text" - # can escape outer when nested - recv.11: "$${env:${MAP_VALUE_2}}" - # can escape inner and outer when nested - recv.12: "$${env:$${MAP_VALUE_2}}" - # can escape partial - recv.13: "env:MAP_VALUE_2}$${MAP_VALUE_2}{" - # can escape partial - recv.14: "${env:MAP_VALUE_2$${MAP_VALUE_2}" - # $$$ -> escaped $ + substituted env var - recv.15: "$$${env:MAP_VALUE_2}" diff --git a/connector/forwardconnector/go.mod b/connector/forwardconnector/go.mod index f42d7423b3a..70286d54f11 100644 --- a/connector/forwardconnector/go.mod +++ b/connector/forwardconnector/go.mod @@ -8,6 +8,7 @@ require ( go.opentelemetry.io/collector/confmap v0.105.0 go.opentelemetry.io/collector/connector v0.105.0 go.opentelemetry.io/collector/consumer v0.105.0 + go.opentelemetry.io/collector/consumer/consumertest v0.105.0 go.opentelemetry.io/collector/pdata v1.12.0 go.uber.org/goleak v1.3.0 ) @@ -38,8 +39,10 @@ require ( github.com/prometheus/procfs v0.15.1 // indirect go.opentelemetry.io/collector v0.105.0 // indirect go.opentelemetry.io/collector/config/configtelemetry v0.105.0 // indirect + go.opentelemetry.io/collector/consumer/consumerprofiles v0.105.0 // indirect go.opentelemetry.io/collector/featuregate v1.12.0 // indirect go.opentelemetry.io/collector/internal/globalgates v0.105.0 // indirect + go.opentelemetry.io/collector/pdata/pprofile v0.105.0 // indirect go.opentelemetry.io/otel v1.28.0 // indirect go.opentelemetry.io/otel/exporters/prometheus v0.50.0 // indirect go.opentelemetry.io/otel/metric v1.28.0 // indirect @@ -83,3 +86,7 @@ retract ( replace go.opentelemetry.io/collector/config/configtelemetry => ../../config/configtelemetry replace go.opentelemetry.io/collector/pdata/pprofile => ../../pdata/pprofile + +replace go.opentelemetry.io/collector/consumer/consumerprofiles => ../../consumer/consumerprofiles + +replace go.opentelemetry.io/collector/consumer/consumertest => ../../consumer/consumertest diff --git a/connector/go.mod b/connector/go.mod index 877268cf09b..eedaf4307b5 100644 --- a/connector/go.mod +++ b/connector/go.mod @@ -8,6 +8,7 @@ require ( go.opentelemetry.io/collector v0.105.0 go.opentelemetry.io/collector/component v0.105.0 go.opentelemetry.io/collector/consumer v0.105.0 + go.opentelemetry.io/collector/consumer/consumertest v0.105.0 go.opentelemetry.io/collector/pdata v1.12.0 go.opentelemetry.io/collector/pdata/testdata v0.105.0 go.uber.org/goleak v1.3.0 @@ -32,6 +33,7 @@ require ( github.com/prometheus/common v0.55.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect go.opentelemetry.io/collector/config/configtelemetry v0.105.0 // indirect + go.opentelemetry.io/collector/consumer/consumerprofiles v0.105.0 // indirect go.opentelemetry.io/collector/pdata/pprofile v0.105.0 // indirect go.opentelemetry.io/otel v1.28.0 // indirect go.opentelemetry.io/otel/exporters/prometheus v0.50.0 // indirect @@ -67,3 +69,7 @@ replace go.opentelemetry.io/collector/pdata/testdata => 
../pdata/testdata replace go.opentelemetry.io/collector/pdata/pprofile => ../pdata/pprofile replace go.opentelemetry.io/collector/internal/globalgates => ../internal/globalgates + +replace go.opentelemetry.io/collector/consumer/consumerprofiles => ../consumer/consumerprofiles + +replace go.opentelemetry.io/collector/consumer/consumertest => ../consumer/consumertest diff --git a/consumer/consumertest/Makefile b/consumer/consumertest/Makefile new file mode 100644 index 00000000000..ded7a36092d --- /dev/null +++ b/consumer/consumertest/Makefile @@ -0,0 +1 @@ +include ../../Makefile.Common diff --git a/consumer/consumertest/consumer.go b/consumer/consumertest/consumer.go index 147ed55c7ed..4b699b9449a 100644 --- a/consumer/consumertest/consumer.go +++ b/consumer/consumertest/consumer.go @@ -7,8 +7,10 @@ import ( "context" "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/consumer/consumerprofiles" "go.opentelemetry.io/collector/pdata/plog" "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/pprofile" "go.opentelemetry.io/collector/pdata/ptrace" ) @@ -29,12 +31,16 @@ type Consumer interface { // ConsumeLogs to implement the consumer.Logs. ConsumeLogs(context.Context, plog.Logs) error + // ConsumeProfiles to implement the consumerprofiles.Profiles. + ConsumeProfiles(context.Context, pprofile.Profiles) error + unexported() } var _ consumer.Logs = (Consumer)(nil) var _ consumer.Metrics = (Consumer)(nil) var _ consumer.Traces = (Consumer)(nil) +var _ consumerprofiles.Profiles = (Consumer)(nil) type nonMutatingConsumer struct{} @@ -48,6 +54,7 @@ type baseConsumer struct { consumer.ConsumeTracesFunc consumer.ConsumeMetricsFunc consumer.ConsumeLogsFunc + consumerprofiles.ConsumeProfilesFunc } func (bc baseConsumer) unexported() {} diff --git a/consumer/consumertest/err.go b/consumer/consumertest/err.go index d147453aaf7..fdc54ae2452 100644 --- a/consumer/consumertest/err.go +++ b/consumer/consumertest/err.go @@ -7,14 +7,16 @@ import ( "go.opentelemetry.io/collector/pdata/plog" "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/pprofile" "go.opentelemetry.io/collector/pdata/ptrace" ) // NewErr returns a Consumer that just drops all received data and returns the specified error to Consume* callers. 
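+// The returned Consumer covers traces, metrics, logs, and profiles; each Consume* call simply reports err.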
func NewErr(err error) Consumer { return &baseConsumer{ - ConsumeTracesFunc: func(context.Context, ptrace.Traces) error { return err }, - ConsumeMetricsFunc: func(context.Context, pmetric.Metrics) error { return err }, - ConsumeLogsFunc: func(context.Context, plog.Logs) error { return err }, + ConsumeTracesFunc: func(context.Context, ptrace.Traces) error { return err }, + ConsumeMetricsFunc: func(context.Context, pmetric.Metrics) error { return err }, + ConsumeLogsFunc: func(context.Context, plog.Logs) error { return err }, + ConsumeProfilesFunc: func(context.Context, pprofile.Profiles) error { return err }, } } diff --git a/consumer/consumertest/err_test.go b/consumer/consumertest/err_test.go index 692347becfb..3d88ecf18aa 100644 --- a/consumer/consumertest/err_test.go +++ b/consumer/consumertest/err_test.go @@ -13,6 +13,7 @@ import ( "go.opentelemetry.io/collector/pdata/plog" "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/pprofile" "go.opentelemetry.io/collector/pdata/ptrace" ) @@ -24,4 +25,5 @@ func TestErr(t *testing.T) { assert.Equal(t, err, ec.ConsumeLogs(context.Background(), plog.NewLogs())) assert.Equal(t, err, ec.ConsumeMetrics(context.Background(), pmetric.NewMetrics())) assert.Equal(t, err, ec.ConsumeTraces(context.Background(), ptrace.NewTraces())) + assert.Equal(t, err, ec.ConsumeProfiles(context.Background(), pprofile.NewProfiles())) } diff --git a/consumer/consumertest/go.mod b/consumer/consumertest/go.mod new file mode 100644 index 00000000000..24f5ecec4a0 --- /dev/null +++ b/consumer/consumertest/go.mod @@ -0,0 +1,40 @@ +module go.opentelemetry.io/collector/consumer/consumertest + +go 1.21.0 + +replace go.opentelemetry.io/collector/consumer => ../ + +require ( + github.com/stretchr/testify v1.9.0 + go.opentelemetry.io/collector/consumer v0.105.0 + go.opentelemetry.io/collector/consumer/consumerprofiles v0.105.0 + go.opentelemetry.io/collector/pdata v1.12.0 + go.opentelemetry.io/collector/pdata/pprofile v0.105.0 + go.opentelemetry.io/collector/pdata/testdata v0.105.0 + go.uber.org/goleak v1.3.0 +) + +require ( + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + go.uber.org/multierr v1.11.0 // indirect + golang.org/x/net v0.25.0 // indirect + golang.org/x/sys v0.20.0 // indirect + golang.org/x/text v0.15.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 // indirect + google.golang.org/grpc v1.65.0 // indirect + google.golang.org/protobuf v1.34.2 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect +) + +replace go.opentelemetry.io/collector/pdata/pprofile => ../../pdata/pprofile + +replace go.opentelemetry.io/collector/pdata => ../../pdata + +replace go.opentelemetry.io/collector/consumer/consumerprofiles => ../consumerprofiles + +replace go.opentelemetry.io/collector/pdata/testdata => ../../pdata/testdata diff --git a/consumer/consumertest/go.sum b/consumer/consumertest/go.sum new file mode 100644 index 00000000000..528166b78c0 --- /dev/null +++ b/consumer/consumertest/go.sum @@ -0,0 +1,77 @@ +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod 
h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net 
v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac= +golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= +golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk= +golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 h1:Zy9XzmMEflZ/MAaA7vNcoebnRAld7FsPW1EeBB7V0m8= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0= +google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= +google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= +google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff 
--git a/consumer/consumertest/nop.go b/consumer/consumertest/nop.go index fbb01e3bb98..25b898a7751 100644 --- a/consumer/consumertest/nop.go +++ b/consumer/consumertest/nop.go @@ -8,14 +8,16 @@ import ( "go.opentelemetry.io/collector/pdata/plog" "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/pprofile" "go.opentelemetry.io/collector/pdata/ptrace" ) // NewNop returns a Consumer that just drops all received data and returns no error. func NewNop() Consumer { return &baseConsumer{ - ConsumeTracesFunc: func(context.Context, ptrace.Traces) error { return nil }, - ConsumeMetricsFunc: func(context.Context, pmetric.Metrics) error { return nil }, - ConsumeLogsFunc: func(context.Context, plog.Logs) error { return nil }, + ConsumeTracesFunc: func(context.Context, ptrace.Traces) error { return nil }, + ConsumeMetricsFunc: func(context.Context, pmetric.Metrics) error { return nil }, + ConsumeLogsFunc: func(context.Context, plog.Logs) error { return nil }, + ConsumeProfilesFunc: func(context.Context, pprofile.Profiles) error { return nil }, } } diff --git a/consumer/consumertest/nop_test.go b/consumer/consumertest/nop_test.go index 21f4f90c9d6..21b9c43240a 100644 --- a/consumer/consumertest/nop_test.go +++ b/consumer/consumertest/nop_test.go @@ -12,6 +12,7 @@ import ( "go.opentelemetry.io/collector/pdata/plog" "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/pprofile" "go.opentelemetry.io/collector/pdata/ptrace" ) @@ -22,4 +23,5 @@ func TestNop(t *testing.T) { assert.NoError(t, nc.ConsumeLogs(context.Background(), plog.NewLogs())) assert.NoError(t, nc.ConsumeMetrics(context.Background(), pmetric.NewMetrics())) assert.NoError(t, nc.ConsumeTraces(context.Background(), ptrace.NewTraces())) + assert.NoError(t, nc.ConsumeProfiles(context.Background(), pprofile.NewProfiles())) } diff --git a/consumer/consumertest/sink.go b/consumer/consumertest/sink.go index be7195af18f..ec35e717ae0 100644 --- a/consumer/consumertest/sink.go +++ b/consumer/consumertest/sink.go @@ -8,8 +8,10 @@ import ( "sync" "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/consumer/consumerprofiles" "go.opentelemetry.io/collector/pdata/plog" "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/pprofile" "go.opentelemetry.io/collector/pdata/ptrace" ) @@ -156,3 +158,41 @@ func (sle *LogsSink) Reset() { sle.logs = nil sle.logRecordCount = 0 } + +// ProfilesSink is a consumerprofiles.Profiles that acts like a sink that +// stores all profiles and allows querying them for testing. +type ProfilesSink struct { + nonMutatingConsumer + mu sync.Mutex + profiles []pprofile.Profiles +} + +var _ consumerprofiles.Profiles = (*ProfilesSink)(nil) + +// ConsumeProfiles stores profiles to this sink. +func (ste *ProfilesSink) ConsumeProfiles(_ context.Context, td pprofile.Profiles) error { + ste.mu.Lock() + defer ste.mu.Unlock() + + ste.profiles = append(ste.profiles, td) + + return nil +} + +// AllProfiles returns the profiles stored by this sink since last Reset. +func (ste *ProfilesSink) AllProfiles() []pprofile.Profiles { + ste.mu.Lock() + defer ste.mu.Unlock() + + copyProfiles := make([]pprofile.Profiles, len(ste.profiles)) + copy(copyProfiles, ste.profiles) + return copyProfiles +} + +// Reset deletes any stored data. 
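+// Reset is safe to call concurrently with ConsumeProfiles and AllProfiles since all three take the sink's mutex.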
+func (ste *ProfilesSink) Reset() { + ste.mu.Lock() + defer ste.mu.Unlock() + + ste.profiles = nil +} diff --git a/consumer/consumertest/sink_test.go b/consumer/consumertest/sink_test.go index bd29ebc6aef..3c813cf2f71 100644 --- a/consumer/consumertest/sink_test.go +++ b/consumer/consumertest/sink_test.go @@ -12,6 +12,7 @@ import ( "go.opentelemetry.io/collector/pdata/plog" "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/pprofile" "go.opentelemetry.io/collector/pdata/ptrace" "go.opentelemetry.io/collector/pdata/testdata" ) @@ -60,3 +61,16 @@ func TestLogsSink(t *testing.T) { assert.Equal(t, 0, len(sink.AllLogs())) assert.Equal(t, 0, sink.LogRecordCount()) } + +func TestProfilesSink(t *testing.T) { + sink := new(ProfilesSink) + td := testdata.GenerateProfiles(1) + want := make([]pprofile.Profiles, 0, 7) + for i := 0; i < 7; i++ { + require.NoError(t, sink.ConsumeProfiles(context.Background(), td)) + want = append(want, td) + } + assert.Equal(t, want, sink.AllProfiles()) + sink.Reset() + assert.Equal(t, 0, len(sink.AllProfiles())) +} diff --git a/docs/release.md b/docs/release.md index 8263765c9e8..daab8aa6a74 100644 --- a/docs/release.md +++ b/docs/release.md @@ -46,9 +46,7 @@ It is possible that a core approver isn't a contrib approver. In that case, the If you set your remote using `https` you need to include `REMOTE=https://github.com/open-telemetry/opentelemetry-collector.git` in each command. Wait for the new tag build to pass successfully. -6. The release script for the collector builder should create a new GitHub release for the builder. This is a separate release from the core, but we might join them in the future if it makes sense. - -7. A new `v0.85.0` release should be automatically created on Github by now. Edit it and use the contents from the CHANGELOG.md and CHANGELOG-API.md as the release's description. +6. A new `v0.85.0` source code release should be automatically created on Github by now. Edit it and use the contents from the CHANGELOG.md and CHANGELOG-API.md as the release's description. ## Releasing opentelemetry-collector-contrib @@ -73,19 +71,20 @@ It is possible that a core approver isn't a contrib approver. In that case, the 5. A new `v0.85.0` release should be automatically created on Github by now. Edit it and use the contents from the CHANGELOG.md as the release's description. ## Producing the artifacts - The last step of the release process creates artifacts for the new version of the collector and publishes images to Dockerhub. The steps in this portion of the release are done in the [opentelemetry-collector-releases](https://github.com/open-telemetry/opentelemetry-collector-releases) repo. -1. Update the `./distribution/**/manifest.yaml` files to include the new release version. +1. Update the `./distributions/**/manifest.yaml` files to include the new release version. 2. Update the builder version in `OTELCOL_BUILDER_VERSION` to the new release in the `Makefile`. While this might not be strictly necessary for every release, this is a good practice. -3. Create a pull request with the change and ensure the build completes successfully. See [example](https://github.com/open-telemetry/opentelemetry-collector-releases/pull/71). +3. Create and push a new branch with this change, and apply the tag `cmd/builder/v0.xxx.0` to match the version number used above by running the command `make push-tags TAG=cmd/builder/v0.xxx.0`. 
This will build and release the ocb binaries under the opentelemetry-collector-releases distribution and is required to be completed **before** creating the pull request to merge the branch in the next step below. Building the distributions relies on using the latest ocb binary and if this workflow does not complete successfully, the rest of the build/release actions may not complete successfully. + +4. Create a pull request with the change and ensure the build completes successfully. See [example](https://github.com/open-telemetry/opentelemetry-collector-releases/pull/71). This pull request will also initiate and release the opentelemetry collector builder (ocb) binaries in a separate release; this is required to use the newest ocb binary to build the collector distributions below. - 🛑 **Do not move forward until this PR is merged.** 🛑 -4. Check out the commit created by merging the PR and tag with the new release version by running the `make push-tags TAG=v0.85.0` command. If you set your remote using `https` you need to include `REMOTE=https://github.com/open-telemetry/opentelemetry-collector-releases.git` in each command. Wait for the new tag build to pass successfully. +5. Check out the commit created by merging the PR and tag with the new release version by running the `make push-tags TAG=v0.85.0` command. If you set your remote using `https` you need to include `REMOTE=https://github.com/open-telemetry/opentelemetry-collector-releases.git` in each command. Wait for the new tag build to pass successfully. -5. Ensure the "Release" action passes, this will +6. Ensure the "Release" action passes, this will 1. push new container images to `https://hub.docker.com/repository/docker/otel/opentelemetry-collector` and `https://hub.docker.com/repository/docker/otel/opentelemetry-collector-contrib` @@ -160,7 +159,6 @@ Once a module is ready to be released under the `1.x` version scheme, file a PR | Date | Version | Release manager | |------------|----------|---------------------------------------------------| -| 2024-07-15 | v0.105.0 | [@atoulme](https://github.com/atoulme) | | 2024-07-29 | v0.106.0 | [@songy23](https://github.com/songy23) | | 2024-08-12 | v0.107.0 | [@dmitryax](https://github.com/dmitryax) | | 2024-08-26 | v0.108.0 | [@codeboten](https://github.com/codeboten) | @@ -170,3 +168,4 @@ Once a module is ready to be released under the `1.x` version scheme, file a PR | 2024-10-21 | v0.112.0 | [@evan-bradley](https://github.com/evan-bradley) | | 2024-11-04 | v0.113.0 | [@djaglowski](https://github.com/djaglowski) | | 2024-11-18 | v0.114.0 | [@TylerHelmuth](https://github.com/TylerHelmuth) | +| 2024-12-02 | v0.115.0 | [@atoulme](https://github.com/atoulme) | diff --git a/docs/rfcs/env-vars.md b/docs/rfcs/env-vars.md index a2709412788..0be8bf8c30b 100644 --- a/docs/rfcs/env-vars.md +++ b/docs/rfcs/env-vars.md @@ -178,22 +178,21 @@ matches `\${[^$}]+}`). ### Type casting rules The environment variable value is parsed by the yaml.v3 parser to an -any-typed variable and the original representation as a string is stored -for numeric types. The `yaml.v3` parser mostly follows the YAML v1.2 -specification with [*some +any-typed variable and the original representation as a string is also stored. +The `yaml.v3` parser mostly follows the YAML v1.2 specification with [*some exceptions*](https://github.com/go-yaml/yaml#compatibility). You can see how it works for some edge cases in this example: [*https://go.dev/play/p/RtPmH8aZA1X*](https://go.dev/play/p/RtPmH8aZA1X). 
When unmarshalling, we use mapstructure with WeaklyTypedInput -**disabled**. We check via a hook an `AsString` method from confmap.Conf +**disabled**. We check via a hook the original string representation of the data and use its return value when it is valid and we are mapping to a string field. This method has default casting rules for unambiguous scalar types but may return the original representation depending on the construction of confmap.Conf (see the comparison table below for details). For using this notation in inline mode (e.g.`http://endpoint/${env:PATH}`), we -use the `AsString` method from confmap.Conf (see the comparison table below for details). +use the original string representation as well (see the comparison table below for details). ### Character set @@ -216,7 +215,7 @@ loading a field with the braces syntax, `env` syntax. | `0123` | integer | 83 | 83 | 83 | n/a | | `0123` | string | 0123 | 83 | 0123 | 0123 | | `0xdeadbeef` | string | 0xdeadbeef | 3735928559 | 0xdeadbeef | 0xdeadbeef | -| `"0123"` | string | "0123" | 0123 | 0123 | 0123 | -| `!!str 0123` | string | !!str 0123 | 0123 | 0123 | 0123 | +| `"0123"` | string | "0123" | 0123 | "0123" | "0123" | +| `!!str 0123` | string | !!str 0123 | 0123 | !!str 0123 | !!str 0123 | | `t` | boolean | true | true | Error: mapping string to bool | n/a | | `23` | boolean | true | true | Error: mapping integer to bool | n/a | diff --git a/docs/security-best-practices.md b/docs/security-best-practices.md index 8c57d09ebc9..a4bd7fdcb14 100644 --- a/docs/security-best-practices.md +++ b/docs/security-best-practices.md @@ -153,6 +153,109 @@ To change the default endpoint to be `localhost`-bound in all components, enable If `localhost` resolves to a different IP due to your DNS settings then explicitly use the loopback IP instead: `127.0.0.1` for IPv4 or `::1` for IPv6. In IPv6 setups, ensure your system supports both IPv4 and IPv6 loopback addresses to avoid issues. +Using `localhost` may not work in environments like Docker, Kubernetes, and other environments that have non-standard networking setups. We've documented a few working example setups for the OTLP receiver gRPC endpoint below, but other receivers and other Collector components may need similar configuration. + +#### Docker +You can run the Collector in Docker by binding to the correct address. An OTLP exporter in Docker might look something like this: + +Collector config file + +`config.yaml`: +```yaml +receivers: + otlp: + protocols: + grpc: + endpoint: my-hostname:4317 # the same hostname from your docker run command +``` +Docker run command: +`docker run --hostname my-hostname --name container-name -p 127.0.0.1:4567:4317 otel/opentelemetry-collector:0.104.0` + +The key here is using the `--hostname` argument - that allows the collector to bind to the `my-hostname` address. +You could access it from outside that Docker network (for example on a regular program running on the host) by connecting to `127.0.0.1:4567`. + +#### Docker Compose +Similarly to plain Docker, you can run the Collector in Docker by binding to the correct address. 
+ +`compose.yaml`: +```yaml +services: + otel-collector: + image: otel/opentelemetry-collector-contrib:0.104.0 + ports: + - "4567:4317" +``` + +Collector config file: + +`config.yaml`: +```yaml +receivers: + otlp: + protocols: + grpc: + endpoint: otel-collector:4317 # Using the service name from your Docker compose file +``` + +You can connect to this Collector from another Docker container running in the same network by connecting to `otel-collector:4317`. You could access it from outside that Docker network (for example on a regular program running on the host) by connecting to `127.0.0.1:4567`. + +#### Kubernetes +If you run the Collector as a `Daemonset`, you can use a configuration like below: +```yaml +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: collector +spec: + selector: + matchLabels: + name: collector + template: + metadata: + labels: + name: collector + spec: + containers: + - name: collector + image: otel/opentelemetry-collector:0.104.0 + ports: + - containerPort: 4317 + hostPort: 4317 + protocol: TCP + name: otlp-grpc + - containerPort: 4318 + hostPort: 4318 + protocol: TCP + name: otlp-http + env: + - name: MY_POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + +``` +In this example, we use the [Kubernetes Downward API](https://kubernetes.io/docs/concepts/workloads/pods/downward-api/) to get your own Pod IP, then bind to that network interface. Then, we use the `hostPort` option to ensure that the Collector is exposed on the host. The Collector's config should look something like: + +```yaml +receivers: + otlp: + protocols: + grpc: + endpoint: ${env:MY_POD_IP}:4317 + http: + endpoint: ${env:MY_POD_IP}:4318 +``` + +You can send OTLP data to this Collector from any Pod on the Node by accessing `${MY_HOST_IP}:4317` to send OTLP over gRPC and `${MY_HOST_IP}:4318` to send OTLP over HTTP, where `MY_HOST_IP` is the Node's IP address. You can get this IP from the Downwards API: + +```yaml +env: + - name: MY_HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP +``` + ## Processors Processors sit between receivers and exporters. 
They are responsible for diff --git a/exporter/debugexporter/go.mod b/exporter/debugexporter/go.mod index 959b20a8763..165281a2318 100644 --- a/exporter/debugexporter/go.mod +++ b/exporter/debugexporter/go.mod @@ -42,6 +42,8 @@ require ( github.com/prometheus/procfs v0.15.1 // indirect go.opentelemetry.io/collector v0.105.0 // indirect go.opentelemetry.io/collector/config/configretry v1.12.0 // indirect + go.opentelemetry.io/collector/consumer/consumerprofiles v0.105.0 // indirect + go.opentelemetry.io/collector/consumer/consumertest v0.105.0 // indirect go.opentelemetry.io/collector/extension v0.105.0 // indirect go.opentelemetry.io/collector/featuregate v1.12.0 // indirect go.opentelemetry.io/collector/internal/globalgates v0.105.0 // indirect @@ -90,3 +92,7 @@ replace go.opentelemetry.io/collector/extension => ../../extension replace go.opentelemetry.io/collector/config/configtelemetry => ../../config/configtelemetry replace go.opentelemetry.io/collector/config/configretry => ../../config/configretry + +replace go.opentelemetry.io/collector/consumer/consumerprofiles => ../../consumer/consumerprofiles + +replace go.opentelemetry.io/collector/consumer/consumertest => ../../consumer/consumertest diff --git a/exporter/exporterhelper/common.go b/exporter/exporterhelper/common.go index 4016c14e7c9..8d68c06bed7 100644 --- a/exporter/exporterhelper/common.go +++ b/exporter/exporterhelper/common.go @@ -110,7 +110,7 @@ func WithQueue(config QueueSettings) Option { NumConsumers: config.NumConsumers, QueueSize: config.QueueSize, }) - o.queueSender = newQueueSender(q, o.set, config.NumConsumers, o.exportFailureMessage, o.obsrep.telemetryBuilder) + o.queueSender = newQueueSender(q, o.set, config.NumConsumers, o.exportFailureMessage, o.obsrep) return nil } } @@ -132,7 +132,7 @@ func WithRequestQueue(cfg exporterqueue.Config, queueFactory exporterqueue.Facto DataType: o.signal, ExporterSettings: o.set, } - o.queueSender = newQueueSender(queueFactory(context.Background(), set, cfg), o.set, cfg.NumConsumers, o.exportFailureMessage, o.obsrep.telemetryBuilder) + o.queueSender = newQueueSender(queueFactory(context.Background(), set, cfg), o.set, cfg.NumConsumers, o.exportFailureMessage, o.obsrep) return nil } } @@ -250,7 +250,7 @@ type baseExporter struct { } func newBaseExporter(set exporter.Settings, signal component.DataType, osf obsrepSenderFactory, options ...Option) (*baseExporter, error) { - obsReport, err := NewObsReport(ObsReportSettings{ExporterID: set.ID, ExporterCreateSettings: set}) + obsReport, err := NewObsReport(ObsReportSettings{ExporterID: set.ID, ExporterCreateSettings: set, DataType: signal}) if err != nil { return nil, err } diff --git a/exporter/exporterhelper/documentation.md b/exporter/exporterhelper/documentation.md index 1279ece2db9..a82163a2bfa 100644 --- a/exporter/exporterhelper/documentation.md +++ b/exporter/exporterhelper/documentation.md @@ -12,7 +12,7 @@ Number of log records failed to be added to the sending queue. | Unit | Metric Type | Value Type | Monotonic | | ---- | ----------- | ---------- | --------- | -| 1 | Sum | Int | true | +| {records} | Sum | Int | true | ### otelcol_exporter_enqueue_failed_metric_points @@ -20,7 +20,7 @@ Number of metric points failed to be added to the sending queue. | Unit | Metric Type | Value Type | Monotonic | | ---- | ----------- | ---------- | --------- | -| 1 | Sum | Int | true | +| {datapoints} | Sum | Int | true | ### otelcol_exporter_enqueue_failed_spans @@ -28,7 +28,7 @@ Number of spans failed to be added to the sending queue. 
| Unit | Metric Type | Value Type | Monotonic | | ---- | ----------- | ---------- | --------- | -| 1 | Sum | Int | true | +| {spans} | Sum | Int | true | ### otelcol_exporter_queue_capacity @@ -36,7 +36,7 @@ Fixed capacity of the retry queue (in batches) | Unit | Metric Type | Value Type | | ---- | ----------- | ---------- | -| 1 | Gauge | Int | +| {batches} | Gauge | Int | ### otelcol_exporter_queue_size @@ -44,7 +44,7 @@ Current size of the retry queue (in batches) | Unit | Metric Type | Value Type | | ---- | ----------- | ---------- | -| 1 | Gauge | Int | +| {batches} | Gauge | Int | ### otelcol_exporter_send_failed_log_records @@ -52,7 +52,7 @@ Number of log records in failed attempts to send to destination. | Unit | Metric Type | Value Type | Monotonic | | ---- | ----------- | ---------- | --------- | -| 1 | Sum | Int | true | +| {records} | Sum | Int | true | ### otelcol_exporter_send_failed_metric_points @@ -60,7 +60,7 @@ Number of metric points in failed attempts to send to destination. | Unit | Metric Type | Value Type | Monotonic | | ---- | ----------- | ---------- | --------- | -| 1 | Sum | Int | true | +| {datapoints} | Sum | Int | true | ### otelcol_exporter_send_failed_spans @@ -68,7 +68,7 @@ Number of spans in failed attempts to send to destination. | Unit | Metric Type | Value Type | Monotonic | | ---- | ----------- | ---------- | --------- | -| 1 | Sum | Int | true | +| {spans} | Sum | Int | true | ### otelcol_exporter_sent_log_records @@ -76,7 +76,7 @@ Number of log record successfully sent to destination. | Unit | Metric Type | Value Type | Monotonic | | ---- | ----------- | ---------- | --------- | -| 1 | Sum | Int | true | +| {records} | Sum | Int | true | ### otelcol_exporter_sent_metric_points @@ -84,7 +84,7 @@ Number of metric points successfully sent to destination. | Unit | Metric Type | Value Type | Monotonic | | ---- | ----------- | ---------- | --------- | -| 1 | Sum | Int | true | +| {datapoints} | Sum | Int | true | ### otelcol_exporter_sent_spans @@ -92,4 +92,4 @@ Number of spans successfully sent to destination. | Unit | Metric Type | Value Type | Monotonic | | ---- | ----------- | ---------- | --------- | -| 1 | Sum | Int | true | +| {spans} | Sum | Int | true | diff --git a/exporter/exporterhelper/internal/metadata/generated_telemetry.go b/exporter/exporterhelper/internal/metadata/generated_telemetry.go index 125863e4d10..d0b27bf441b 100644 --- a/exporter/exporterhelper/internal/metadata/generated_telemetry.go +++ b/exporter/exporterhelper/internal/metadata/generated_telemetry.go @@ -56,7 +56,7 @@ func (builder *TelemetryBuilder) InitExporterQueueCapacity(cb func() int64, opts builder.ExporterQueueCapacity, err = builder.meter.Int64ObservableGauge( "otelcol_exporter_queue_capacity", metric.WithDescription("Fixed capacity of the retry queue (in batches)"), - metric.WithUnit("1"), + metric.WithUnit("{batches}"), ) if err != nil { return err @@ -74,7 +74,7 @@ func (builder *TelemetryBuilder) InitExporterQueueSize(cb func() int64, opts ... 
builder.ExporterQueueSize, err = builder.meter.Int64ObservableGauge( "otelcol_exporter_queue_size", metric.WithDescription("Current size of the retry queue (in batches)"), - metric.WithUnit("1"), + metric.WithUnit("{batches}"), ) if err != nil { return err @@ -102,55 +102,55 @@ func NewTelemetryBuilder(settings component.TelemetrySettings, options ...teleme builder.ExporterEnqueueFailedLogRecords, err = builder.meter.Int64Counter( "otelcol_exporter_enqueue_failed_log_records", metric.WithDescription("Number of log records failed to be added to the sending queue."), - metric.WithUnit("1"), + metric.WithUnit("{records}"), ) errs = errors.Join(errs, err) builder.ExporterEnqueueFailedMetricPoints, err = builder.meter.Int64Counter( "otelcol_exporter_enqueue_failed_metric_points", metric.WithDescription("Number of metric points failed to be added to the sending queue."), - metric.WithUnit("1"), + metric.WithUnit("{datapoints}"), ) errs = errors.Join(errs, err) builder.ExporterEnqueueFailedSpans, err = builder.meter.Int64Counter( "otelcol_exporter_enqueue_failed_spans", metric.WithDescription("Number of spans failed to be added to the sending queue."), - metric.WithUnit("1"), + metric.WithUnit("{spans}"), ) errs = errors.Join(errs, err) builder.ExporterSendFailedLogRecords, err = builder.meter.Int64Counter( "otelcol_exporter_send_failed_log_records", metric.WithDescription("Number of log records in failed attempts to send to destination."), - metric.WithUnit("1"), + metric.WithUnit("{records}"), ) errs = errors.Join(errs, err) builder.ExporterSendFailedMetricPoints, err = builder.meter.Int64Counter( "otelcol_exporter_send_failed_metric_points", metric.WithDescription("Number of metric points in failed attempts to send to destination."), - metric.WithUnit("1"), + metric.WithUnit("{datapoints}"), ) errs = errors.Join(errs, err) builder.ExporterSendFailedSpans, err = builder.meter.Int64Counter( "otelcol_exporter_send_failed_spans", metric.WithDescription("Number of spans in failed attempts to send to destination."), - metric.WithUnit("1"), + metric.WithUnit("{spans}"), ) errs = errors.Join(errs, err) builder.ExporterSentLogRecords, err = builder.meter.Int64Counter( "otelcol_exporter_sent_log_records", metric.WithDescription("Number of log record successfully sent to destination."), - metric.WithUnit("1"), + metric.WithUnit("{records}"), ) errs = errors.Join(errs, err) builder.ExporterSentMetricPoints, err = builder.meter.Int64Counter( "otelcol_exporter_sent_metric_points", metric.WithDescription("Number of metric points successfully sent to destination."), - metric.WithUnit("1"), + metric.WithUnit("{datapoints}"), ) errs = errors.Join(errs, err) builder.ExporterSentSpans, err = builder.meter.Int64Counter( "otelcol_exporter_sent_spans", metric.WithDescription("Number of spans successfully sent to destination."), - metric.WithUnit("1"), + metric.WithUnit("{spans}"), ) errs = errors.Join(errs, err) return &builder, errs diff --git a/exporter/exporterhelper/metadata.yaml b/exporter/exporterhelper/metadata.yaml index dea17a17089..e10113eb494 100644 --- a/exporter/exporterhelper/metadata.yaml +++ b/exporter/exporterhelper/metadata.yaml @@ -12,7 +12,7 @@ telemetry: exporter_sent_spans: enabled: true description: Number of spans successfully sent to destination. - unit: "1" + unit: "{spans}" sum: value_type: int monotonic: true @@ -20,7 +20,7 @@ telemetry: exporter_send_failed_spans: enabled: true description: Number of spans in failed attempts to send to destination. 
- unit: "1" + unit: "{spans}" sum: value_type: int monotonic: true @@ -28,7 +28,7 @@ telemetry: exporter_enqueue_failed_spans: enabled: true description: Number of spans failed to be added to the sending queue. - unit: "1" + unit: "{spans}" sum: value_type: int monotonic: true @@ -36,7 +36,7 @@ telemetry: exporter_sent_metric_points: enabled: true description: Number of metric points successfully sent to destination. - unit: "1" + unit: "{datapoints}" sum: value_type: int monotonic: true @@ -44,7 +44,7 @@ telemetry: exporter_send_failed_metric_points: enabled: true description: Number of metric points in failed attempts to send to destination. - unit: "1" + unit: "{datapoints}" sum: value_type: int monotonic: true @@ -52,7 +52,7 @@ telemetry: exporter_enqueue_failed_metric_points: enabled: true description: Number of metric points failed to be added to the sending queue. - unit: "1" + unit: "{datapoints}" sum: value_type: int monotonic: true @@ -60,7 +60,7 @@ telemetry: exporter_sent_log_records: enabled: true description: Number of log record successfully sent to destination. - unit: "1" + unit: "{records}" sum: value_type: int monotonic: true @@ -68,7 +68,7 @@ telemetry: exporter_send_failed_log_records: enabled: true description: Number of log records in failed attempts to send to destination. - unit: "1" + unit: "{records}" sum: value_type: int monotonic: true @@ -76,7 +76,7 @@ telemetry: exporter_enqueue_failed_log_records: enabled: true description: Number of log records failed to be added to the sending queue. - unit: "1" + unit: "{records}" sum: value_type: int monotonic: true @@ -84,7 +84,7 @@ telemetry: exporter_queue_size: enabled: true description: Current size of the retry queue (in batches) - unit: "1" + unit: "{batches}" optional: true gauge: value_type: int @@ -93,7 +93,7 @@ telemetry: exporter_queue_capacity: enabled: true description: Fixed capacity of the retry queue (in batches) - unit: "1" + unit: "{batches}" optional: true gauge: value_type: int diff --git a/exporter/exporterhelper/obsexporter.go b/exporter/exporterhelper/obsexporter.go index d5d366ccabe..70cc0642c47 100644 --- a/exporter/exporterhelper/obsexporter.go +++ b/exporter/exporterhelper/obsexporter.go @@ -26,6 +26,7 @@ type ObsReport struct { level configtelemetry.Level spanNamePrefix string tracer trace.Tracer + dataType component.DataType otelAttrs []attribute.KeyValue telemetryBuilder *metadata.TelemetryBuilder @@ -38,6 +39,7 @@ type ObsReport struct { type ObsReportSettings struct { ExporterID component.ID ExporterCreateSettings exporter.Settings + DataType component.DataType } // NewObsReport creates a new Exporter. 
@@ -58,7 +60,7 @@ func newExporter(cfg ObsReportSettings) (*ObsReport, error) { level: cfg.ExporterCreateSettings.TelemetrySettings.MetricsLevel, spanNamePrefix: obsmetrics.ExporterPrefix + cfg.ExporterID.String(), tracer: cfg.ExporterCreateSettings.TracerProvider.Tracer(cfg.ExporterID.String()), - + dataType: cfg.DataType, otelAttrs: []attribute.KeyValue{ attribute.String(obsmetrics.ExporterKey, cfg.ExporterID.String()), }, diff --git a/exporter/exporterhelper/obsreport_test.go b/exporter/exporterhelper/obsreport_test.go index daafd422e61..d7c4795615b 100644 --- a/exporter/exporterhelper/obsreport_test.go +++ b/exporter/exporterhelper/obsreport_test.go @@ -15,7 +15,6 @@ import ( ) func TestExportEnqueueFailure(t *testing.T) { - exporterID := component.MustNewID("fakeExporter") tt, err := componenttest.SetupTelemetry(exporterID) require.NoError(t, err) t.Cleanup(func() { require.NoError(t, tt.Shutdown(context.Background())) }) diff --git a/exporter/exporterhelper/queue_sender.go b/exporter/exporterhelper/queue_sender.go index 88d17a7c03f..d7166335557 100644 --- a/exporter/exporterhelper/queue_sender.go +++ b/exporter/exporterhelper/queue_sender.go @@ -15,7 +15,6 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/exporter" - "go.opentelemetry.io/collector/exporter/exporterhelper/internal/metadata" "go.opentelemetry.io/collector/exporter/exporterqueue" "go.opentelemetry.io/collector/exporter/internal/queue" "go.opentelemetry.io/collector/internal/obsreportconfig/obsmetrics" @@ -74,18 +73,18 @@ type queueSender struct { traceAttribute attribute.KeyValue consumers *queue.Consumers[Request] - telemetryBuilder *metadata.TelemetryBuilder - exporterID component.ID + obsrep *ObsReport + exporterID component.ID } func newQueueSender(q exporterqueue.Queue[Request], set exporter.Settings, numConsumers int, - exportFailureMessage string, telemetryBuilder *metadata.TelemetryBuilder) *queueSender { + exportFailureMessage string, obsrep *ObsReport) *queueSender { qs := &queueSender{ - queue: q, - numConsumers: numConsumers, - traceAttribute: attribute.String(obsmetrics.ExporterKey, set.ID.String()), - telemetryBuilder: telemetryBuilder, - exporterID: set.ID, + queue: q, + numConsumers: numConsumers, + traceAttribute: attribute.String(obsmetrics.ExporterKey, set.ID.String()), + obsrep: obsrep, + exporterID: set.ID, } consumeFunc := func(ctx context.Context, req Request) error { err := qs.nextSender.send(ctx, req) @@ -105,10 +104,12 @@ func (qs *queueSender) Start(ctx context.Context, host component.Host) error { return err } - opts := metric.WithAttributeSet(attribute.NewSet(attribute.String(obsmetrics.ExporterKey, qs.exporterID.String()))) + dataTypeAttr := attribute.String(obsmetrics.DataTypeKey, qs.obsrep.dataType.String()) return multierr.Append( - qs.telemetryBuilder.InitExporterQueueSize(func() int64 { return int64(qs.queue.Size()) }, opts), - qs.telemetryBuilder.InitExporterQueueCapacity(func() int64 { return int64(qs.queue.Capacity()) }, opts), + qs.obsrep.telemetryBuilder.InitExporterQueueSize(func() int64 { return int64(qs.queue.Size()) }, + metric.WithAttributeSet(attribute.NewSet(qs.traceAttribute, dataTypeAttr))), + qs.obsrep.telemetryBuilder.InitExporterQueueCapacity(func() int64 { return int64(qs.queue.Capacity()) }, + metric.WithAttributeSet(attribute.NewSet(qs.traceAttribute))), ) } diff --git a/exporter/exporterhelper/queue_sender_test.go b/exporter/exporterhelper/queue_sender_test.go index ff9aa7c5f74..bd783512d3b 100644 --- 
a/exporter/exporterhelper/queue_sender_test.go +++ b/exporter/exporterhelper/queue_sender_test.go @@ -11,6 +11,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "go.opentelemetry.io/otel/attribute" "go.uber.org/zap" "go.uber.org/zap/zaptest/observer" @@ -18,10 +19,10 @@ import ( "go.opentelemetry.io/collector/component/componenttest" "go.opentelemetry.io/collector/config/configretry" "go.opentelemetry.io/collector/exporter" - "go.opentelemetry.io/collector/exporter/exporterhelper/internal/metadata" "go.opentelemetry.io/collector/exporter/exporterqueue" "go.opentelemetry.io/collector/exporter/exportertest" "go.opentelemetry.io/collector/exporter/internal/queue" + "go.opentelemetry.io/collector/internal/obsreportconfig/obsmetrics" ) func TestQueuedRetry_StopWhileWaiting(t *testing.T) { @@ -202,28 +203,33 @@ func TestQueuedRetryHappyPath(t *testing.T) { }) } } -func TestQueuedRetry_QueueMetricsReported(t *testing.T) { - tt, err := componenttest.SetupTelemetry(defaultID) - require.NoError(t, err) - qCfg := NewDefaultQueueSettings() - qCfg.NumConsumers = 0 // to make every request go straight to the queue - rCfg := configretry.NewDefaultBackOffConfig() - set := exporter.Settings{ID: defaultID, TelemetrySettings: tt.TelemetrySettings(), BuildInfo: component.NewDefaultBuildInfo()} - be, err := newBaseExporter(set, defaultDataType, newObservabilityConsumerSender, - withMarshaler(mockRequestMarshaler), withUnmarshaler(mockRequestUnmarshaler(&mockRequest{})), - WithRetry(rCfg), WithQueue(qCfg)) - require.NoError(t, err) - require.NoError(t, be.Start(context.Background(), componenttest.NewNopHost())) - - require.NoError(t, tt.CheckExporterMetricGauge("otelcol_exporter_queue_capacity", int64(defaultQueueSize))) +func TestQueuedRetry_QueueMetricsReported(t *testing.T) { + dataTypes := []component.DataType{component.DataTypeLogs, component.DataTypeTraces, component.DataTypeMetrics} + for _, dataType := range dataTypes { + tt, err := componenttest.SetupTelemetry(defaultID) + require.NoError(t, err) + + qCfg := NewDefaultQueueSettings() + qCfg.NumConsumers = 0 // to make every request go straight to the queue + rCfg := configretry.NewDefaultBackOffConfig() + set := exporter.Settings{ID: defaultID, TelemetrySettings: tt.TelemetrySettings(), BuildInfo: component.NewDefaultBuildInfo()} + be, err := newBaseExporter(set, dataType, newObservabilityConsumerSender, + withMarshaler(mockRequestMarshaler), withUnmarshaler(mockRequestUnmarshaler(&mockRequest{})), + WithRetry(rCfg), WithQueue(qCfg)) + require.NoError(t, err) + require.NoError(t, be.Start(context.Background(), componenttest.NewNopHost())) + + require.NoError(t, tt.CheckExporterMetricGauge("otelcol_exporter_queue_capacity", int64(defaultQueueSize))) + + for i := 0; i < 7; i++ { + require.NoError(t, be.send(context.Background(), newErrorRequest())) + } + require.NoError(t, tt.CheckExporterMetricGauge("otelcol_exporter_queue_size", int64(7), + attribute.String(obsmetrics.DataTypeKey, dataType.String()))) - for i := 0; i < 7; i++ { - require.NoError(t, be.send(context.Background(), newErrorRequest())) + assert.NoError(t, be.Shutdown(context.Background())) } - require.NoError(t, tt.CheckExporterMetricGauge("otelcol_exporter_queue_size", int64(7))) - - assert.NoError(t, be.Shutdown(context.Background())) } func TestNoCancellationContext(t *testing.T) { @@ -426,9 +432,12 @@ func TestQueuedRetryPersistentEnabled_NoDataLossOnShutdown(t *testing.T) { func TestQueueSenderNoStartShutdown(t *testing.T) { queue := 
queue.NewBoundedMemoryQueue[Request](queue.MemoryQueueSettings[Request]{}) set := exportertest.NewNopSettings() - builder, err := metadata.NewTelemetryBuilder(set.TelemetrySettings) + obsrep, err := NewObsReport(ObsReportSettings{ + ExporterID: exporterID, + ExporterCreateSettings: exportertest.NewNopSettings(), + }) assert.NoError(t, err) - qs := newQueueSender(queue, set, 1, "", builder) + qs := newQueueSender(queue, set, 1, "", obsrep) assert.NoError(t, qs.Shutdown(context.Background())) } diff --git a/exporter/go.mod b/exporter/go.mod index 26f7e2514de..8858ef764e6 100644 --- a/exporter/go.mod +++ b/exporter/go.mod @@ -11,6 +11,7 @@ require ( go.opentelemetry.io/collector/config/configretry v1.12.0 go.opentelemetry.io/collector/config/configtelemetry v0.105.0 go.opentelemetry.io/collector/consumer v0.105.0 + go.opentelemetry.io/collector/consumer/consumertest v0.105.0 go.opentelemetry.io/collector/extension v0.105.0 go.opentelemetry.io/collector/pdata v1.12.0 go.opentelemetry.io/collector/pdata/testdata v0.105.0 @@ -51,6 +52,7 @@ require ( github.com/prometheus/common v0.55.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect go.opentelemetry.io/collector/confmap v0.105.0 // indirect + go.opentelemetry.io/collector/consumer/consumerprofiles v0.105.0 // indirect go.opentelemetry.io/collector/featuregate v1.12.0 // indirect go.opentelemetry.io/collector/internal/globalgates v0.105.0 // indirect go.opentelemetry.io/collector/pdata/pprofile v0.105.0 // indirect @@ -89,3 +91,7 @@ retract v0.76.0 // Depends on retracted pdata v1.0.0-rc10 module replace go.opentelemetry.io/collector/config/configretry => ../config/configretry replace go.opentelemetry.io/collector/config/configtelemetry => ../config/configtelemetry + +replace go.opentelemetry.io/collector/consumer/consumerprofiles => ../consumer/consumerprofiles + +replace go.opentelemetry.io/collector/consumer/consumertest => ../consumer/consumertest diff --git a/exporter/internal/factory.go b/exporter/internal/factory.go index 193e22a7718..efcc3164caa 100644 --- a/exporter/internal/factory.go +++ b/exporter/internal/factory.go @@ -7,31 +7,33 @@ import ( "context" "go.opentelemetry.io/collector/component" -) // Factory is a factory interface for exporters. +) + +// Factory is a factory interface for exporters. // This interface cannot be directly implemented. Implementations must // use the NewFactory to implement it. type Factory interface { component.Factory // CreateTracesExporter creates a TracesExporter based on this config. - // If the exporter type does not support tracing or if the config is not valid, - // an error will be returned instead. + // If the exporter type does not support tracing, + // this function returns the error [component.ErrDataTypeIsNotSupported]. CreateTracesExporter(ctx context.Context, set Settings, cfg component.Config) (Traces, error) // TracesExporterStability gets the stability level of the TracesExporter. TracesExporterStability() component.StabilityLevel // CreateMetricsExporter creates a MetricsExporter based on this config. - // If the exporter type does not support metrics or if the config is not valid, - // an error will be returned instead. + // If the exporter type does not support metrics, + // this function returns the error [component.ErrDataTypeIsNotSupported]. CreateMetricsExporter(ctx context.Context, set Settings, cfg component.Config) (Metrics, error) // MetricsExporterStability gets the stability level of the MetricsExporter. 
MetricsExporterStability() component.StabilityLevel // CreateLogsExporter creates a LogsExporter based on the config. - // If the exporter type does not support logs or if the config is not valid, - // an error will be returned instead. + // If the exporter type does not support logs, + // this function returns the error [component.ErrDataTypeIsNotSupported]. CreateLogsExporter(ctx context.Context, set Settings, cfg component.Config) (Logs, error) // LogsExporterStability gets the stability level of the LogsExporter. diff --git a/exporter/loggingexporter/go.mod b/exporter/loggingexporter/go.mod index b4d7de699d1..9b7444e4a0d 100644 --- a/exporter/loggingexporter/go.mod +++ b/exporter/loggingexporter/go.mod @@ -42,9 +42,12 @@ require ( go.opentelemetry.io/collector v0.105.0 // indirect go.opentelemetry.io/collector/config/configretry v1.12.0 // indirect go.opentelemetry.io/collector/consumer v0.105.0 // indirect + go.opentelemetry.io/collector/consumer/consumerprofiles v0.105.0 // indirect + go.opentelemetry.io/collector/consumer/consumertest v0.105.0 // indirect go.opentelemetry.io/collector/extension v0.105.0 // indirect go.opentelemetry.io/collector/featuregate v1.12.0 // indirect go.opentelemetry.io/collector/internal/globalgates v0.105.0 // indirect + go.opentelemetry.io/collector/pdata/pprofile v0.105.0 // indirect go.opentelemetry.io/collector/receiver v0.105.0 // indirect go.opentelemetry.io/otel v1.28.0 // indirect go.opentelemetry.io/otel/exporters/prometheus v0.50.0 // indirect @@ -94,3 +97,7 @@ replace go.opentelemetry.io/collector/config/configtelemetry => ../../config/con replace go.opentelemetry.io/collector/config/configretry => ../../config/configretry replace go.opentelemetry.io/collector/pdata/pprofile => ../../pdata/pprofile + +replace go.opentelemetry.io/collector/consumer/consumerprofiles => ../../consumer/consumerprofiles + +replace go.opentelemetry.io/collector/consumer/consumertest => ../../consumer/consumertest diff --git a/exporter/nopexporter/go.mod b/exporter/nopexporter/go.mod index 757e0eb0719..a8d37c00208 100644 --- a/exporter/nopexporter/go.mod +++ b/exporter/nopexporter/go.mod @@ -6,7 +6,7 @@ require ( github.com/stretchr/testify v1.9.0 go.opentelemetry.io/collector/component v0.105.0 go.opentelemetry.io/collector/confmap v0.105.0 - go.opentelemetry.io/collector/consumer v0.105.0 + go.opentelemetry.io/collector/consumer/consumertest v0.105.0 go.opentelemetry.io/collector/exporter v0.105.0 go.opentelemetry.io/collector/pdata v1.12.0 go.uber.org/goleak v1.3.0 @@ -37,8 +37,11 @@ require ( github.com/prometheus/common v0.55.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect go.opentelemetry.io/collector/config/configtelemetry v0.105.0 // indirect + go.opentelemetry.io/collector/consumer v0.105.0 // indirect + go.opentelemetry.io/collector/consumer/consumerprofiles v0.105.0 // indirect go.opentelemetry.io/collector/featuregate v1.12.0 // indirect go.opentelemetry.io/collector/internal/globalgates v0.105.0 // indirect + go.opentelemetry.io/collector/pdata/pprofile v0.105.0 // indirect go.opentelemetry.io/collector/receiver v0.105.0 // indirect go.opentelemetry.io/otel v1.28.0 // indirect go.opentelemetry.io/otel/exporters/prometheus v0.50.0 // indirect @@ -84,3 +87,7 @@ replace go.opentelemetry.io/collector/config/configtelemetry => ../../config/con replace go.opentelemetry.io/collector/extension => ../../extension replace go.opentelemetry.io/collector/pdata/pprofile => ../../pdata/pprofile + +replace 
go.opentelemetry.io/collector/consumer/consumerprofiles => ../../consumer/consumerprofiles + +replace go.opentelemetry.io/collector/consumer/consumertest => ../../consumer/consumertest diff --git a/exporter/otlpexporter/go.mod b/exporter/otlpexporter/go.mod index f837c8b0065..191e4bb061d 100644 --- a/exporter/otlpexporter/go.mod +++ b/exporter/otlpexporter/go.mod @@ -57,6 +57,8 @@ require ( go.opentelemetry.io/collector/config/confignet v0.105.0 // indirect go.opentelemetry.io/collector/config/configtelemetry v0.105.0 // indirect go.opentelemetry.io/collector/config/internal v0.105.0 // indirect + go.opentelemetry.io/collector/consumer/consumerprofiles v0.105.0 // indirect + go.opentelemetry.io/collector/consumer/consumertest v0.105.0 // indirect go.opentelemetry.io/collector/extension v0.105.0 // indirect go.opentelemetry.io/collector/extension/auth v0.105.0 // indirect go.opentelemetry.io/collector/featuregate v1.12.0 // indirect @@ -138,3 +140,7 @@ replace go.opentelemetry.io/collector/config/configtelemetry => ../../config/con replace go.opentelemetry.io/collector/config/configretry => ../../config/configretry replace go.opentelemetry.io/collector/internal/globalgates => ../../internal/globalgates + +replace go.opentelemetry.io/collector/consumer/consumerprofiles => ../../consumer/consumerprofiles + +replace go.opentelemetry.io/collector/consumer/consumertest => ../../consumer/consumertest diff --git a/exporter/otlphttpexporter/go.mod b/exporter/otlphttpexporter/go.mod index 59ba2cc3c42..e6604d26407 100644 --- a/exporter/otlphttpexporter/go.mod +++ b/exporter/otlphttpexporter/go.mod @@ -56,10 +56,13 @@ require ( go.opentelemetry.io/collector/config/configauth v0.105.0 // indirect go.opentelemetry.io/collector/config/configtelemetry v0.105.0 // indirect go.opentelemetry.io/collector/config/internal v0.105.0 // indirect + go.opentelemetry.io/collector/consumer/consumerprofiles v0.105.0 // indirect + go.opentelemetry.io/collector/consumer/consumertest v0.105.0 // indirect go.opentelemetry.io/collector/extension v0.105.0 // indirect go.opentelemetry.io/collector/extension/auth v0.105.0 // indirect go.opentelemetry.io/collector/featuregate v1.12.0 // indirect go.opentelemetry.io/collector/internal/globalgates v0.105.0 // indirect + go.opentelemetry.io/collector/pdata/pprofile v0.105.0 // indirect go.opentelemetry.io/collector/receiver v0.105.0 // indirect go.opentelemetry.io/contrib/config v0.8.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect @@ -134,3 +137,7 @@ retract ( replace go.opentelemetry.io/collector/config/configretry => ../../config/configretry replace go.opentelemetry.io/collector/internal/globalgates => ../../internal/globalgates + +replace go.opentelemetry.io/collector/consumer/consumerprofiles => ../../consumer/consumerprofiles + +replace go.opentelemetry.io/collector/consumer/consumertest => ../../consumer/consumertest diff --git a/extension/ballastextension/go.mod b/extension/ballastextension/go.mod index 15a7d2d51d7..054f7cd7c10 100644 --- a/extension/ballastextension/go.mod +++ b/extension/ballastextension/go.mod @@ -87,3 +87,7 @@ replace go.opentelemetry.io/collector/config/configtelemetry => ../../config/con replace go.opentelemetry.io/collector/pdata/testdata => ../../pdata/testdata replace go.opentelemetry.io/collector/pdata/pprofile => ../../pdata/pprofile + +replace go.opentelemetry.io/collector/consumer/consumerprofiles => ../../consumer/consumerprofiles + +replace go.opentelemetry.io/collector/consumer/consumertest => 
../../consumer/consumertest diff --git a/extension/memorylimiterextension/go.mod b/extension/memorylimiterextension/go.mod index df7cf0f39b5..09baf6c125f 100644 --- a/extension/memorylimiterextension/go.mod +++ b/extension/memorylimiterextension/go.mod @@ -81,3 +81,7 @@ replace go.opentelemetry.io/collector/config/configtelemetry => ../../config/con replace go.opentelemetry.io/collector/pdata/testdata => ../../pdata/testdata replace go.opentelemetry.io/collector/pdata/pprofile => ../../pdata/pprofile + +replace go.opentelemetry.io/collector/consumer/consumerprofiles => ../../consumer/consumerprofiles + +replace go.opentelemetry.io/collector/consumer/consumertest => ../../consumer/consumertest diff --git a/extension/zpagesextension/go.mod b/extension/zpagesextension/go.mod index 3e9a20d71ed..08d18b0c05e 100644 --- a/extension/zpagesextension/go.mod +++ b/extension/zpagesextension/go.mod @@ -122,3 +122,7 @@ replace go.opentelemetry.io/collector/config/confighttp => ../../config/confight replace go.opentelemetry.io/collector/pdata/pprofile => ../../pdata/pprofile replace go.opentelemetry.io/collector/internal/globalgates => ../../internal/globalgates + +replace go.opentelemetry.io/collector/consumer/consumerprofiles => ../../consumer/consumerprofiles + +replace go.opentelemetry.io/collector/consumer/consumertest => ../../consumer/consumertest diff --git a/go.mod b/go.mod index d440896095b..00fd2e54769 100644 --- a/go.mod +++ b/go.mod @@ -16,6 +16,7 @@ require ( go.opentelemetry.io/collector/component v0.105.0 go.opentelemetry.io/collector/confmap v0.105.0 go.opentelemetry.io/collector/consumer v0.105.0 + go.opentelemetry.io/collector/consumer/consumertest v0.105.0 go.opentelemetry.io/collector/featuregate v1.12.0 go.opentelemetry.io/collector/pdata v1.12.0 go.opentelemetry.io/collector/pdata/testdata v0.105.0 @@ -59,6 +60,7 @@ require ( github.com/tklauser/numcpus v0.6.1 // indirect github.com/yusufpapurcu/wmi v1.2.4 // indirect go.opentelemetry.io/collector/config/configtelemetry v0.105.0 // indirect + go.opentelemetry.io/collector/consumer/consumerprofiles v0.105.0 // indirect go.opentelemetry.io/collector/internal/globalgates v0.105.0 // indirect go.opentelemetry.io/collector/pdata/pprofile v0.105.0 // indirect go.opentelemetry.io/otel v1.28.0 // indirect @@ -95,6 +97,10 @@ replace go.opentelemetry.io/collector/config/configtelemetry => ./config/configt replace go.opentelemetry.io/collector/consumer => ./consumer +replace go.opentelemetry.io/collector/consumer/consumerprofiles => ./consumer/consumerprofiles + +replace go.opentelemetry.io/collector/consumer/consumertest => ./consumer/consumertest + replace go.opentelemetry.io/collector/featuregate => ./featuregate replace go.opentelemetry.io/collector/pdata => ./pdata diff --git a/internal/e2e/go.mod b/internal/e2e/go.mod index 5792d76c2d0..b16094c5fc1 100644 --- a/internal/e2e/go.mod +++ b/internal/e2e/go.mod @@ -13,6 +13,7 @@ require ( go.opentelemetry.io/collector/config/configtls v1.12.0 go.opentelemetry.io/collector/confmap v0.105.0 go.opentelemetry.io/collector/consumer v0.105.0 + go.opentelemetry.io/collector/consumer/consumertest v0.105.0 go.opentelemetry.io/collector/exporter v0.105.0 go.opentelemetry.io/collector/exporter/otlpexporter v0.105.0 go.opentelemetry.io/collector/exporter/otlphttpexporter v0.105.0 @@ -60,6 +61,7 @@ require ( go.opentelemetry.io/collector/config/confignet v0.105.0 // indirect go.opentelemetry.io/collector/config/configtelemetry v0.105.0 // indirect go.opentelemetry.io/collector/config/internal v0.105.0 // 
indirect + go.opentelemetry.io/collector/consumer/consumerprofiles v0.105.0 // indirect go.opentelemetry.io/collector/extension v0.105.0 // indirect go.opentelemetry.io/collector/extension/auth v0.105.0 // indirect go.opentelemetry.io/collector/featuregate v1.12.0 // indirect @@ -148,3 +150,7 @@ replace go.opentelemetry.io/collector/featuregate => ../../featuregate replace go.opentelemetry.io/collector/config/configtelemetry => ../../config/configtelemetry replace go.opentelemetry.io/collector/internal/globalgates => ../globalgates + +replace go.opentelemetry.io/collector/consumer/consumerprofiles => ../../consumer/consumerprofiles + +replace go.opentelemetry.io/collector/consumer/consumertest => ../../consumer/consumertest diff --git a/internal/memorylimiter/memorylimiter.go b/internal/memorylimiter/memorylimiter.go index a010cba5427..db93795127a 100644 --- a/internal/memorylimiter/memorylimiter.go +++ b/internal/memorylimiter/memorylimiter.go @@ -44,7 +44,6 @@ type MemoryLimiter struct { usageChecker memUsageChecker memCheckWait time.Duration - ballastSize uint64 // mustRefuse is used to indicate when data should be refused. mustRefuse *atomic.Bool @@ -58,8 +57,7 @@ type MemoryLimiter struct { readMemStatsFn func(m *runtime.MemStats) // Fields used for logging. - logger *zap.Logger - configMismatchedLogged bool + logger *zap.Logger refCounterLock sync.Mutex refCounter int @@ -114,14 +112,7 @@ func (ml *MemoryLimiter) startMonitoring() { } } -func (ml *MemoryLimiter) Start(_ context.Context, host component.Host) error { - extensions := host.GetExtensions() - for _, extension := range extensions { - if ext, ok := extension.(interface{ GetBallastSize() uint64 }); ok { - ml.ballastSize = ext.GetBallastSize() - break - } - } +func (ml *MemoryLimiter) Start(_ context.Context, _ component.Host) error { ml.startMonitoring() return nil } @@ -168,16 +159,6 @@ func getMemUsageChecker(cfg *Config, logger *zap.Logger) (*memUsageChecker, erro func (ml *MemoryLimiter) readMemStats() *runtime.MemStats { ms := &runtime.MemStats{} ml.readMemStatsFn(ms) - // If proper configured ms.Alloc should be at least ml.ballastSize but since - // a misconfiguration is possible check for that here. - if ms.Alloc >= ml.ballastSize { - ms.Alloc -= ml.ballastSize - } else if !ml.configMismatchedLogged { - // This indicates misconfiguration. Log it once. - ml.configMismatchedLogged = true - ml.logger.Warn(`"size_mib" in ballast extension is likely incorrectly configured.`) - } - return ms } diff --git a/internal/memorylimiter/memorylimiter_test.go b/internal/memorylimiter/memorylimiter_test.go index e9e92a33f70..6919ce50968 100644 --- a/internal/memorylimiter/memorylimiter_test.go +++ b/internal/memorylimiter/memorylimiter_test.go @@ -4,17 +4,14 @@ package memorylimiter import ( - "context" "runtime" "sync/atomic" "testing" - "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.uber.org/zap" - "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/internal/iruntime" ) @@ -43,22 +40,6 @@ func TestMemoryPressureResponse(t *testing.T) { ml.CheckMemLimits() assert.True(t, ml.MustRefuse()) - // Check ballast effect - ml.ballastSize = 1000 - - // Below memAllocLimit accounting for ballast. - currentMemAlloc = 800 + ml.ballastSize - ml.CheckMemLimits() - assert.False(t, ml.MustRefuse()) - - // Above memAllocLimit even accounting for ballast. - currentMemAlloc = 1800 + ml.ballastSize - ml.CheckMemLimits() - assert.True(t, ml.MustRefuse()) - - // Restore ballast to default. 
- ml.ballastSize = 0 - // Check spike limit ml.usageChecker.memSpikeLimit = 512 @@ -151,38 +132,3 @@ func TestRefuseDecision(t *testing.T) { }) } } - -func TestBallastSize(t *testing.T) { - cfg := &Config{ - CheckInterval: 10 * time.Second, - MemoryLimitMiB: 1024, - } - got, err := NewMemoryLimiter(cfg, zap.NewNop()) - require.NoError(t, err) - - got.startMonitoring() - require.NoError(t, got.Start(context.Background(), &host{ballastSize: 113})) - assert.Equal(t, uint64(113), got.ballastSize) - require.NoError(t, got.Shutdown(context.Background())) -} - -type host struct { - ballastSize uint64 - component.Host -} - -func (h *host) GetExtensions() map[component.ID]component.Component { - ret := make(map[component.ID]component.Component) - ret[component.MustNewID("ballast")] = &ballastExtension{ballastSize: h.ballastSize} - return ret -} - -type ballastExtension struct { - ballastSize uint64 - component.StartFunc - component.ShutdownFunc -} - -func (be *ballastExtension) GetBallastSize() uint64 { - return be.ballastSize -} diff --git a/internal/obsreportconfig/obsmetrics/obs_exporter.go b/internal/obsreportconfig/obsmetrics/obs_exporter.go index b87270f5e5a..ffaa33a54a4 100644 --- a/internal/obsreportconfig/obsmetrics/obs_exporter.go +++ b/internal/obsreportconfig/obsmetrics/obs_exporter.go @@ -7,6 +7,9 @@ const ( // ExporterKey used to identify exporters in metrics and traces. ExporterKey = "exporter" + // DataTypeKey used to identify the data type in the queue size metric. + DataTypeKey = "data_type" + // SentSpansKey used to track spans sent by exporters. SentSpansKey = "sent_spans" // FailedToSendSpansKey used to track spans that failed to be sent by exporters. diff --git a/otelcol/go.mod b/otelcol/go.mod index f43a028057a..0699ca0b342 100644 --- a/otelcol/go.mod +++ b/otelcol/go.mod @@ -65,6 +65,8 @@ require ( go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/collector v0.105.0 // indirect go.opentelemetry.io/collector/consumer v0.105.0 // indirect + go.opentelemetry.io/collector/consumer/consumerprofiles v0.105.0 // indirect + go.opentelemetry.io/collector/consumer/consumertest v0.105.0 // indirect go.opentelemetry.io/collector/pdata v1.12.0 // indirect go.opentelemetry.io/collector/pdata/pprofile v0.105.0 // indirect go.opentelemetry.io/collector/pdata/testdata v0.105.0 // indirect @@ -148,3 +150,7 @@ replace go.opentelemetry.io/collector/config/configtls => ../config/configtls replace go.opentelemetry.io/collector/config/configopaque => ../config/configopaque replace go.opentelemetry.io/collector/internal/globalgates => ../internal/globalgates + +replace go.opentelemetry.io/collector/consumer/consumerprofiles => ../consumer/consumerprofiles + +replace go.opentelemetry.io/collector/consumer/consumertest => ../consumer/consumertest diff --git a/otelcol/otelcoltest/go.mod b/otelcol/otelcoltest/go.mod index 17bccd9fe24..49b19a92393 100644 --- a/otelcol/otelcoltest/go.mod +++ b/otelcol/otelcoltest/go.mod @@ -63,6 +63,8 @@ require ( go.opentelemetry.io/collector v0.105.0 // indirect go.opentelemetry.io/collector/config/configtelemetry v0.105.0 // indirect go.opentelemetry.io/collector/consumer v0.105.0 // indirect + go.opentelemetry.io/collector/consumer/consumerprofiles v0.105.0 // indirect + go.opentelemetry.io/collector/consumer/consumertest v0.105.0 // indirect go.opentelemetry.io/collector/featuregate v1.12.0 // indirect go.opentelemetry.io/collector/internal/globalgates v0.105.0 // indirect go.opentelemetry.io/collector/pdata v1.12.0 // indirect @@ -166,3 +168,7 @@ replace 
go.opentelemetry.io/collector/semconv => ../../semconv replace go.opentelemetry.io/collector/extension/auth => ../../extension/auth replace go.opentelemetry.io/collector/internal/globalgates => ../../internal/globalgates + +replace go.opentelemetry.io/collector/consumer/consumerprofiles => ../../consumer/consumerprofiles + +replace go.opentelemetry.io/collector/consumer/consumertest => ../../consumer/consumertest diff --git a/processor/batchprocessor/go.mod b/processor/batchprocessor/go.mod index b50acb84105..32b1d86db1b 100644 --- a/processor/batchprocessor/go.mod +++ b/processor/batchprocessor/go.mod @@ -9,6 +9,7 @@ require ( go.opentelemetry.io/collector/config/configtelemetry v0.105.0 go.opentelemetry.io/collector/confmap v0.105.0 go.opentelemetry.io/collector/consumer v0.105.0 + go.opentelemetry.io/collector/consumer/consumertest v0.105.0 go.opentelemetry.io/collector/pdata v1.12.0 go.opentelemetry.io/collector/pdata/testdata v0.105.0 go.opentelemetry.io/collector/processor v0.105.0 @@ -44,6 +45,7 @@ require ( github.com/prometheus/client_model v0.6.1 // indirect github.com/prometheus/common v0.55.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect + go.opentelemetry.io/collector/consumer/consumerprofiles v0.105.0 // indirect go.opentelemetry.io/collector/featuregate v1.12.0 // indirect go.opentelemetry.io/collector/internal/globalgates v0.105.0 // indirect go.opentelemetry.io/collector/pdata/pprofile v0.105.0 // indirect @@ -85,3 +87,7 @@ retract ( replace go.opentelemetry.io/collector/config/configtelemetry => ../../config/configtelemetry replace go.opentelemetry.io/collector/pdata/pprofile => ../../pdata/pprofile + +replace go.opentelemetry.io/collector/consumer/consumerprofiles => ../../consumer/consumerprofiles + +replace go.opentelemetry.io/collector/consumer/consumertest => ../../consumer/consumertest diff --git a/processor/go.mod b/processor/go.mod index 4cdcb9b6e2f..f0af3fb6f1b 100644 --- a/processor/go.mod +++ b/processor/go.mod @@ -9,6 +9,7 @@ require ( go.opentelemetry.io/collector/component v0.105.0 go.opentelemetry.io/collector/config/configtelemetry v0.105.0 go.opentelemetry.io/collector/consumer v0.105.0 + go.opentelemetry.io/collector/consumer/consumertest v0.105.0 go.opentelemetry.io/collector/pdata v1.12.0 go.opentelemetry.io/collector/pdata/testdata v0.105.0 go.opentelemetry.io/otel v1.28.0 @@ -35,6 +36,7 @@ require ( github.com/prometheus/client_model v0.6.1 // indirect github.com/prometheus/common v0.55.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect + go.opentelemetry.io/collector/consumer/consumerprofiles v0.105.0 // indirect go.opentelemetry.io/collector/pdata/pprofile v0.105.0 // indirect go.opentelemetry.io/otel/exporters/prometheus v0.50.0 // indirect go.opentelemetry.io/otel/sdk v1.28.0 // indirect @@ -67,3 +69,7 @@ replace go.opentelemetry.io/collector/pdata/pprofile => ../pdata/pprofile replace go.opentelemetry.io/collector/config/configtelemetry => ../config/configtelemetry replace go.opentelemetry.io/collector/internal/globalgates => ../internal/globalgates + +replace go.opentelemetry.io/collector/consumer/consumerprofiles => ../consumer/consumerprofiles + +replace go.opentelemetry.io/collector/consumer/consumertest => ../consumer/consumertest diff --git a/processor/internal/factory.go b/processor/internal/factory.go index 293e65a5ae2..71adc202a1d 100644 --- a/processor/internal/factory.go +++ b/processor/internal/factory.go @@ -18,24 +18,27 @@ type Factory interface { component.Factory // CreateTracesProcessor creates a 
TracesProcessor based on this config. - // If the processor type does not support tracing or if the config is not valid, - // an error will be returned instead. + // If the processor type does not support traces, + // this function returns the error [component.ErrDataTypeIsNotSupported]. + // Implementers can assume `nextConsumer` is never nil. CreateTracesProcessor(ctx context.Context, set Settings, cfg component.Config, nextConsumer consumer.Traces) (Traces, error) // TracesProcessorStability gets the stability level of the TracesProcessor. TracesProcessorStability() component.StabilityLevel // CreateMetricsProcessor creates a MetricsProcessor based on this config. - // If the processor type does not support metrics or if the config is not valid, - // an error will be returned instead. + // If the processor type does not support metrics, + // this function returns the error [component.ErrDataTypeIsNotSupported]. + // Implementers can assume `nextConsumer` is never nil. CreateMetricsProcessor(ctx context.Context, set Settings, cfg component.Config, nextConsumer consumer.Metrics) (Metrics, error) // MetricsProcessorStability gets the stability level of the MetricsProcessor. MetricsProcessorStability() component.StabilityLevel // CreateLogsProcessor creates a LogsProcessor based on the config. - // If the processor type does not support logs or if the config is not valid, - // an error will be returned instead. + // If the processor type does not support logs, + // this function returns the error [component.ErrDataTypeIsNotSupported]. + // Implementers can assume `nextConsumer` is never nil. CreateLogsProcessor(ctx context.Context, set Settings, cfg component.Config, nextConsumer consumer.Logs) (Logs, error) // LogsProcessorStability gets the stability level of the LogsProcessor. 
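The comment changes above replace a vague "an error will be returned" note with a concrete contract: unsupported signals yield `component.ErrDataTypeIsNotSupported`, and `nextConsumer` is never nil. As a rough, editor-added sketch of how a caller might rely on that contract (the helper name and package are assumptions, not code from this patch):

```go
package example

import (
	"context"
	"errors"

	"go.opentelemetry.io/collector/component"
	"go.opentelemetry.io/collector/consumer"
	"go.opentelemetry.io/collector/processor"
)

// buildLogsProcessor is a hypothetical helper, not part of this patch. It
// treats component.ErrDataTypeIsNotSupported as "this processor type does not
// implement logs" rather than a fatal configuration error. The next consumer
// passed in is assumed to be non-nil, per the updated factory docs.
func buildLogsProcessor(
	ctx context.Context,
	f processor.Factory,
	set processor.Settings,
	cfg component.Config,
	next consumer.Logs,
) (processor.Logs, bool, error) {
	p, err := f.CreateLogsProcessor(ctx, set, cfg, next)
	if errors.Is(err, component.ErrDataTypeIsNotSupported) {
		// The factory does not support a logs pipeline; skip this signal.
		return nil, false, nil
	}
	if err != nil {
		return nil, false, err
	}
	return p, true, nil
}
```

The exporter factory comments updated earlier in this patch follow the same convention.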
diff --git a/processor/memorylimiterprocessor/go.mod b/processor/memorylimiterprocessor/go.mod index 4e83df6e363..17860bcb612 100644 --- a/processor/memorylimiterprocessor/go.mod +++ b/processor/memorylimiterprocessor/go.mod @@ -8,6 +8,7 @@ require ( go.opentelemetry.io/collector/component v0.105.0 go.opentelemetry.io/collector/confmap v0.105.0 go.opentelemetry.io/collector/consumer v0.105.0 + go.opentelemetry.io/collector/consumer/consumertest v0.105.0 go.opentelemetry.io/collector/pdata v1.12.0 go.opentelemetry.io/collector/processor v0.105.0 go.uber.org/goleak v1.3.0 @@ -45,6 +46,7 @@ require ( github.com/tklauser/numcpus v0.6.1 // indirect github.com/yusufpapurcu/wmi v1.2.4 // indirect go.opentelemetry.io/collector/config/configtelemetry v0.105.0 // indirect + go.opentelemetry.io/collector/consumer/consumerprofiles v0.105.0 // indirect go.opentelemetry.io/collector/featuregate v1.12.0 // indirect go.opentelemetry.io/collector/internal/globalgates v0.105.0 // indirect go.opentelemetry.io/collector/pdata/pprofile v0.105.0 // indirect @@ -92,3 +94,7 @@ retract ( replace go.opentelemetry.io/collector/config/configtelemetry => ../../config/configtelemetry replace go.opentelemetry.io/collector/pdata/pprofile => ../../pdata/pprofile + +replace go.opentelemetry.io/collector/consumer/consumerprofiles => ../../consumer/consumerprofiles + +replace go.opentelemetry.io/collector/consumer/consumertest => ../../consumer/consumertest diff --git a/processor/memorylimiterprocessor/memorylimiter_test.go b/processor/memorylimiterprocessor/memorylimiter_test.go index 1ce63d794f3..6172fa39889 100644 --- a/processor/memorylimiterprocessor/memorylimiter_test.go +++ b/processor/memorylimiterprocessor/memorylimiter_test.go @@ -122,7 +122,6 @@ func TestMetricsMemoryPressureResponse(t *testing.T) { tests := []struct { name string mlCfg *Config - ballastSize uint64 memAlloc uint64 expectError bool }{ @@ -133,7 +132,6 @@ func TestMetricsMemoryPressureResponse(t *testing.T) { MemoryLimitPercentage: 50, MemorySpikePercentage: 1, }, - ballastSize: 0, memAlloc: 800, expectError: false, }, @@ -144,29 +142,6 @@ func TestMetricsMemoryPressureResponse(t *testing.T) { MemoryLimitPercentage: 50, MemorySpikePercentage: 1, }, - ballastSize: 0, - memAlloc: 1800, - expectError: true, - }, - { - name: "Below memAllocLimit accounting for ballast", - mlCfg: &Config{ - CheckInterval: time.Second, - MemoryLimitPercentage: 50, - MemorySpikePercentage: 1, - }, - ballastSize: 1000, - memAlloc: 800, - expectError: false, - }, - { - name: "Above memAllocLimit even accounting for ballast", - mlCfg: &Config{ - CheckInterval: time.Second, - MemoryLimitPercentage: 50, - MemorySpikePercentage: 1, - }, - ballastSize: 1000, memAlloc: 1800, expectError: true, }, @@ -177,7 +152,6 @@ func TestMetricsMemoryPressureResponse(t *testing.T) { MemoryLimitPercentage: 50, MemorySpikePercentage: 10, }, - ballastSize: 0, memAlloc: 800, expectError: false, }, @@ -188,7 +162,6 @@ func TestMetricsMemoryPressureResponse(t *testing.T) { MemoryLimitPercentage: 50, MemorySpikePercentage: 11, }, - ballastSize: 0, memAlloc: 800, expectError: true, }, @@ -197,7 +170,7 @@ func TestMetricsMemoryPressureResponse(t *testing.T) { t.Run(tt.name, func(t *testing.T) { memorylimiter.GetMemoryFn = totalMemory memorylimiter.ReadMemStatsFn = func(ms *runtime.MemStats) { - ms.Alloc = tt.memAlloc + tt.ballastSize + ms.Alloc = tt.memAlloc } ml, err := newMemoryLimiterProcessor(processortest.NewNopSettings(), tt.mlCfg) @@ -213,7 +186,7 @@ func TestMetricsMemoryPressureResponse(t 
*testing.T) { processorhelper.WithShutdown(ml.shutdown)) require.NoError(t, err) - assert.NoError(t, mp.Start(ctx, &host{ballastSize: tt.ballastSize})) + assert.NoError(t, mp.Start(ctx, &host{})) ml.memlimiter.CheckMemLimits() err = mp.ConsumeMetrics(ctx, md) if tt.expectError { @@ -239,7 +212,6 @@ func TestTraceMemoryPressureResponse(t *testing.T) { tests := []struct { name string mlCfg *Config - ballastSize uint64 memAlloc uint64 expectError bool }{ @@ -250,7 +222,6 @@ func TestTraceMemoryPressureResponse(t *testing.T) { MemoryLimitPercentage: 50, MemorySpikePercentage: 1, }, - ballastSize: 0, memAlloc: 800, expectError: false, }, @@ -261,29 +232,6 @@ func TestTraceMemoryPressureResponse(t *testing.T) { MemoryLimitPercentage: 50, MemorySpikePercentage: 1, }, - ballastSize: 0, - memAlloc: 1800, - expectError: true, - }, - { - name: "Below memAllocLimit accounting for ballast", - mlCfg: &Config{ - CheckInterval: time.Second, - MemoryLimitPercentage: 50, - MemorySpikePercentage: 1, - }, - ballastSize: 1000, - memAlloc: 800, - expectError: false, - }, - { - name: "Above memAllocLimit even accounting for ballast", - mlCfg: &Config{ - CheckInterval: time.Second, - MemoryLimitPercentage: 50, - MemorySpikePercentage: 1, - }, - ballastSize: 1000, memAlloc: 1800, expectError: true, }, @@ -294,7 +242,6 @@ func TestTraceMemoryPressureResponse(t *testing.T) { MemoryLimitPercentage: 50, MemorySpikePercentage: 10, }, - ballastSize: 0, memAlloc: 800, expectError: false, }, @@ -305,7 +252,6 @@ func TestTraceMemoryPressureResponse(t *testing.T) { MemoryLimitPercentage: 50, MemorySpikePercentage: 11, }, - ballastSize: 0, memAlloc: 800, expectError: true, }, @@ -314,7 +260,7 @@ func TestTraceMemoryPressureResponse(t *testing.T) { t.Run(tt.name, func(t *testing.T) { memorylimiter.GetMemoryFn = totalMemory memorylimiter.ReadMemStatsFn = func(ms *runtime.MemStats) { - ms.Alloc = tt.memAlloc + tt.ballastSize + ms.Alloc = tt.memAlloc } ml, err := newMemoryLimiterProcessor(processortest.NewNopSettings(), tt.mlCfg) @@ -330,7 +276,7 @@ func TestTraceMemoryPressureResponse(t *testing.T) { processorhelper.WithShutdown(ml.shutdown)) require.NoError(t, err) - assert.NoError(t, tp.Start(ctx, &host{ballastSize: tt.ballastSize})) + assert.NoError(t, tp.Start(ctx, &host{})) ml.memlimiter.CheckMemLimits() err = tp.ConsumeTraces(ctx, td) if tt.expectError { @@ -356,7 +302,6 @@ func TestLogMemoryPressureResponse(t *testing.T) { tests := []struct { name string mlCfg *Config - ballastSize uint64 memAlloc uint64 expectError bool }{ @@ -367,7 +312,6 @@ func TestLogMemoryPressureResponse(t *testing.T) { MemoryLimitPercentage: 50, MemorySpikePercentage: 1, }, - ballastSize: 0, memAlloc: 800, expectError: false, }, @@ -378,29 +322,6 @@ func TestLogMemoryPressureResponse(t *testing.T) { MemoryLimitPercentage: 50, MemorySpikePercentage: 1, }, - ballastSize: 0, - memAlloc: 1800, - expectError: true, - }, - { - name: "Below memAllocLimit accounting for ballast", - mlCfg: &Config{ - CheckInterval: time.Second, - MemoryLimitPercentage: 50, - MemorySpikePercentage: 1, - }, - ballastSize: 1000, - memAlloc: 800, - expectError: false, - }, - { - name: "Above memAllocLimit even accounting for ballast", - mlCfg: &Config{ - CheckInterval: time.Second, - MemoryLimitPercentage: 50, - MemorySpikePercentage: 1, - }, - ballastSize: 1000, memAlloc: 1800, expectError: true, }, @@ -411,7 +332,6 @@ func TestLogMemoryPressureResponse(t *testing.T) { MemoryLimitPercentage: 50, MemorySpikePercentage: 10, }, - ballastSize: 0, memAlloc: 800, expectError: 
false, }, @@ -422,7 +342,6 @@ func TestLogMemoryPressureResponse(t *testing.T) { MemoryLimitPercentage: 50, MemorySpikePercentage: 11, }, - ballastSize: 0, memAlloc: 800, expectError: true, }, @@ -431,7 +350,7 @@ func TestLogMemoryPressureResponse(t *testing.T) { t.Run(tt.name, func(t *testing.T) { memorylimiter.GetMemoryFn = totalMemory memorylimiter.ReadMemStatsFn = func(ms *runtime.MemStats) { - ms.Alloc = tt.memAlloc + tt.ballastSize + ms.Alloc = tt.memAlloc } ml, err := newMemoryLimiterProcessor(processortest.NewNopSettings(), tt.mlCfg) @@ -447,7 +366,7 @@ func TestLogMemoryPressureResponse(t *testing.T) { processorhelper.WithShutdown(ml.shutdown)) require.NoError(t, err) - assert.NoError(t, tp.Start(ctx, &host{ballastSize: tt.ballastSize})) + assert.NoError(t, tp.Start(ctx, &host{})) ml.memlimiter.CheckMemLimits() err = tp.ConsumeLogs(ctx, ld) if tt.expectError { @@ -465,26 +384,14 @@ func TestLogMemoryPressureResponse(t *testing.T) { } type host struct { - ballastSize uint64 component.Host } func (h *host) GetExtensions() map[component.ID]component.Component { ret := make(map[component.ID]component.Component) - ret[component.MustNewID("ballast")] = &ballastExtension{ballastSize: h.ballastSize} return ret } -type ballastExtension struct { - ballastSize uint64 - component.StartFunc - component.ShutdownFunc -} - -func (be *ballastExtension) GetBallastSize() uint64 { - return be.ballastSize -} - func totalMemory() (uint64, error) { return uint64(2048), nil } diff --git a/processor/processorhelper/documentation.md b/processor/processorhelper/documentation.md index 8ba4455fd1d..6091e8385a9 100644 --- a/processor/processorhelper/documentation.md +++ b/processor/processorhelper/documentation.md @@ -12,7 +12,7 @@ Number of log records successfully pushed into the next component in the pipelin | Unit | Metric Type | Value Type | Monotonic | | ---- | ----------- | ---------- | --------- | -| 1 | Sum | Int | true | +| {records} | Sum | Int | true | ### otelcol_processor_accepted_metric_points @@ -20,7 +20,7 @@ Number of metric points successfully pushed into the next component in the pipel | Unit | Metric Type | Value Type | Monotonic | | ---- | ----------- | ---------- | --------- | -| 1 | Sum | Int | true | +| {datapoints} | Sum | Int | true | ### otelcol_processor_accepted_spans @@ -28,7 +28,7 @@ Number of spans successfully pushed into the next component in the pipeline. | Unit | Metric Type | Value Type | Monotonic | | ---- | ----------- | ---------- | --------- | -| 1 | Sum | Int | true | +| {spans} | Sum | Int | true | ### otelcol_processor_dropped_log_records @@ -36,7 +36,7 @@ Number of log records that were dropped. | Unit | Metric Type | Value Type | Monotonic | | ---- | ----------- | ---------- | --------- | -| 1 | Sum | Int | true | +| {records} | Sum | Int | true | ### otelcol_processor_dropped_metric_points @@ -44,7 +44,7 @@ Number of metric points that were dropped. | Unit | Metric Type | Value Type | Monotonic | | ---- | ----------- | ---------- | --------- | -| 1 | Sum | Int | true | +| {datapoints} | Sum | Int | true | ### otelcol_processor_dropped_spans @@ -52,7 +52,7 @@ Number of spans that were dropped. | Unit | Metric Type | Value Type | Monotonic | | ---- | ----------- | ---------- | --------- | -| 1 | Sum | Int | true | +| {spans} | Sum | Int | true | ### otelcol_processor_inserted_log_records @@ -60,7 +60,7 @@ Number of log records that were inserted. 
| Unit | Metric Type | Value Type | Monotonic | | ---- | ----------- | ---------- | --------- | -| 1 | Sum | Int | true | +| {records} | Sum | Int | true | ### otelcol_processor_inserted_metric_points @@ -68,7 +68,7 @@ Number of metric points that were inserted. | Unit | Metric Type | Value Type | Monotonic | | ---- | ----------- | ---------- | --------- | -| 1 | Sum | Int | true | +| {datapoints} | Sum | Int | true | ### otelcol_processor_inserted_spans @@ -76,7 +76,7 @@ Number of spans that were inserted. | Unit | Metric Type | Value Type | Monotonic | | ---- | ----------- | ---------- | --------- | -| 1 | Sum | Int | true | +| {spans} | Sum | Int | true | ### otelcol_processor_refused_log_records @@ -84,7 +84,7 @@ Number of log records that were rejected by the next component in the pipeline. | Unit | Metric Type | Value Type | Monotonic | | ---- | ----------- | ---------- | --------- | -| 1 | Sum | Int | true | +| {records} | Sum | Int | true | ### otelcol_processor_refused_metric_points @@ -92,7 +92,7 @@ Number of metric points that were rejected by the next component in the pipeline | Unit | Metric Type | Value Type | Monotonic | | ---- | ----------- | ---------- | --------- | -| 1 | Sum | Int | true | +| {datapoints} | Sum | Int | true | ### otelcol_processor_refused_spans @@ -100,4 +100,4 @@ Number of spans that were rejected by the next component in the pipeline. | Unit | Metric Type | Value Type | Monotonic | | ---- | ----------- | ---------- | --------- | -| 1 | Sum | Int | true | +| {spans} | Sum | Int | true | diff --git a/processor/processorhelper/internal/metadata/generated_telemetry.go b/processor/processorhelper/internal/metadata/generated_telemetry.go index 337413c2db7..6c145bb65cd 100644 --- a/processor/processorhelper/internal/metadata/generated_telemetry.go +++ b/processor/processorhelper/internal/metadata/generated_telemetry.go @@ -66,73 +66,73 @@ func NewTelemetryBuilder(settings component.TelemetrySettings, options ...teleme builder.ProcessorAcceptedLogRecords, err = builder.meter.Int64Counter( "otelcol_processor_accepted_log_records", metric.WithDescription("Number of log records successfully pushed into the next component in the pipeline."), - metric.WithUnit("1"), + metric.WithUnit("{records}"), ) errs = errors.Join(errs, err) builder.ProcessorAcceptedMetricPoints, err = builder.meter.Int64Counter( "otelcol_processor_accepted_metric_points", metric.WithDescription("Number of metric points successfully pushed into the next component in the pipeline."), - metric.WithUnit("1"), + metric.WithUnit("{datapoints}"), ) errs = errors.Join(errs, err) builder.ProcessorAcceptedSpans, err = builder.meter.Int64Counter( "otelcol_processor_accepted_spans", metric.WithDescription("Number of spans successfully pushed into the next component in the pipeline."), - metric.WithUnit("1"), + metric.WithUnit("{spans}"), ) errs = errors.Join(errs, err) builder.ProcessorDroppedLogRecords, err = builder.meter.Int64Counter( "otelcol_processor_dropped_log_records", metric.WithDescription("Number of log records that were dropped."), - metric.WithUnit("1"), + metric.WithUnit("{records}"), ) errs = errors.Join(errs, err) builder.ProcessorDroppedMetricPoints, err = builder.meter.Int64Counter( "otelcol_processor_dropped_metric_points", metric.WithDescription("Number of metric points that were dropped."), - metric.WithUnit("1"), + metric.WithUnit("{datapoints}"), ) errs = errors.Join(errs, err) builder.ProcessorDroppedSpans, err = builder.meter.Int64Counter( "otelcol_processor_dropped_spans", 
metric.WithDescription("Number of spans that were dropped."), - metric.WithUnit("1"), + metric.WithUnit("{spans}"), ) errs = errors.Join(errs, err) builder.ProcessorInsertedLogRecords, err = builder.meter.Int64Counter( "otelcol_processor_inserted_log_records", metric.WithDescription("Number of log records that were inserted."), - metric.WithUnit("1"), + metric.WithUnit("{records}"), ) errs = errors.Join(errs, err) builder.ProcessorInsertedMetricPoints, err = builder.meter.Int64Counter( "otelcol_processor_inserted_metric_points", metric.WithDescription("Number of metric points that were inserted."), - metric.WithUnit("1"), + metric.WithUnit("{datapoints}"), ) errs = errors.Join(errs, err) builder.ProcessorInsertedSpans, err = builder.meter.Int64Counter( "otelcol_processor_inserted_spans", metric.WithDescription("Number of spans that were inserted."), - metric.WithUnit("1"), + metric.WithUnit("{spans}"), ) errs = errors.Join(errs, err) builder.ProcessorRefusedLogRecords, err = builder.meter.Int64Counter( "otelcol_processor_refused_log_records", metric.WithDescription("Number of log records that were rejected by the next component in the pipeline."), - metric.WithUnit("1"), + metric.WithUnit("{records}"), ) errs = errors.Join(errs, err) builder.ProcessorRefusedMetricPoints, err = builder.meter.Int64Counter( "otelcol_processor_refused_metric_points", metric.WithDescription("Number of metric points that were rejected by the next component in the pipeline."), - metric.WithUnit("1"), + metric.WithUnit("{datapoints}"), ) errs = errors.Join(errs, err) builder.ProcessorRefusedSpans, err = builder.meter.Int64Counter( "otelcol_processor_refused_spans", metric.WithDescription("Number of spans that were rejected by the next component in the pipeline."), - metric.WithUnit("1"), + metric.WithUnit("{spans}"), ) errs = errors.Join(errs, err) return &builder, errs diff --git a/processor/processorhelper/metadata.yaml b/processor/processorhelper/metadata.yaml index a1baa27827d..95acaf640b8 100644 --- a/processor/processorhelper/metadata.yaml +++ b/processor/processorhelper/metadata.yaml @@ -12,7 +12,7 @@ telemetry: processor_accepted_spans: enabled: true description: Number of spans successfully pushed into the next component in the pipeline. - unit: "1" + unit: "{spans}" sum: value_type: int monotonic: true @@ -20,7 +20,7 @@ telemetry: processor_refused_spans: enabled: true description: Number of spans that were rejected by the next component in the pipeline. - unit: "1" + unit: "{spans}" sum: value_type: int monotonic: true @@ -28,7 +28,7 @@ telemetry: processor_dropped_spans: enabled: true description: Number of spans that were dropped. - unit: "1" + unit: "{spans}" sum: value_type: int monotonic: true @@ -36,7 +36,7 @@ telemetry: processor_inserted_spans: enabled: true description: Number of spans that were inserted. - unit: "1" + unit: "{spans}" sum: value_type: int monotonic: true @@ -44,7 +44,7 @@ telemetry: processor_accepted_metric_points: enabled: true description: Number of metric points successfully pushed into the next component in the pipeline. - unit: "1" + unit: "{datapoints}" sum: value_type: int monotonic: true @@ -52,7 +52,7 @@ telemetry: processor_refused_metric_points: enabled: true description: Number of metric points that were rejected by the next component in the pipeline. - unit: "1" + unit: "{datapoints}" sum: value_type: int monotonic: true @@ -60,7 +60,7 @@ telemetry: processor_dropped_metric_points: enabled: true description: Number of metric points that were dropped. 
- unit: "1" + unit: "{datapoints}" sum: value_type: int monotonic: true @@ -68,7 +68,7 @@ telemetry: processor_inserted_metric_points: enabled: true description: Number of metric points that were inserted. - unit: "1" + unit: "{datapoints}" sum: value_type: int monotonic: true @@ -76,7 +76,7 @@ telemetry: processor_accepted_log_records: enabled: true description: Number of log records successfully pushed into the next component in the pipeline. - unit: "1" + unit: "{records}" sum: value_type: int monotonic: true @@ -84,7 +84,7 @@ telemetry: processor_refused_log_records: enabled: true description: Number of log records that were rejected by the next component in the pipeline. - unit: "1" + unit: "{records}" sum: value_type: int monotonic: true @@ -92,7 +92,7 @@ telemetry: processor_dropped_log_records: enabled: true description: Number of log records that were dropped. - unit: "1" + unit: "{records}" sum: value_type: int monotonic: true @@ -100,7 +100,7 @@ telemetry: processor_inserted_log_records: enabled: true description: Number of log records that were inserted. - unit: "1" + unit: "{records}" sum: value_type: int monotonic: true diff --git a/processor/processorhelper/obsreport.go b/processor/processorhelper/obsreport.go index 2a3bd0dc754..baebb6b3b0c 100644 --- a/processor/processorhelper/obsreport.go +++ b/processor/processorhelper/obsreport.go @@ -32,8 +32,8 @@ func BuildCustomMetricName(configType, metric string) string { // ObsReport is a helper to add observability to a processor. type ObsReport struct { - otelAttrs []attribute.KeyValue - telemetryBuilder *metadata.TelemetryBuilder + otelAttrs []attribute.KeyValue + telBuilder *metadata.TelemetryBuilder } // ObsReportSettings are settings for creating an ObsReport. @@ -48,7 +48,7 @@ func NewObsReport(cfg ObsReportSettings) (*ObsReport, error) { } func newObsReport(cfg ObsReportSettings) (*ObsReport, error) { - telemetryBuilder, err := metadata.NewTelemetryBuilder(cfg.ProcessorCreateSettings.TelemetrySettings, metadata.WithLevel(cfg.ProcessorCreateSettings.MetricsLevel)) + telBuilder, err := metadata.NewTelemetryBuilder(cfg.ProcessorCreateSettings.TelemetrySettings, metadata.WithLevel(cfg.ProcessorCreateSettings.MetricsLevel)) if err != nil { return nil, err } @@ -56,92 +56,66 @@ func newObsReport(cfg ObsReportSettings) (*ObsReport, error) { otelAttrs: []attribute.KeyValue{ attribute.String(obsmetrics.ProcessorKey, cfg.ProcessorID.String()), }, - telemetryBuilder: telemetryBuilder, + telBuilder: telBuilder, }, nil } -func (or *ObsReport) recordData(ctx context.Context, dataType component.DataType, accepted, refused, dropped, inserted int64) { - var acceptedCount, refusedCount, droppedCount, insertedCount metric.Int64Counter - switch dataType { - case component.DataTypeTraces: - acceptedCount = or.telemetryBuilder.ProcessorAcceptedSpans - refusedCount = or.telemetryBuilder.ProcessorRefusedSpans - droppedCount = or.telemetryBuilder.ProcessorDroppedSpans - insertedCount = or.telemetryBuilder.ProcessorInsertedSpans - case component.DataTypeMetrics: - acceptedCount = or.telemetryBuilder.ProcessorAcceptedMetricPoints - refusedCount = or.telemetryBuilder.ProcessorRefusedMetricPoints - droppedCount = or.telemetryBuilder.ProcessorDroppedMetricPoints - insertedCount = or.telemetryBuilder.ProcessorInsertedMetricPoints - case component.DataTypeLogs: - acceptedCount = or.telemetryBuilder.ProcessorAcceptedLogRecords - refusedCount = or.telemetryBuilder.ProcessorRefusedLogRecords - droppedCount = or.telemetryBuilder.ProcessorDroppedLogRecords - 
insertedCount = or.telemetryBuilder.ProcessorInsertedLogRecords - } - - acceptedCount.Add(ctx, accepted, metric.WithAttributes(or.otelAttrs...)) - refusedCount.Add(ctx, refused, metric.WithAttributes(or.otelAttrs...)) - droppedCount.Add(ctx, dropped, metric.WithAttributes(or.otelAttrs...)) - insertedCount.Add(ctx, inserted, metric.WithAttributes(or.otelAttrs...)) -} - // TracesAccepted reports that the trace data was accepted. func (or *ObsReport) TracesAccepted(ctx context.Context, numSpans int) { - or.recordData(ctx, component.DataTypeTraces, int64(numSpans), int64(0), int64(0), int64(0)) + or.telBuilder.ProcessorAcceptedSpans.Add(ctx, int64(numSpans), metric.WithAttributes(or.otelAttrs...)) } // TracesRefused reports that the trace data was refused. func (or *ObsReport) TracesRefused(ctx context.Context, numSpans int) { - or.recordData(ctx, component.DataTypeTraces, int64(0), int64(numSpans), int64(0), int64(0)) + or.telBuilder.ProcessorRefusedSpans.Add(ctx, int64(numSpans), metric.WithAttributes(or.otelAttrs...)) } // TracesDropped reports that the trace data was dropped. func (or *ObsReport) TracesDropped(ctx context.Context, numSpans int) { - or.recordData(ctx, component.DataTypeTraces, int64(0), int64(0), int64(numSpans), int64(0)) + or.telBuilder.ProcessorDroppedSpans.Add(ctx, int64(numSpans), metric.WithAttributes(or.otelAttrs...)) } // TracesInserted reports that the trace data was inserted. func (or *ObsReport) TracesInserted(ctx context.Context, numSpans int) { - or.recordData(ctx, component.DataTypeTraces, int64(0), int64(0), int64(0), int64(numSpans)) + or.telBuilder.ProcessorInsertedSpans.Add(ctx, int64(numSpans), metric.WithAttributes(or.otelAttrs...)) } // MetricsAccepted reports that the metrics were accepted. func (or *ObsReport) MetricsAccepted(ctx context.Context, numPoints int) { - or.recordData(ctx, component.DataTypeMetrics, int64(numPoints), int64(0), int64(0), int64(0)) + or.telBuilder.ProcessorAcceptedMetricPoints.Add(ctx, int64(numPoints), metric.WithAttributes(or.otelAttrs...)) } // MetricsRefused reports that the metrics were refused. func (or *ObsReport) MetricsRefused(ctx context.Context, numPoints int) { - or.recordData(ctx, component.DataTypeMetrics, int64(0), int64(numPoints), int64(0), int64(0)) + or.telBuilder.ProcessorRefusedMetricPoints.Add(ctx, int64(numPoints), metric.WithAttributes(or.otelAttrs...)) } // MetricsDropped reports that the metrics were dropped. func (or *ObsReport) MetricsDropped(ctx context.Context, numPoints int) { - or.recordData(ctx, component.DataTypeMetrics, int64(0), int64(0), int64(numPoints), int64(0)) + or.telBuilder.ProcessorDroppedMetricPoints.Add(ctx, int64(numPoints), metric.WithAttributes(or.otelAttrs...)) } // MetricsInserted reports that the metrics were inserted. func (or *ObsReport) MetricsInserted(ctx context.Context, numPoints int) { - or.recordData(ctx, component.DataTypeMetrics, int64(0), int64(0), int64(0), int64(numPoints)) + or.telBuilder.ProcessorInsertedMetricPoints.Add(ctx, int64(numPoints), metric.WithAttributes(or.otelAttrs...)) } // LogsAccepted reports that the logs were accepted. func (or *ObsReport) LogsAccepted(ctx context.Context, numRecords int) { - or.recordData(ctx, component.DataTypeLogs, int64(numRecords), int64(0), int64(0), int64(0)) + or.telBuilder.ProcessorAcceptedLogRecords.Add(ctx, int64(numRecords), metric.WithAttributes(or.otelAttrs...)) } // LogsRefused reports that the logs were refused. 
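// A minimal usage sketch, assuming a processor that builds an ObsReport from its creation
// settings; with this change the helper methods increment the telemetry-builder counters
// directly instead of routing through recordData. The names `set`, `ctx`, `td` and `dropped`
// are placeholders for illustration only:
//
//	obsrep, err := processorhelper.NewObsReport(processorhelper.ObsReportSettings{
//		ProcessorID:             set.ID,
//		ProcessorCreateSettings: set,
//	})
//	if err != nil {
//		return err
//	}
//	// Inside ConsumeTraces: report accepted spans, then any that were dropped.
//	obsrep.TracesAccepted(ctx, td.SpanCount())
//	obsrep.TracesDropped(ctx, dropped)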
func (or *ObsReport) LogsRefused(ctx context.Context, numRecords int) { - or.recordData(ctx, component.DataTypeLogs, int64(0), int64(numRecords), int64(0), int64(0)) + or.telBuilder.ProcessorRefusedLogRecords.Add(ctx, int64(numRecords), metric.WithAttributes(or.otelAttrs...)) } // LogsDropped reports that the logs were dropped. func (or *ObsReport) LogsDropped(ctx context.Context, numRecords int) { - or.recordData(ctx, component.DataTypeLogs, int64(0), int64(0), int64(numRecords), int64(0)) + or.telBuilder.ProcessorDroppedLogRecords.Add(ctx, int64(numRecords), metric.WithAttributes(or.otelAttrs...)) } // LogsInserted reports that the logs were inserted. func (or *ObsReport) LogsInserted(ctx context.Context, numRecords int) { - or.recordData(ctx, component.DataTypeLogs, int64(0), int64(0), int64(0), int64(numRecords)) + or.telBuilder.ProcessorInsertedLogRecords.Add(ctx, int64(numRecords), metric.WithAttributes(or.otelAttrs...)) } diff --git a/receiver/go.mod b/receiver/go.mod index 544ed727f44..abe1f937512 100644 --- a/receiver/go.mod +++ b/receiver/go.mod @@ -9,6 +9,7 @@ require ( go.opentelemetry.io/collector/component v0.105.0 go.opentelemetry.io/collector/config/configtelemetry v0.105.0 go.opentelemetry.io/collector/consumer v0.105.0 + go.opentelemetry.io/collector/consumer/consumertest v0.105.0 go.opentelemetry.io/collector/pdata v1.12.0 go.opentelemetry.io/otel v1.28.0 go.opentelemetry.io/otel/metric v1.28.0 @@ -36,6 +37,8 @@ require ( github.com/prometheus/client_model v0.6.1 // indirect github.com/prometheus/common v0.55.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect + go.opentelemetry.io/collector/consumer/consumerprofiles v0.105.0 // indirect + go.opentelemetry.io/collector/pdata/pprofile v0.105.0 // indirect go.opentelemetry.io/otel/exporters/prometheus v0.50.0 // indirect golang.org/x/net v0.26.0 // indirect golang.org/x/sys v0.21.0 // indirect @@ -67,3 +70,7 @@ replace go.opentelemetry.io/collector/config/configtelemetry => ../config/config replace go.opentelemetry.io/collector/pdata/pprofile => ../pdata/pprofile replace go.opentelemetry.io/collector/internal/globalgates => ../internal/globalgates + +replace go.opentelemetry.io/collector/consumer/consumerprofiles => ../consumer/consumerprofiles + +replace go.opentelemetry.io/collector/consumer/consumertest => ../consumer/consumertest diff --git a/receiver/internal/factory.go b/receiver/internal/factory.go index 0b6cb17c7ef..24d05785ff8 100644 --- a/receiver/internal/factory.go +++ b/receiver/internal/factory.go @@ -18,24 +18,27 @@ type Factory interface { component.Factory // CreateTracesReceiver creates a TracesReceiver based on this config. - // If the receiver type does not support tracing or if the config is not valid - // an error will be returned instead. `nextConsumer` is never nil. + // If the receiver type does not support traces, + // this function returns the error [component.ErrDataTypeIsNotSupported]. + // Implementers can assume `nextConsumer` is never nil. CreateTracesReceiver(ctx context.Context, set Settings, cfg component.Config, nextConsumer consumer.Traces) (Traces, error) // TracesReceiverStability gets the stability level of the TracesReceiver. TracesReceiverStability() component.StabilityLevel // CreateMetricsReceiver creates a MetricsReceiver based on this config. - // If the receiver type does not support metrics or if the config is not valid - // an error will be returned instead. `nextConsumer` is never nil. 
+ // If the receiver type does not support metrics, + // this function returns the error [component.ErrDataTypeIsNotSupported]. + // Implementers can assume `nextConsumer` is never nil. CreateMetricsReceiver(ctx context.Context, set Settings, cfg component.Config, nextConsumer consumer.Metrics) (Metrics, error) // MetricsReceiverStability gets the stability level of the MetricsReceiver. MetricsReceiverStability() component.StabilityLevel // CreateLogsReceiver creates a LogsReceiver based on this config. - // If the receiver type does not support the data type or if the config is not valid - // an error will be returned instead. `nextConsumer` is never nil. + // If the receiver type does not support logs, + // this function returns the error [component.ErrDataTypeIsNotSupported]. + // Implementers can assume `nextConsumer` is never nil. CreateLogsReceiver(ctx context.Context, set Settings, cfg component.Config, nextConsumer consumer.Logs) (Logs, error) // LogsReceiverStability gets the stability level of the LogsReceiver. diff --git a/receiver/nopreceiver/go.mod b/receiver/nopreceiver/go.mod index 8b5eb3f3d4d..dc2f668029c 100644 --- a/receiver/nopreceiver/go.mod +++ b/receiver/nopreceiver/go.mod @@ -7,6 +7,7 @@ require ( go.opentelemetry.io/collector/component v0.105.0 go.opentelemetry.io/collector/confmap v0.105.0 go.opentelemetry.io/collector/consumer v0.105.0 + go.opentelemetry.io/collector/consumer/consumertest v0.105.0 go.opentelemetry.io/collector/receiver v0.105.0 go.uber.org/goleak v1.3.0 ) @@ -36,9 +37,11 @@ require ( github.com/prometheus/common v0.55.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect go.opentelemetry.io/collector/config/configtelemetry v0.105.0 // indirect + go.opentelemetry.io/collector/consumer/consumerprofiles v0.105.0 // indirect go.opentelemetry.io/collector/featuregate v1.12.0 // indirect go.opentelemetry.io/collector/internal/globalgates v0.105.0 // indirect go.opentelemetry.io/collector/pdata v1.12.0 // indirect + go.opentelemetry.io/collector/pdata/pprofile v0.105.0 // indirect go.opentelemetry.io/otel v1.28.0 // indirect go.opentelemetry.io/otel/exporters/prometheus v0.50.0 // indirect go.opentelemetry.io/otel/metric v1.28.0 // indirect @@ -77,3 +80,7 @@ replace go.opentelemetry.io/collector/internal/globalgates => ../../internal/glo replace go.opentelemetry.io/collector/featuregate => ../../featuregate replace go.opentelemetry.io/collector/pdata/pprofile => ../../pdata/pprofile + +replace go.opentelemetry.io/collector/consumer/consumerprofiles => ../../consumer/consumerprofiles + +replace go.opentelemetry.io/collector/consumer/consumertest => ../../consumer/consumertest diff --git a/receiver/otlpreceiver/go.mod b/receiver/otlpreceiver/go.mod index 69356d7bf91..86e85179259 100644 --- a/receiver/otlpreceiver/go.mod +++ b/receiver/otlpreceiver/go.mod @@ -14,6 +14,7 @@ require ( go.opentelemetry.io/collector/config/configtls v1.12.0 go.opentelemetry.io/collector/confmap v0.105.0 go.opentelemetry.io/collector/consumer v0.105.0 + go.opentelemetry.io/collector/consumer/consumertest v0.105.0 go.opentelemetry.io/collector/pdata v1.12.0 go.opentelemetry.io/collector/pdata/testdata v0.105.0 go.opentelemetry.io/collector/receiver v0.105.0 @@ -59,6 +60,7 @@ require ( go.opentelemetry.io/collector/config/configopaque v1.12.0 // indirect go.opentelemetry.io/collector/config/configtelemetry v0.105.0 // indirect go.opentelemetry.io/collector/config/internal v0.105.0 // indirect + go.opentelemetry.io/collector/consumer/consumerprofiles v0.105.0 // indirect 
go.opentelemetry.io/collector/extension v0.105.0 // indirect go.opentelemetry.io/collector/extension/auth v0.105.0 // indirect go.opentelemetry.io/collector/featuregate v1.12.0 // indirect @@ -138,3 +140,7 @@ retract ( replace go.opentelemetry.io/collector/pdata/pprofile => ../../pdata/pprofile replace go.opentelemetry.io/collector/internal/globalgates => ../../internal/globalgates + +replace go.opentelemetry.io/collector/consumer/consumerprofiles => ../../consumer/consumerprofiles + +replace go.opentelemetry.io/collector/consumer/consumertest => ../../consumer/consumertest diff --git a/receiver/receiverhelper/documentation.md b/receiver/receiverhelper/documentation.md index d3990dfe998..55e8657e6e0 100644 --- a/receiver/receiverhelper/documentation.md +++ b/receiver/receiverhelper/documentation.md @@ -12,7 +12,7 @@ Number of log records successfully pushed into the pipeline. | Unit | Metric Type | Value Type | Monotonic | | ---- | ----------- | ---------- | --------- | -| 1 | Sum | Int | true | +| {records} | Sum | Int | true | ### otelcol_receiver_accepted_metric_points @@ -20,7 +20,7 @@ Number of metric points successfully pushed into the pipeline. | Unit | Metric Type | Value Type | Monotonic | | ---- | ----------- | ---------- | --------- | -| 1 | Sum | Int | true | +| {datapoints} | Sum | Int | true | ### otelcol_receiver_accepted_spans @@ -28,7 +28,7 @@ Number of spans successfully pushed into the pipeline. | Unit | Metric Type | Value Type | Monotonic | | ---- | ----------- | ---------- | --------- | -| 1 | Sum | Int | true | +| {spans} | Sum | Int | true | ### otelcol_receiver_refused_log_records @@ -36,7 +36,7 @@ Number of log records that could not be pushed into the pipeline. | Unit | Metric Type | Value Type | Monotonic | | ---- | ----------- | ---------- | --------- | -| 1 | Sum | Int | true | +| {records} | Sum | Int | true | ### otelcol_receiver_refused_metric_points @@ -44,7 +44,7 @@ Number of metric points that could not be pushed into the pipeline. | Unit | Metric Type | Value Type | Monotonic | | ---- | ----------- | ---------- | --------- | -| 1 | Sum | Int | true | +| {datapoints} | Sum | Int | true | ### otelcol_receiver_refused_spans @@ -52,4 +52,4 @@ Number of spans that could not be pushed into the pipeline. 
| Unit | Metric Type | Value Type | Monotonic | | ---- | ----------- | ---------- | --------- | -| 1 | Sum | Int | true | +| {spans} | Sum | Int | true | diff --git a/receiver/receiverhelper/internal/metadata/generated_telemetry.go b/receiver/receiverhelper/internal/metadata/generated_telemetry.go index bb2ed6c7d1b..8e0b5a8455a 100644 --- a/receiver/receiverhelper/internal/metadata/generated_telemetry.go +++ b/receiver/receiverhelper/internal/metadata/generated_telemetry.go @@ -60,37 +60,37 @@ func NewTelemetryBuilder(settings component.TelemetrySettings, options ...teleme builder.ReceiverAcceptedLogRecords, err = builder.meter.Int64Counter( "otelcol_receiver_accepted_log_records", metric.WithDescription("Number of log records successfully pushed into the pipeline."), - metric.WithUnit("1"), + metric.WithUnit("{records}"), ) errs = errors.Join(errs, err) builder.ReceiverAcceptedMetricPoints, err = builder.meter.Int64Counter( "otelcol_receiver_accepted_metric_points", metric.WithDescription("Number of metric points successfully pushed into the pipeline."), - metric.WithUnit("1"), + metric.WithUnit("{datapoints}"), ) errs = errors.Join(errs, err) builder.ReceiverAcceptedSpans, err = builder.meter.Int64Counter( "otelcol_receiver_accepted_spans", metric.WithDescription("Number of spans successfully pushed into the pipeline."), - metric.WithUnit("1"), + metric.WithUnit("{spans}"), ) errs = errors.Join(errs, err) builder.ReceiverRefusedLogRecords, err = builder.meter.Int64Counter( "otelcol_receiver_refused_log_records", metric.WithDescription("Number of log records that could not be pushed into the pipeline."), - metric.WithUnit("1"), + metric.WithUnit("{records}"), ) errs = errors.Join(errs, err) builder.ReceiverRefusedMetricPoints, err = builder.meter.Int64Counter( "otelcol_receiver_refused_metric_points", metric.WithDescription("Number of metric points that could not be pushed into the pipeline."), - metric.WithUnit("1"), + metric.WithUnit("{datapoints}"), ) errs = errors.Join(errs, err) builder.ReceiverRefusedSpans, err = builder.meter.Int64Counter( "otelcol_receiver_refused_spans", metric.WithDescription("Number of spans that could not be pushed into the pipeline."), - metric.WithUnit("1"), + metric.WithUnit("{spans}"), ) errs = errors.Join(errs, err) return &builder, errs diff --git a/receiver/receiverhelper/metadata.yaml b/receiver/receiverhelper/metadata.yaml index db8dec36922..373b6838262 100644 --- a/receiver/receiverhelper/metadata.yaml +++ b/receiver/receiverhelper/metadata.yaml @@ -12,7 +12,7 @@ telemetry: receiver_accepted_spans: enabled: true description: Number of spans successfully pushed into the pipeline. - unit: "1" + unit: "{spans}" sum: value_type: int monotonic: true @@ -20,7 +20,7 @@ telemetry: receiver_refused_spans: enabled: true description: Number of spans that could not be pushed into the pipeline. - unit: "1" + unit: "{spans}" sum: value_type: int monotonic: true @@ -28,7 +28,7 @@ telemetry: receiver_accepted_metric_points: enabled: true description: Number of metric points successfully pushed into the pipeline. - unit: "1" + unit: "{datapoints}" sum: value_type: int monotonic: true @@ -36,7 +36,7 @@ telemetry: receiver_refused_metric_points: enabled: true description: Number of metric points that could not be pushed into the pipeline. - unit: "1" + unit: "{datapoints}" sum: value_type: int monotonic: true @@ -44,7 +44,7 @@ telemetry: receiver_accepted_log_records: enabled: true description: Number of log records successfully pushed into the pipeline. 
- unit: "1" + unit: "{records}" sum: value_type: int monotonic: true @@ -52,7 +52,7 @@ telemetry: receiver_refused_log_records: enabled: true description: Number of log records that could not be pushed into the pipeline. - unit: "1" + unit: "{records}" sum: value_type: int monotonic: true \ No newline at end of file diff --git a/receiver/scraperhelper/documentation.md b/receiver/scraperhelper/documentation.md index a582be9c5f1..56ad47a5105 100644 --- a/receiver/scraperhelper/documentation.md +++ b/receiver/scraperhelper/documentation.md @@ -12,7 +12,7 @@ Number of metric points that were unable to be scraped. | Unit | Metric Type | Value Type | Monotonic | | ---- | ----------- | ---------- | --------- | -| 1 | Sum | Int | true | +| {datapoints} | Sum | Int | true | ### otelcol_scraper_scraped_metric_points @@ -20,4 +20,4 @@ Number of metric points successfully scraped. | Unit | Metric Type | Value Type | Monotonic | | ---- | ----------- | ---------- | --------- | -| 1 | Sum | Int | true | +| {datapoints} | Sum | Int | true | diff --git a/receiver/scraperhelper/internal/metadata/generated_telemetry.go b/receiver/scraperhelper/internal/metadata/generated_telemetry.go index a366c7436fb..98cf15f4123 100644 --- a/receiver/scraperhelper/internal/metadata/generated_telemetry.go +++ b/receiver/scraperhelper/internal/metadata/generated_telemetry.go @@ -56,13 +56,13 @@ func NewTelemetryBuilder(settings component.TelemetrySettings, options ...teleme builder.ScraperErroredMetricPoints, err = builder.meter.Int64Counter( "otelcol_scraper_errored_metric_points", metric.WithDescription("Number of metric points that were unable to be scraped."), - metric.WithUnit("1"), + metric.WithUnit("{datapoints}"), ) errs = errors.Join(errs, err) builder.ScraperScrapedMetricPoints, err = builder.meter.Int64Counter( "otelcol_scraper_scraped_metric_points", metric.WithDescription("Number of metric points successfully scraped."), - metric.WithUnit("1"), + metric.WithUnit("{datapoints}"), ) errs = errors.Join(errs, err) return &builder, errs diff --git a/receiver/scraperhelper/metadata.yaml b/receiver/scraperhelper/metadata.yaml index 8cf497fcd2c..b5a2e34efcb 100644 --- a/receiver/scraperhelper/metadata.yaml +++ b/receiver/scraperhelper/metadata.yaml @@ -12,7 +12,7 @@ telemetry: scraper_scraped_metric_points: enabled: true description: Number of metric points successfully scraped. - unit: "1" + unit: "{datapoints}" sum: value_type: int monotonic: true @@ -20,7 +20,7 @@ telemetry: scraper_errored_metric_points: enabled: true description: Number of metric points that were unable to be scraped. 
- unit: "1" + unit: "{datapoints}" sum: value_type: int monotonic: true \ No newline at end of file diff --git a/service/go.mod b/service/go.mod index f7c8bec2693..1ff29ae6e66 100644 --- a/service/go.mod +++ b/service/go.mod @@ -17,6 +17,7 @@ require ( go.opentelemetry.io/collector/confmap v0.105.0 go.opentelemetry.io/collector/connector v0.105.0 go.opentelemetry.io/collector/consumer v0.105.0 + go.opentelemetry.io/collector/consumer/consumertest v0.105.0 go.opentelemetry.io/collector/exporter v0.105.0 go.opentelemetry.io/collector/extension v0.105.0 go.opentelemetry.io/collector/extension/zpagesextension v0.105.0 @@ -85,6 +86,7 @@ require ( go.opentelemetry.io/collector/config/configopaque v1.12.0 // indirect go.opentelemetry.io/collector/config/configtls v1.12.0 // indirect go.opentelemetry.io/collector/config/internal v0.105.0 // indirect + go.opentelemetry.io/collector/consumer/consumerprofiles v0.105.0 // indirect go.opentelemetry.io/collector/extension/auth v0.105.0 // indirect go.opentelemetry.io/collector/pdata/pprofile v0.105.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect @@ -156,3 +158,7 @@ replace go.opentelemetry.io/collector/config/configcompression => ../config/conf replace go.opentelemetry.io/collector/pdata/pprofile => ../pdata/pprofile replace go.opentelemetry.io/collector/internal/globalgates => ../internal/globalgates + +replace go.opentelemetry.io/collector/consumer/consumerprofiles => ../consumer/consumerprofiles + +replace go.opentelemetry.io/collector/consumer/consumertest => ../consumer/consumertest diff --git a/service/internal/graph/graph_test.go b/service/internal/graph/graph_test.go index d3a0984527a..6c1ffc0aab7 100644 --- a/service/internal/graph/graph_test.go +++ b/service/internal/graph/graph_test.go @@ -2084,7 +2084,7 @@ func TestGraphBuildErrors(t *testing.T) { } } -// This includes all tests from the previous implmentation, plus a new one +// This includes all tests from the previous implementation, plus a new one // relevant only to the new graph-based implementation. func TestGraphFailToStartAndShutdown(t *testing.T) { errReceiverFactory := newErrReceiverFactory() diff --git a/service/internal/proctelemetry/process_telemetry.go b/service/internal/proctelemetry/process_telemetry.go index 0b978758104..e7a0cc1454a 100644 --- a/service/internal/proctelemetry/process_telemetry.go +++ b/service/internal/proctelemetry/process_telemetry.go @@ -21,7 +21,6 @@ import ( // processMetrics is a struct that contains views related to process metrics (cpu, mem, etc) type processMetrics struct { startTimeUnixNano int64 - ballastSizeBytes uint64 proc *process.Process context context.Context @@ -54,7 +53,7 @@ func WithHostProc(hostProc string) RegisterOption { // RegisterProcessMetrics creates a new set of processMetrics (mem, cpu) that can be used to measure // basic information about this process. 
-func RegisterProcessMetrics(cfg servicetelemetry.TelemetrySettings, ballastSizeBytes uint64, opts ...RegisterOption) error { +func RegisterProcessMetrics(cfg servicetelemetry.TelemetrySettings, opts ...RegisterOption) error { set := registerOption{} for _, opt := range opts { opt.apply(&set) @@ -62,7 +61,6 @@ func RegisterProcessMetrics(cfg servicetelemetry.TelemetrySettings, ballastSizeB var err error pm := &processMetrics{ startTimeUnixNano: time.Now().UnixNano(), - ballastSizeBytes: ballastSizeBytes, ms: &runtime.MemStats{}, } @@ -139,10 +137,4 @@ func (pm *processMetrics) readMemStatsIfNeeded() { } pm.lastMsRead = now runtime.ReadMemStats(pm.ms) - if pm.ballastSizeBytes > 0 { - pm.ms.Alloc -= pm.ballastSizeBytes - pm.ms.HeapAlloc -= pm.ballastSizeBytes - pm.ms.HeapSys -= pm.ballastSizeBytes - pm.ms.HeapInuse -= pm.ballastSizeBytes - } } diff --git a/service/internal/proctelemetry/process_telemetry_linux_test.go b/service/internal/proctelemetry/process_telemetry_linux_test.go index 73605c0ae8e..99471b4eca2 100644 --- a/service/internal/proctelemetry/process_telemetry_linux_test.go +++ b/service/internal/proctelemetry/process_telemetry_linux_test.go @@ -21,7 +21,7 @@ func TestProcessTelemetryWithHostProc(t *testing.T) { // Make the sure the environment variable value is not used. t.Setenv("HOST_PROC", "foo/bar") - require.NoError(t, RegisterProcessMetrics(tel.TelemetrySettings, 0, WithHostProc("/proc"))) + require.NoError(t, RegisterProcessMetrics(tel.TelemetrySettings, WithHostProc("/proc"))) // Check that the metrics are actually filled. time.Sleep(200 * time.Millisecond) diff --git a/service/internal/proctelemetry/process_telemetry_test.go b/service/internal/proctelemetry/process_telemetry_test.go index cccc5ad8e30..fb750fcf664 100644 --- a/service/internal/proctelemetry/process_telemetry_test.go +++ b/service/internal/proctelemetry/process_telemetry_test.go @@ -78,7 +78,7 @@ func fetchPrometheusMetrics(handler http.Handler) (map[string]*io_prometheus_cli func TestProcessTelemetry(t *testing.T) { tel := setupTelemetry(t) - require.NoError(t, RegisterProcessMetrics(tel.TelemetrySettings, 0)) + require.NoError(t, RegisterProcessMetrics(tel.TelemetrySettings)) mp, err := fetchPrometheusMetrics(tel.promHandler) require.NoError(t, err) diff --git a/service/service.go b/service/service.go index 99747db9ac6..8b57bd12cad 100644 --- a/service/service.go +++ b/service/service.go @@ -159,7 +159,7 @@ func New(ctx context.Context, set Settings, cfg Config) (*Service, error) { if cfg.Telemetry.Metrics.Level != configtelemetry.LevelNone && cfg.Telemetry.Metrics.Address != "" { // The process telemetry initialization requires the ballast size, which is available after the extensions are initialized. - if err = proctelemetry.RegisterProcessMetrics(srv.telemetrySettings, getBallastSize(srv.host)); err != nil { + if err = proctelemetry.RegisterProcessMetrics(srv.telemetrySettings); err != nil { return nil, fmt.Errorf("failed to register process metrics: %w", err) } } @@ -316,15 +316,6 @@ func (srv *Service) Logger() *zap.Logger { return srv.telemetrySettings.Logger } -func getBallastSize(host component.Host) uint64 { - for _, ext := range host.GetExtensions() { - if bExt, ok := ext.(interface{ GetBallastSize() uint64 }); ok { - return bExt.GetBallastSize() - } - } - return 0 -} - func pdataFromSdk(res *sdkresource.Resource) pcommon.Resource { // pcommon.NewResource is the best way to generate a new resource currently and is safe to use outside of tests. 
// Because the resource is signal agnostic, and we need a net new resource, not an existing one, this is the only diff --git a/service/telemetry/tracer.go b/service/telemetry/tracer.go index 65f49606514..4434af2fc74 100644 --- a/service/telemetry/tracer.go +++ b/service/telemetry/tracer.go @@ -25,8 +25,7 @@ var ( errUnsupportedPropagator = errors.New("unsupported trace propagator") ) -// New creates a new Telemetry from Config. -func newTracerProvider(ctx context.Context, set Settings, cfg Config) (trace.TracerProvider, error) { +func attributes(set Settings, cfg Config) map[string]interface{} { attrs := map[string]interface{}{ string(semconv.ServiceNameKey): set.BuildInfo.Command, string(semconv.ServiceVersionKey): set.BuildInfo.Version, @@ -41,10 +40,15 @@ func newTracerProvider(ctx context.Context, set Settings, cfg Config) (trace.Tra delete(attrs, k) } } + return attrs +} + +// New creates a new Telemetry from Config. +func newTracerProvider(ctx context.Context, set Settings, cfg Config) (trace.TracerProvider, error) { sch := semconv.SchemaURL res := config.Resource{ SchemaUrl: &sch, - Attributes: attrs, + Attributes: attributes(set, cfg), } sdk, err := config.NewSDK( diff --git a/service/telemetry/tracer_test.go b/service/telemetry/tracer_test.go new file mode 100644 index 00000000000..6a86d284b58 --- /dev/null +++ b/service/telemetry/tracer_test.go @@ -0,0 +1,61 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry // import "go.opentelemetry.io/collector/service/telemetry" + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/service/telemetry/internal" +) + +func TestAttributes(t *testing.T) { + tests := []struct { + name string + cfg Config + buildInfo component.BuildInfo + wantAttributes map[string]interface{} + }{ + { + name: "no build info and no resource config", + cfg: Config{}, + wantAttributes: map[string]interface{}{"service.name": "", "service.version": ""}, + }, + { + name: "build info and no resource config", + cfg: Config{}, + buildInfo: component.BuildInfo{Command: "otelcoltest", Version: "0.0.0-test"}, + wantAttributes: map[string]interface{}{"service.name": "otelcoltest", "service.version": "0.0.0-test"}, + }, + { + name: "no build info and resource config", + cfg: Config{Resource: map[string]*string{"service.name": ptr("resource.name"), "service.version": ptr("resource.version"), "test": ptr("test")}}, + wantAttributes: map[string]interface{}{"service.name": "resource.name", "service.version": "resource.version", "test": "test"}, + }, + { + name: "build info and resource config", + buildInfo: component.BuildInfo{Command: "otelcoltest", Version: "0.0.0-test"}, + cfg: Config{Resource: map[string]*string{"service.name": ptr("resource.name"), "service.version": ptr("resource.version"), "test": ptr("test")}}, + wantAttributes: map[string]interface{}{"service.name": "resource.name", "service.version": "resource.version", "test": "test"}, + }, + { + name: "deleting a nil value", + buildInfo: component.BuildInfo{Command: "otelcoltest", Version: "0.0.0-test"}, + cfg: Config{Resource: map[string]*string{"service.name": nil, "service.version": ptr("resource.version"), "test": ptr("test")}}, + wantAttributes: map[string]interface{}{"service.version": "resource.version", "test": "test"}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + attrs := attributes(internal.Settings{BuildInfo: tt.buildInfo}, tt.cfg) + 
require.Equal(t, tt.wantAttributes, attrs) + }) + } +} + +func ptr[T any](v T) *T { + return &v +} diff --git a/versions.yaml b/versions.yaml index 4f6073a32f0..7ee33e71b7b 100644 --- a/versions.yaml +++ b/versions.yaml @@ -37,6 +37,7 @@ module-sets: - go.opentelemetry.io/collector/connector/forwardconnector - go.opentelemetry.io/collector/consumer - go.opentelemetry.io/collector/consumer/consumerprofiles + - go.opentelemetry.io/collector/consumer/consumertest - go.opentelemetry.io/collector/exporter - go.opentelemetry.io/collector/exporter/debugexporter - go.opentelemetry.io/collector/exporter/loggingexporter