From 816d80c4b01e56a8723c21bfff76183e446276ec Mon Sep 17 00:00:00 2001 From: Daniel Jaglowski Date: Thu, 14 Nov 2024 10:32:26 -0500 Subject: [PATCH 01/14] [chore][plogutiltest] Use native structs in options (#36354) --- .../internal/plogutil/logs_test.go | 60 ++++++++++++++----- .../internal/plogutiltest/logs.go | 60 ++++++++----------- .../internal/plogutiltest/logs_test.go | 35 +++++++---- connector/routingconnector/logs_test.go | 11 +++- 4 files changed, 100 insertions(+), 66 deletions(-) diff --git a/connector/routingconnector/internal/plogutil/logs_test.go b/connector/routingconnector/internal/plogutil/logs_test.go index 5f0a4b00975a..56637d3f9a28 100644 --- a/connector/routingconnector/internal/plogutil/logs_test.go +++ b/connector/routingconnector/internal/plogutil/logs_test.go @@ -63,9 +63,14 @@ func TestMoveResourcesIf(t *testing.T) { from: plogutiltest.NewLogs("AB", "CD", "EF"), to: plogutiltest.NewLogs("1", "2", "3"), expectFrom: plogutiltest.NewLogs("A", "CD", "EF"), - expectTo: plogutiltest.NewLogsFromOpts( - plogutiltest.WithResource('1', plogutiltest.WithScope('2', "3")), - plogutiltest.WithResource('B', plogutiltest.WithScope('C', "EF"), plogutiltest.WithScope('D', "EF")), + expectTo: plogutiltest.New( + plogutiltest.Resource("1", + plogutiltest.Scope("2", plogutiltest.LogRecord("3")), + ), + plogutiltest.Resource("B", + plogutiltest.Scope("C", plogutiltest.LogRecord("E"), plogutiltest.LogRecord("F")), + plogutiltest.Scope("D", plogutiltest.LogRecord("E"), plogutiltest.LogRecord("F")), + ), ), }, } @@ -127,9 +132,14 @@ func TestMoveRecordsWithContextIf(t *testing.T) { }, from: plogutiltest.NewLogs("AB", "CD", "EF"), to: plog.NewLogs(), - expectFrom: plogutiltest.NewLogsFromOpts( - plogutiltest.WithResource('A', plogutiltest.WithScope('C', "EF"), plogutiltest.WithScope('D', "EF")), - plogutiltest.WithResource('B', plogutiltest.WithScope('D', "EF")), + expectFrom: plogutiltest.New( + plogutiltest.Resource("A", + plogutiltest.Scope("C", plogutiltest.LogRecord("E"), plogutiltest.LogRecord("F")), + plogutiltest.Scope("D", plogutiltest.LogRecord("E"), plogutiltest.LogRecord("F")), + ), + plogutiltest.Resource("B", + plogutiltest.Scope("D", plogutiltest.LogRecord("E"), plogutiltest.LogRecord("F")), + ), ), expectTo: plogutiltest.NewLogs("B", "C", "EF"), }, @@ -151,9 +161,15 @@ func TestMoveRecordsWithContextIf(t *testing.T) { }, from: plogutiltest.NewLogs("AB", "CD", "EF"), to: plog.NewLogs(), - expectFrom: plogutiltest.NewLogsFromOpts( - plogutiltest.WithResource('A', plogutiltest.WithScope('C', "EF"), plogutiltest.WithScope('D', "E")), - plogutiltest.WithResource('B', plogutiltest.WithScope('C', "EF"), plogutiltest.WithScope('D', "EF")), + expectFrom: plogutiltest.New( + plogutiltest.Resource("A", + plogutiltest.Scope("C", plogutiltest.LogRecord("E"), plogutiltest.LogRecord("F")), + plogutiltest.Scope("D", plogutiltest.LogRecord("E")), + ), + plogutiltest.Resource("B", + plogutiltest.Scope("C", plogutiltest.LogRecord("E"), plogutiltest.LogRecord("F")), + plogutiltest.Scope("D", plogutiltest.LogRecord("E"), plogutiltest.LogRecord("F")), + ), ), expectTo: plogutiltest.NewLogs("A", "D", "F"), }, @@ -175,9 +191,15 @@ func TestMoveRecordsWithContextIf(t *testing.T) { }, from: plogutiltest.NewLogs("AB", "CD", "EF"), to: plog.NewLogs(), - expectFrom: plogutiltest.NewLogsFromOpts( - plogutiltest.WithResource('A', plogutiltest.WithScope('C', "EF"), plogutiltest.WithScope('D', "EF")), - plogutiltest.WithResource('B', plogutiltest.WithScope('C', "F"), plogutiltest.WithScope('D', "F")), + 
expectFrom: plogutiltest.New( + plogutiltest.Resource("A", + plogutiltest.Scope("C", plogutiltest.LogRecord("E"), plogutiltest.LogRecord("F")), + plogutiltest.Scope("D", plogutiltest.LogRecord("E"), plogutiltest.LogRecord("F")), + ), + plogutiltest.Resource("B", + plogutiltest.Scope("C", plogutiltest.LogRecord("F")), + plogutiltest.Scope("D", plogutiltest.LogRecord("F")), + ), ), expectTo: plogutiltest.NewLogs("B", "CD", "E"), }, @@ -189,10 +211,16 @@ func TestMoveRecordsWithContextIf(t *testing.T) { from: plogutiltest.NewLogs("AB", "CD", "EF"), to: plogutiltest.NewLogs("1", "2", "3"), expectFrom: plogutiltest.NewLogs("AB", "C", "EF"), - expectTo: plogutiltest.NewLogsFromOpts( - plogutiltest.WithResource('1', plogutiltest.WithScope('2', "3")), - plogutiltest.WithResource('A', plogutiltest.WithScope('D', "EF")), - plogutiltest.WithResource('B', plogutiltest.WithScope('D', "EF")), + expectTo: plogutiltest.New( + plogutiltest.Resource("1", + plogutiltest.Scope("2", plogutiltest.LogRecord("3")), + ), + plogutiltest.Resource("A", + plogutiltest.Scope("D", plogutiltest.LogRecord("E"), plogutiltest.LogRecord("F")), + ), + plogutiltest.Resource("B", + plogutiltest.Scope("D", plogutiltest.LogRecord("E"), plogutiltest.LogRecord("F")), + ), ), }, } diff --git a/connector/routingconnector/internal/plogutiltest/logs.go b/connector/routingconnector/internal/plogutiltest/logs.go index 034162eb25b8..675d5ad3eb85 100644 --- a/connector/routingconnector/internal/plogutiltest/logs.go +++ b/connector/routingconnector/internal/plogutiltest/logs.go @@ -5,7 +5,7 @@ package plogutiltest // import "github.com/open-telemetry/opentelemetry-collecto import "go.opentelemetry.io/collector/pdata/plog" -// TestLogs returns a plog.Logs with a uniform structure where resources, scopes, and +// NewLogs returns a plog.Logs with a uniform structure where resources, scopes, and // log records are identical across all instances, except for one identifying field. // // Identifying fields: @@ -13,7 +13,7 @@ import "go.opentelemetry.io/collector/pdata/plog" // - Scopes have a name with a value of "scopeN". // - LogRecords have a body with a value of "logN". // -// Example: TestLogs("AB", "XYZ", "1234") returns: +// Example: NewLogs("AB", "XYZ", "1234") returns: // // resourceA, resourceB // each with scopeX, scopeY, scopeZ @@ -37,42 +37,34 @@ func NewLogs(resourceIDs, scopeIDs, logRecordIDs string) plog.Logs { return ld } -type Resource struct { - id byte - scopes []Scope -} - -type Scope struct { - id byte - logs string +func New(resources ...plog.ResourceLogs) plog.Logs { + ld := plog.NewLogs() + for _, resource := range resources { + resource.CopyTo(ld.ResourceLogs().AppendEmpty()) + } + return ld } -func WithResource(id byte, scopes ...Scope) Resource { - r := Resource{id: id} - r.scopes = append(r.scopes, scopes...) - return r +func Resource(id string, scopes ...plog.ScopeLogs) plog.ResourceLogs { + rl := plog.NewResourceLogs() + rl.Resource().Attributes().PutStr("resourceName", "resource"+id) + for _, scope := range scopes { + scope.CopyTo(rl.ScopeLogs().AppendEmpty()) + } + return rl } -func WithScope(id byte, logs string) Scope { - return Scope{id: id, logs: logs} +func Scope(id string, logs ...plog.LogRecord) plog.ScopeLogs { + s := plog.NewScopeLogs() + s.Scope().SetName("scope" + id) + for _, log := range logs { + log.CopyTo(s.LogRecords().AppendEmpty()) + } + return s } -// NewLogsFromOpts creates a plog.Logs with the specified resources, scopes, and logs. 
-// The general idea is the same as NewLogs, but this function allows for more flexibility -// in creating non-uniform structures. -func NewLogsFromOpts(resources ...Resource) plog.Logs { - ld := plog.NewLogs() - for _, resource := range resources { - r := ld.ResourceLogs().AppendEmpty() - r.Resource().Attributes().PutStr("resourceName", "resource"+string(resource.id)) - for _, scope := range resource.scopes { - s := r.ScopeLogs().AppendEmpty() - s.Scope().SetName("scope" + string(scope.id)) - for i := 0; i < len(scope.logs); i++ { - l := s.LogRecords().AppendEmpty() - l.Body().SetStr("log" + string(scope.logs[i])) - } - } - } - return ld +func LogRecord(id string) plog.LogRecord { + lr := plog.NewLogRecord() + lr.Body().SetStr("log" + id) + return lr } diff --git a/connector/routingconnector/internal/plogutiltest/logs_test.go b/connector/routingconnector/internal/plogutiltest/logs_test.go index b056e51eef5e..7b3378bbabf4 100644 --- a/connector/routingconnector/internal/plogutiltest/logs_test.go +++ b/connector/routingconnector/internal/plogutiltest/logs_test.go @@ -17,7 +17,8 @@ func TestNewLogs(t *testing.T) { t.Run("empty", func(t *testing.T) { expected := plog.NewLogs() assert.NoError(t, plogtest.CompareLogs(expected, plogutiltest.NewLogs("", "", ""))) - assert.NoError(t, plogtest.CompareLogs(expected, plogutiltest.NewLogsFromOpts())) + assert.NoError(t, plogtest.CompareLogs(expected, plogutiltest.New())) + assert.NoError(t, plogtest.CompareLogs(expected, plogutiltest.New())) }) t.Run("simple", func(t *testing.T) { @@ -32,8 +33,8 @@ func TestNewLogs(t *testing.T) { return ld }() assert.NoError(t, plogtest.CompareLogs(expected, plogutiltest.NewLogs("A", "B", "C"))) - assert.NoError(t, plogtest.CompareLogs(expected, plogutiltest.NewLogsFromOpts( - plogutiltest.WithResource('A', plogutiltest.WithScope('B', "C")), + assert.NoError(t, plogtest.CompareLogs(expected, plogutiltest.New( + plogutiltest.Resource("A", plogutiltest.Scope("B", plogutiltest.LogRecord("C"))), ))) }) @@ -55,9 +56,9 @@ func TestNewLogs(t *testing.T) { return ld }() assert.NoError(t, plogtest.CompareLogs(expected, plogutiltest.NewLogs("AB", "C", "D"))) - assert.NoError(t, plogtest.CompareLogs(expected, plogutiltest.NewLogsFromOpts( - plogutiltest.WithResource('A', plogutiltest.WithScope('C', "D")), - plogutiltest.WithResource('B', plogutiltest.WithScope('C', "D")), + assert.NoError(t, plogtest.CompareLogs(expected, plogutiltest.New( + plogutiltest.Resource("A", plogutiltest.Scope("C", plogutiltest.LogRecord("D"))), + plogutiltest.Resource("B", plogutiltest.Scope("C", plogutiltest.LogRecord("D"))), ))) }) @@ -77,8 +78,11 @@ func TestNewLogs(t *testing.T) { return ld }() assert.NoError(t, plogtest.CompareLogs(expected, plogutiltest.NewLogs("A", "BC", "D"))) - assert.NoError(t, plogtest.CompareLogs(expected, plogutiltest.NewLogsFromOpts( - plogutiltest.WithResource('A', plogutiltest.WithScope('B', "D"), plogutiltest.WithScope('C', "D")), + assert.NoError(t, plogtest.CompareLogs(expected, plogutiltest.New( + plogutiltest.Resource("A", + plogutiltest.Scope("B", plogutiltest.LogRecord("D")), + plogutiltest.Scope("C", plogutiltest.LogRecord("D")), + ), ))) }) @@ -96,8 +100,8 @@ func TestNewLogs(t *testing.T) { return ld }() assert.NoError(t, plogtest.CompareLogs(expected, plogutiltest.NewLogs("A", "B", "CD"))) - assert.NoError(t, plogtest.CompareLogs(expected, plogutiltest.NewLogsFromOpts( - plogutiltest.WithResource('A', plogutiltest.WithScope('B', "CD")), + assert.NoError(t, plogtest.CompareLogs(expected, plogutiltest.New( + 
plogutiltest.Resource("A", plogutiltest.Scope("B", plogutiltest.LogRecord("C"), plogutiltest.LogRecord("D"))), ))) }) @@ -124,9 +128,14 @@ func TestNewLogs(t *testing.T) { l.Body().SetStr("logG") // resourceB.scopeD.logG return ld }() - assert.NoError(t, plogtest.CompareLogs(expected, plogutiltest.NewLogsFromOpts( - plogutiltest.WithResource('A', plogutiltest.WithScope('C', "E"), plogutiltest.WithScope('D', "E")), - plogutiltest.WithResource('B', plogutiltest.WithScope('D', "FG")), + assert.NoError(t, plogtest.CompareLogs(expected, plogutiltest.New( + plogutiltest.Resource("A", + plogutiltest.Scope("C", plogutiltest.LogRecord("E")), + plogutiltest.Scope("D", plogutiltest.LogRecord("E")), + ), + plogutiltest.Resource("B", + plogutiltest.Scope("D", plogutiltest.LogRecord("F"), plogutiltest.LogRecord("G")), + ), ))) }) } diff --git a/connector/routingconnector/logs_test.go b/connector/routingconnector/logs_test.go index c0198fe16523..bcad290a28bc 100644 --- a/connector/routingconnector/logs_test.go +++ b/connector/routingconnector/logs_test.go @@ -811,9 +811,14 @@ func TestLogsConnectorDetailed(t *testing.T) { input: plogutiltest.NewLogs("AB", "CD", "EF"), expectSink0: plogutiltest.NewLogs("B", "D", "EF"), expectSink1: plog.Logs{}, - expectSinkD: plogutiltest.NewLogsFromOpts( - plogutiltest.WithResource('A', plogutiltest.WithScope('C', "EF"), plogutiltest.WithScope('D', "EF")), - plogutiltest.WithResource('B', plogutiltest.WithScope('C', "EF")), + expectSinkD: plogutiltest.New( + plogutiltest.Resource("A", + plogutiltest.Scope("C", plogutiltest.LogRecord("E"), plogutiltest.LogRecord("F")), + plogutiltest.Scope("D", plogutiltest.LogRecord("E"), plogutiltest.LogRecord("F")), + ), + plogutiltest.Resource("B", + plogutiltest.Scope("C", plogutiltest.LogRecord("E"), plogutiltest.LogRecord("F")), + ), ), }, { From 8ea5956d78eb27bbb81ead15e431302feadba902 Mon Sep 17 00:00:00 2001 From: Joshua MacDonald Date: Thu, 14 Nov 2024 08:41:28 -0800 Subject: [PATCH 02/14] (otelarrowreceiver): LIFO-based admission control, waiting limit expressed in MiB of request data (#36141) --- .chloggen/otelarrow-admission.yaml | 27 +++ internal/otelarrow/admission/README.md | 20 -- internal/otelarrow/admission/boundedqueue.go | 164 ------------- .../otelarrow/admission/boundedqueue_test.go | 219 ------------------ internal/otelarrow/admission/controller.go | 59 ----- internal/otelarrow/go.mod | 6 +- internal/otelarrow/go.sum | 9 - internal/otelarrow/test/e2e_test.go | 7 +- receiver/otelarrowreceiver/README.md | 6 +- receiver/otelarrowreceiver/config.go | 7 +- receiver/otelarrowreceiver/config_test.go | 5 +- receiver/otelarrowreceiver/factory.go | 4 +- receiver/otelarrowreceiver/go.mod | 4 - receiver/otelarrowreceiver/go.sum | 9 - .../otelarrowreceiver/internal/arrow/arrow.go | 16 +- .../internal/arrow/arrow_test.go | 21 +- .../otelarrowreceiver/internal/logs/otlp.go | 13 +- .../internal/logs/otlp_test.go | 4 +- .../internal/metrics/otlp.go | 13 +- .../internal/metrics/otlp_test.go | 4 +- .../otelarrowreceiver/internal/trace/otlp.go | 13 +- .../internal/trace/otlp_test.go | 4 +- receiver/otelarrowreceiver/otelarrow.go | 10 +- .../otelarrowreceiver/testdata/config.yaml | 2 +- 24 files changed, 90 insertions(+), 556 deletions(-) create mode 100644 .chloggen/otelarrow-admission.yaml delete mode 100644 internal/otelarrow/admission/README.md delete mode 100644 internal/otelarrow/admission/boundedqueue.go delete mode 100644 internal/otelarrow/admission/boundedqueue_test.go delete mode 100644 
internal/otelarrow/admission/controller.go diff --git a/.chloggen/otelarrow-admission.yaml b/.chloggen/otelarrow-admission.yaml new file mode 100644 index 000000000000..8bd92566d66c --- /dev/null +++ b/.chloggen/otelarrow-admission.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: enhancement + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: otelarrowreceiver + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Admission control improvements (LIFO); admission.waiter_limit is deprecated, replaced with admission.waiting_limit_mib. + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [36074] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [] diff --git a/internal/otelarrow/admission/README.md b/internal/otelarrow/admission/README.md deleted file mode 100644 index 053ad05a8fcf..000000000000 --- a/internal/otelarrow/admission/README.md +++ /dev/null @@ -1,20 +0,0 @@ -# Admission Package - -## Overview - -The admission package provides a BoundedQueue object which is a semaphore implementation that limits the number of bytes admitted into a collector pipeline. Additionally the BoundedQueue limits the number of waiters that can block on a call to `bq.Acquire(sz int64)`. - -This package is an experiment to improve the behavior of Collector pipelines having their `exporterhelper` configured to apply backpressure. This package is meant to be used in receivers, via an interceptor or custom logic. Therefore, the BoundedQueue helps limit memory within the entire collector pipeline by limiting two dimensions that cause memory issues: -1. bytes: large requests that enter the collector pipeline can require large allocations even if downstream components will eventually limit or ratelimit the request. -2. waiters: limiting on bytes alone is not enough because requests that enter the pipeline and block on `bq.Acquire()` can still consume memory within the receiver. If there are enough waiters this can be a significant contribution to memory usage. - -## Usage - -Create a new BoundedQueue by calling `bq := admission.NewBoundedQueue(maxLimitBytes, maxLimitWaiters)` - -Within the component call `bq.Acquire(ctx, requestSize)` which will either -1. succeed immediately if there is enough available memory -2. fail immediately if there are too many waiters -3. block until context cancelation or enough bytes becomes available - -Once a request has finished processing and is sent downstream call `bq.Release(requestSize)` to allow waiters to be admitted for processing. Release should only fail if releasing more bytes than previously acquired. 
\ No newline at end of file diff --git a/internal/otelarrow/admission/boundedqueue.go b/internal/otelarrow/admission/boundedqueue.go deleted file mode 100644 index 77a270a7d3dd..000000000000 --- a/internal/otelarrow/admission/boundedqueue.go +++ /dev/null @@ -1,164 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package admission // import "github.com/open-telemetry/opentelemetry-collector-contrib/internal/otelarrow/admission" - -import ( - "context" - "fmt" - "sync" - - "github.com/google/uuid" - orderedmap "github.com/wk8/go-ordered-map/v2" - "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/codes" - "go.opentelemetry.io/otel/trace" - grpccodes "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -var ErrTooManyWaiters = status.Error(grpccodes.ResourceExhausted, "rejecting request, too much pending data") -var ErrRequestTooLarge = status.Error(grpccodes.InvalidArgument, "rejecting request, request is too large") - -type BoundedQueue struct { - maxLimitBytes int64 - maxLimitWaiters int64 - currentBytes int64 - currentWaiters int64 - lock sync.Mutex - waiters *orderedmap.OrderedMap[uuid.UUID, waiter] - tracer trace.Tracer -} - -type waiter struct { - readyCh chan struct{} - pendingBytes int64 - ID uuid.UUID -} - -func NewBoundedQueue(ts component.TelemetrySettings, maxLimitBytes, maxLimitWaiters int64) Queue { - return &BoundedQueue{ - maxLimitBytes: maxLimitBytes, - maxLimitWaiters: maxLimitWaiters, - waiters: orderedmap.New[uuid.UUID, waiter](), - tracer: ts.TracerProvider.Tracer("github.com/open-telemetry/opentelemetry-collector-contrib/internal/otelarrow"), - } -} - -func (bq *BoundedQueue) admit(pendingBytes int64) (bool, error) { - bq.lock.Lock() - defer bq.lock.Unlock() - - if pendingBytes > bq.maxLimitBytes { // will never succeed - return false, ErrRequestTooLarge - } - - if bq.currentBytes+pendingBytes <= bq.maxLimitBytes { // no need to wait to admit - bq.currentBytes += pendingBytes - return true, nil - } - - // since we were unable to admit, check if we can wait. - if bq.currentWaiters+1 > bq.maxLimitWaiters { // too many waiters - return false, ErrTooManyWaiters - } - - // if we got to this point we need to wait to acquire bytes, so update currentWaiters before releasing mutex. - bq.currentWaiters++ - return false, nil -} - -func (bq *BoundedQueue) Acquire(ctx context.Context, pendingBytes int64) error { - success, err := bq.admit(pendingBytes) - if err != nil || success { - return err - } - - // otherwise we need to wait for bytes to be released - curWaiter := waiter{ - pendingBytes: pendingBytes, - readyCh: make(chan struct{}), - } - - bq.lock.Lock() - - // generate unique key - for { - id := uuid.New() - _, keyExists := bq.waiters.Get(id) - if keyExists { - continue - } - bq.waiters.Set(id, curWaiter) - curWaiter.ID = id - break - } - - bq.lock.Unlock() - ctx, span := bq.tracer.Start(ctx, "admission_blocked", - trace.WithAttributes(attribute.Int64("pending", pendingBytes))) - defer span.End() - - select { - case <-curWaiter.readyCh: - return nil - case <-ctx.Done(): - // canceled before acquired so remove waiter. 
- bq.lock.Lock() - defer bq.lock.Unlock() - err = fmt.Errorf("context canceled: %w ", ctx.Err()) - span.SetStatus(codes.Error, "context canceled") - - _, found := bq.waiters.Delete(curWaiter.ID) - if !found { - return err - } - - bq.currentWaiters-- - return err - } -} - -func (bq *BoundedQueue) Release(pendingBytes int64) error { - bq.lock.Lock() - defer bq.lock.Unlock() - - bq.currentBytes -= pendingBytes - - if bq.currentBytes < 0 { - return fmt.Errorf("released more bytes than acquired") - } - - for { - if bq.waiters.Len() == 0 { - return nil - } - next := bq.waiters.Oldest() - nextWaiter := next.Value - nextKey := next.Key - if bq.currentBytes+nextWaiter.pendingBytes <= bq.maxLimitBytes { - bq.currentBytes += nextWaiter.pendingBytes - bq.currentWaiters-- - close(nextWaiter.readyCh) - _, found := bq.waiters.Delete(nextKey) - if !found { - return fmt.Errorf("deleting waiter that doesn't exist") - } - continue - } - break - } - - return nil -} - -func (bq *BoundedQueue) TryAcquire(pendingBytes int64) bool { - bq.lock.Lock() - defer bq.lock.Unlock() - if bq.currentBytes+pendingBytes <= bq.maxLimitBytes { - bq.currentBytes += pendingBytes - return true - } - return false -} diff --git a/internal/otelarrow/admission/boundedqueue_test.go b/internal/otelarrow/admission/boundedqueue_test.go deleted file mode 100644 index ffd616018ea2..000000000000 --- a/internal/otelarrow/admission/boundedqueue_test.go +++ /dev/null @@ -1,219 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package admission - -import ( - "context" - "sync" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/component/componenttest" - "go.opentelemetry.io/otel/codes" - "go.opentelemetry.io/otel/sdk/trace" - "go.opentelemetry.io/otel/sdk/trace/tracetest" - "go.uber.org/multierr" -) - -func min(x, y int64) int64 { - if x <= y { - return x - } - return y -} - -func max(x, y int64) int64 { - if x >= y { - return x - } - return y -} - -func abs(x int64) int64 { - if x < 0 { - return -x - } - return x -} - -var noopTelemetry = componenttest.NewNopTelemetrySettings() - -func newTestQueue(limitBytes, limitWaiters int64) *BoundedQueue { - return NewBoundedQueue(noopTelemetry, limitBytes, limitWaiters).(*BoundedQueue) -} - -func TestAcquireSimpleNoWaiters(t *testing.T) { - maxLimitBytes := 1000 - maxLimitWaiters := 10 - numRequests := 40 - requestSize := 21 - - bq := newTestQueue(int64(maxLimitBytes), int64(maxLimitWaiters)) - - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - for i := 0; i < numRequests; i++ { - go func() { - err := bq.Acquire(ctx, int64(requestSize)) - assert.NoError(t, err) - }() - } - - require.Never(t, func() bool { - return bq.waiters.Len() > 0 - }, 2*time.Second, 10*time.Millisecond) - - for i := 0; i < numRequests; i++ { - assert.NoError(t, bq.Release(int64(requestSize))) - assert.Equal(t, int64(0), bq.currentWaiters) - } - - assert.ErrorContains(t, bq.Release(int64(1)), "released more bytes than acquired") - assert.NoError(t, bq.Acquire(ctx, int64(maxLimitBytes))) -} - -func TestAcquireBoundedWithWaiters(t *testing.T) { - tests := []struct { - name string - maxLimitBytes int64 - maxLimitWaiters int64 - numRequests int64 - requestSize int64 - timeout time.Duration - }{ - { - name: "below max waiters above max bytes", - maxLimitBytes: 1000, - maxLimitWaiters: 100, - numRequests: 100, - requestSize: 21, - timeout: 5 * time.Second, - }, - { - name: 
"above max waiters above max bytes", - maxLimitBytes: 1000, - maxLimitWaiters: 100, - numRequests: 200, - requestSize: 21, - timeout: 5 * time.Second, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - bq := newTestQueue(tt.maxLimitBytes, tt.maxLimitWaiters) - var blockedRequests int64 - numReqsUntilBlocked := tt.maxLimitBytes / tt.requestSize - requestsAboveLimit := abs(tt.numRequests - numReqsUntilBlocked) - tooManyWaiters := requestsAboveLimit > tt.maxLimitWaiters - numRejected := max(requestsAboveLimit-tt.maxLimitWaiters, int64(0)) - - // There should never be more blocked requests than maxLimitWaiters. - blockedRequests = min(tt.maxLimitWaiters, requestsAboveLimit) - - ctx, cancel := context.WithTimeout(context.Background(), tt.timeout) - defer cancel() - var errs error - for i := 0; i < int(tt.numRequests); i++ { - go func() { - err := bq.Acquire(ctx, tt.requestSize) - bq.lock.Lock() - defer bq.lock.Unlock() - errs = multierr.Append(errs, err) - }() - } - - require.Eventually(t, func() bool { - bq.lock.Lock() - defer bq.lock.Unlock() - return bq.waiters.Len() == int(blockedRequests) - }, 3*time.Second, 10*time.Millisecond) - - assert.NoError(t, bq.Release(tt.requestSize)) - assert.Equal(t, bq.waiters.Len(), int(blockedRequests)-1) - - for i := 0; i < int(tt.numRequests-numRejected)-1; i++ { - assert.NoError(t, bq.Release(tt.requestSize)) - } - - bq.lock.Lock() - if tooManyWaiters { - assert.ErrorContains(t, errs, ErrTooManyWaiters.Error()) - } else { - assert.NoError(t, errs) - } - bq.lock.Unlock() - - // confirm all bytes were released by acquiring maxLimitBytes. - assert.True(t, bq.TryAcquire(tt.maxLimitBytes)) - }) - } -} - -func TestAcquireContextCanceled(t *testing.T) { - maxLimitBytes := 1000 - maxLimitWaiters := 100 - numRequests := 100 - requestSize := 21 - numReqsUntilBlocked := maxLimitBytes / requestSize - requestsAboveLimit := abs(int64(numRequests) - int64(numReqsUntilBlocked)) - - blockedRequests := min(int64(maxLimitWaiters), requestsAboveLimit) - - exp := tracetest.NewInMemoryExporter() - tp := trace.NewTracerProvider(trace.WithSyncer(exp)) - ts := noopTelemetry - ts.TracerProvider = tp - - bq := NewBoundedQueue(ts, int64(maxLimitBytes), int64(maxLimitWaiters)).(*BoundedQueue) - - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - var errs error - var wg sync.WaitGroup - for i := 0; i < numRequests; i++ { - wg.Add(1) - go func() { - err := bq.Acquire(ctx, int64(requestSize)) - bq.lock.Lock() - defer bq.lock.Unlock() - errs = multierr.Append(errs, err) - wg.Done() - }() - } - - // Wait until all calls to Acquire() happen and we have the expected number of waiters. - require.Eventually(t, func() bool { - bq.lock.Lock() - defer bq.lock.Unlock() - return bq.waiters.Len() == int(blockedRequests) - }, 3*time.Second, 10*time.Millisecond) - - cancel() - wg.Wait() - assert.ErrorContains(t, errs, "context canceled") - - // Expect spans named admission_blocked w/ context canceled. - spans := exp.GetSpans() - exp.Reset() - assert.NotEmpty(t, spans) - for _, span := range spans { - assert.Equal(t, "admission_blocked", span.Name) - assert.Equal(t, codes.Error, span.Status.Code) - assert.Equal(t, "context canceled", span.Status.Description) - } - - // Now all waiters should have returned and been removed. 
- assert.Equal(t, 0, bq.waiters.Len()) - - for i := 0; i < numReqsUntilBlocked; i++ { - assert.NoError(t, bq.Release(int64(requestSize))) - assert.Equal(t, int64(0), bq.currentWaiters) - } - assert.True(t, bq.TryAcquire(int64(maxLimitBytes))) - - // Expect no more spans, because admission was not blocked. - spans = exp.GetSpans() - require.Empty(t, spans) -} diff --git a/internal/otelarrow/admission/controller.go b/internal/otelarrow/admission/controller.go deleted file mode 100644 index 989c4a4467cb..000000000000 --- a/internal/otelarrow/admission/controller.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package admission // import "github.com/open-telemetry/opentelemetry-collector-contrib/internal/otelarrow/admission" - -import ( - "context" -) - -// Queue is a weighted admission queue interface. -type Queue interface { - // Acquire asks the controller to admit the caller. - // - // The weight parameter specifies how large of an admission to make. - // This might be used on the bytes of request (for example) to differentiate - // between large and small requests. - // - // Admit will return when one of the following events occurs: - // - // (1) admission is allowed, or - // (2) the provided ctx becomes canceled, or - // (3) there are so many existing waiters that the - // controller decides to reject this caller without - // admitting it. - // - // In case (1), the return value will be a non-nil error. The - // caller must invoke Release(weight) after it is finished - // with the resource being guarded by the admission - // controller. - // - // In case (2), the return value will be a Cancelled or - // DeadlineExceeded error. - // - // In case (3), the return value will be a ResourceExhausted - // error. - Acquire(ctx context.Context, weight int64) error - - // Release will be eliminated as part of issue #36074. - Release(weight int64) error -} - -type noopController struct{} - -var _ Queue = noopController{} - -// NewUnboundedQueue returns a no-op implementation of the Queue interface. -func NewUnboundedQueue() Queue { - return noopController{} -} - -// Acquire implements Queue. -func (noopController) Acquire(_ context.Context, _ int64) error { - return nil -} - -// Acquire implements Queue. 
-func (noopController) Release(_ int64) error { - return nil -} diff --git a/internal/otelarrow/go.mod b/internal/otelarrow/go.mod index 0b10b362f498..f42ea96a0fa6 100644 --- a/internal/otelarrow/go.mod +++ b/internal/otelarrow/go.mod @@ -3,13 +3,11 @@ module github.com/open-telemetry/opentelemetry-collector-contrib/internal/otelar go 1.22.0 require ( - github.com/google/uuid v1.6.0 github.com/klauspost/compress v1.17.11 github.com/open-telemetry/opentelemetry-collector-contrib/exporter/otelarrowexporter v0.113.0 github.com/open-telemetry/opentelemetry-collector-contrib/receiver/otelarrowreceiver v0.113.0 github.com/open-telemetry/otel-arrow v0.30.0 github.com/stretchr/testify v1.9.0 - github.com/wk8/go-ordered-map/v2 v2.1.8 go.opentelemetry.io/collector/component v0.113.0 go.opentelemetry.io/collector/config/configgrpc v0.113.0 go.opentelemetry.io/collector/config/configtelemetry v0.113.0 @@ -35,9 +33,7 @@ require ( github.com/apache/arrow/go/v16 v16.1.0 // indirect github.com/apache/arrow/go/v17 v17.0.0 // indirect github.com/axiomhq/hyperloglog v0.0.0-20230201085229-3ddf4bad03dc // indirect - github.com/bahlo/generic-list-go v0.2.0 // indirect github.com/brianvoe/gofakeit/v6 v6.17.0 // indirect - github.com/buger/jsonparser v1.1.1 // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/dgryski/go-metro v0.0.0-20180109044635-280f6062b5bc // indirect @@ -50,12 +46,12 @@ require ( github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect github.com/google/flatbuffers v24.3.25+incompatible // indirect + github.com/google/uuid v1.6.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/klauspost/cpuid/v2 v2.2.8 // indirect github.com/knadh/koanf/maps v0.1.1 // indirect github.com/knadh/koanf/providers/confmap v0.1.0 // indirect github.com/knadh/koanf/v2 v2.1.1 // indirect - github.com/mailru/easyjson v0.7.7 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect diff --git a/internal/otelarrow/go.sum b/internal/otelarrow/go.sum index 2c932716bad2..31559a8aa84a 100644 --- a/internal/otelarrow/go.sum +++ b/internal/otelarrow/go.sum @@ -9,12 +9,8 @@ github.com/apache/arrow/go/v17 v17.0.0 h1:RRR2bdqKcdbss9Gxy2NS/hK8i4LDMh23L6BbkN github.com/apache/arrow/go/v17 v17.0.0/go.mod h1:jR7QHkODl15PfYyjM2nU+yTLScZ/qfj7OSUZmJ8putc= github.com/axiomhq/hyperloglog v0.0.0-20230201085229-3ddf4bad03dc h1:Keo7wQ7UODUaHcEi7ltENhbAK2VgZjfat6mLy03tQzo= github.com/axiomhq/hyperloglog v0.0.0-20230201085229-3ddf4bad03dc/go.mod h1:k08r+Yj1PRAmuayFiRK6MYuR5Ve4IuZtTfxErMIh0+c= -github.com/bahlo/generic-list-go v0.2.0 h1:5sz/EEAK+ls5wF+NeqDpk5+iNdMDXrh3z3nPnH1Wvgk= -github.com/bahlo/generic-list-go v0.2.0/go.mod h1:2KvAjgMlE5NNynlg/5iLrrCCZ2+5xWbdbCW3pNTGyYg= github.com/brianvoe/gofakeit/v6 v6.17.0 h1:obbQTJeHfktJtiZzq0Q1bEpsNUs+yHrYlPVWt7BtmJ4= github.com/brianvoe/gofakeit/v6 v6.17.0/go.mod h1:Ow6qC71xtwm79anlwKRlWZW6zVq9D2XHE4QSSMP/rU8= -github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs= -github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/creack/pty v1.1.9/go.mod 
h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= @@ -51,7 +47,6 @@ github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= @@ -73,8 +68,6 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= -github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= @@ -100,8 +93,6 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/wk8/go-ordered-map/v2 v2.1.8 h1:5h/BUHu93oj4gIdvHHHGsScSTMijfx5PeYkE/fJgbpc= -github.com/wk8/go-ordered-map/v2 v2.1.8/go.mod h1:5nJHM5DyteebpVlHnWMV0rPz6Zp7+xBAnxjb1X5vnTw= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= diff --git a/internal/otelarrow/test/e2e_test.go b/internal/otelarrow/test/e2e_test.go index f6b3f3fc4578..348956d2d681 100644 --- a/internal/otelarrow/test/e2e_test.go +++ b/internal/otelarrow/test/e2e_test.go @@ -711,12 +711,7 @@ func TestIntegrationAdmissionLimited(t *testing.T) { testIntegrationTraces(ctx, t, params, func(ecfg *ExpConfig, rcfg *RecvConfig) { rcfg.Admission.RequestLimitMiB = admitLimit - - // Note: #36074 will change WaiterLimit to WaitingLimitMiB - // measured in bytes, not request count. This test is designed - // to work either way by virtue of having requests that are - // just shy of 1MiB. 
- rcfg.Admission.WaiterLimit = int64(waitingLimit) + rcfg.Admission.WaitingLimitMiB = waitingLimit ecfg.Arrow.NumStreams = 10 diff --git a/receiver/otelarrowreceiver/README.md b/receiver/otelarrowreceiver/README.md index b34d99c18e7f..2088caec1bb0 100644 --- a/receiver/otelarrowreceiver/README.md +++ b/receiver/otelarrowreceiver/README.md @@ -82,11 +82,11 @@ Several common configuration structures provide additional capabilities automati In the `admission` configuration block the following settings are available: -- `request_limit_mib` (default: 128): limits the number of requests that are received by the stream in terms of *uncompressed request size*. This should not be confused with `arrow.memory_limit_mib` which limits allocations made by the consumer when translating arrow records into pdata objects. i.e. request size is used to control how much traffic we admit, but does not control how much memory is used during request processing. When this field is set to zero, admission control is disabled. +- `request_limit_mib` (default: 128): limits the number of requests that are received by the stream in terms of *uncompressed request size*. This should not be confused with `arrow.memory_limit_mib`, which limits allocations made by the consumer when translating arrow records into pdata objects. The `request_limit_mib` value is used to control how much traffic we admit, but does not control how much memory is used during request processing. -- `waiter_limit` (default: 1000): limits the number of requests waiting on admission once `admission_limit_mib` is reached. This is another dimension of memory limiting that ensures waiters are not holding onto a significant amount of memory while waiting to be processed. +- `waiting_limit_mib` (default: 32): limits the number of requests waiting on admission after `request_limit_mib` is reached. This is another dimension of memory limiting that ensures waiters are not holding onto a significant amount of memory while waiting to be processed. -`request_limit_mib` and `waiter_limit` are arguments supplied to [admission.BoundedQueue](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/internal/otelarrow/admission). This custom semaphore is meant to be used within receivers to help limit memory within the collector pipeline. +`request_limit_mib` and `waiting_limit_mib` are arguments supplied to [admission.BoundedQueue](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/internal/otelarrow/admission2). This custom semaphore is meant to be used within receivers to help limit memory within the collector pipeline. ### Arrow-specific Configuration diff --git a/receiver/otelarrowreceiver/config.go b/receiver/otelarrowreceiver/config.go index 0f674d656b09..934f8e26bc64 100644 --- a/receiver/otelarrowreceiver/config.go +++ b/receiver/otelarrowreceiver/config.go @@ -25,10 +25,10 @@ type AdmissionConfig struct { // for processing. When this field is zero, admission control is disabled. RequestLimitMiB uint64 `mapstructure:"request_limit_mib"` - // WaiterLimit is the limit on the number of waiters waiting to be processed and consumed. + // WaitingLimitMiB is the limit on the amount of data waiting to be consumed. // This is a dimension of memory limiting to ensure waiters are not consuming an // unexpectedly large amount of memory in the arrow receiver. - WaiterLimit int64 `mapstructure:"waiter_limit"` + WaitingLimitMiB uint64 `mapstructure:"waiting_limit_mib"` } // ArrowConfig support configuring the Arrow receiver. 
@@ -86,8 +86,5 @@ func (cfg *Config) Unmarshal(conf *confmap.Conf) error { if cfg.Admission.RequestLimitMiB == 0 && cfg.Arrow.DeprecatedAdmissionLimitMiB != 0 { cfg.Admission.RequestLimitMiB = cfg.Arrow.DeprecatedAdmissionLimitMiB } - if cfg.Admission.WaiterLimit == 0 && cfg.Arrow.DeprecatedWaiterLimit != 0 { - cfg.Admission.WaiterLimit = cfg.Arrow.DeprecatedWaiterLimit - } return nil } diff --git a/receiver/otelarrowreceiver/config_test.go b/receiver/otelarrowreceiver/config_test.go index 29a1e8a08ad5..d50edab9fac2 100644 --- a/receiver/otelarrowreceiver/config_test.go +++ b/receiver/otelarrowreceiver/config_test.go @@ -82,7 +82,7 @@ func TestUnmarshalConfig(t *testing.T) { }, Admission: AdmissionConfig{ RequestLimitMiB: 80, - WaiterLimit: 100, + WaitingLimitMiB: 100, }, }, cfg) } @@ -106,7 +106,6 @@ func TestValidateDeprecatedConfig(t *testing.T) { Admission: AdmissionConfig{ // cfg.Validate should now set these fields. RequestLimitMiB: 80, - WaiterLimit: 100, }, }, cfg) } @@ -133,7 +132,7 @@ func TestUnmarshalConfigUnix(t *testing.T) { }, Admission: AdmissionConfig{ RequestLimitMiB: defaultRequestLimitMiB, - WaiterLimit: defaultWaiterLimit, + WaitingLimitMiB: defaultWaitingLimitMiB, }, }, cfg) } diff --git a/receiver/otelarrowreceiver/factory.go b/receiver/otelarrowreceiver/factory.go index 07ccc1bbbb1a..c440a8de925b 100644 --- a/receiver/otelarrowreceiver/factory.go +++ b/receiver/otelarrowreceiver/factory.go @@ -21,7 +21,7 @@ const ( defaultMemoryLimitMiB = 128 defaultRequestLimitMiB = 128 - defaultWaiterLimit = 1000 + defaultWaitingLimitMiB = 32 ) // NewFactory creates a new OTel-Arrow receiver factory. @@ -52,7 +52,7 @@ func createDefaultConfig() component.Config { }, Admission: AdmissionConfig{ RequestLimitMiB: defaultRequestLimitMiB, - WaiterLimit: defaultWaiterLimit, + WaitingLimitMiB: defaultWaitingLimitMiB, }, } } diff --git a/receiver/otelarrowreceiver/go.mod b/receiver/otelarrowreceiver/go.mod index 3208842382f9..b406619d0a6d 100644 --- a/receiver/otelarrowreceiver/go.mod +++ b/receiver/otelarrowreceiver/go.mod @@ -41,8 +41,6 @@ require ( github.com/HdrHistogram/hdrhistogram-go v1.1.2 // indirect github.com/apache/arrow/go/v17 v17.0.0 // indirect github.com/axiomhq/hyperloglog v0.0.0-20230201085229-3ddf4bad03dc // indirect - github.com/bahlo/generic-list-go v0.2.0 // indirect - github.com/buger/jsonparser v1.1.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/dgryski/go-metro v0.0.0-20180109044635-280f6062b5bc // indirect github.com/fsnotify/fsnotify v1.8.0 // indirect @@ -61,7 +59,6 @@ require ( github.com/knadh/koanf/maps v0.1.1 // indirect github.com/knadh/koanf/providers/confmap v0.1.0 // indirect github.com/knadh/koanf/v2 v2.1.1 // indirect - github.com/mailru/easyjson v0.7.7 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect @@ -69,7 +66,6 @@ require ( github.com/mostynb/go-grpc-compression v1.2.3 // indirect github.com/pierrec/lz4/v4 v4.1.21 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/wk8/go-ordered-map/v2 v2.1.8 // indirect github.com/x448/float16 v0.8.4 // indirect github.com/zeebo/xxh3 v1.0.2 // indirect go.opentelemetry.io/collector/config/configcompression v1.19.0 // indirect diff --git a/receiver/otelarrowreceiver/go.sum b/receiver/otelarrowreceiver/go.sum index bcb4f1920dda..4e59aad59a0d 100644 --- a/receiver/otelarrowreceiver/go.sum +++ 
b/receiver/otelarrowreceiver/go.sum @@ -7,12 +7,8 @@ github.com/apache/arrow/go/v17 v17.0.0 h1:RRR2bdqKcdbss9Gxy2NS/hK8i4LDMh23L6BbkN github.com/apache/arrow/go/v17 v17.0.0/go.mod h1:jR7QHkODl15PfYyjM2nU+yTLScZ/qfj7OSUZmJ8putc= github.com/axiomhq/hyperloglog v0.0.0-20230201085229-3ddf4bad03dc h1:Keo7wQ7UODUaHcEi7ltENhbAK2VgZjfat6mLy03tQzo= github.com/axiomhq/hyperloglog v0.0.0-20230201085229-3ddf4bad03dc/go.mod h1:k08r+Yj1PRAmuayFiRK6MYuR5Ve4IuZtTfxErMIh0+c= -github.com/bahlo/generic-list-go v0.2.0 h1:5sz/EEAK+ls5wF+NeqDpk5+iNdMDXrh3z3nPnH1Wvgk= -github.com/bahlo/generic-list-go v0.2.0/go.mod h1:2KvAjgMlE5NNynlg/5iLrrCCZ2+5xWbdbCW3pNTGyYg= github.com/brianvoe/gofakeit/v6 v6.17.0 h1:obbQTJeHfktJtiZzq0Q1bEpsNUs+yHrYlPVWt7BtmJ4= github.com/brianvoe/gofakeit/v6 v6.17.0/go.mod h1:Ow6qC71xtwm79anlwKRlWZW6zVq9D2XHE4QSSMP/rU8= -github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs= -github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= @@ -47,7 +43,6 @@ github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= @@ -69,8 +64,6 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= -github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= @@ -96,8 +89,6 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/wk8/go-ordered-map/v2 v2.1.8 h1:5h/BUHu93oj4gIdvHHHGsScSTMijfx5PeYkE/fJgbpc= -github.com/wk8/go-ordered-map/v2 v2.1.8/go.mod h1:5nJHM5DyteebpVlHnWMV0rPz6Zp7+xBAnxjb1X5vnTw= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/yuin/goldmark v1.1.27/go.mod 
h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= diff --git a/receiver/otelarrowreceiver/internal/arrow/arrow.go b/receiver/otelarrowreceiver/internal/arrow/arrow.go index 21d3a421bcf7..6b28f52d356b 100644 --- a/receiver/otelarrowreceiver/internal/arrow/arrow.go +++ b/receiver/otelarrowreceiver/internal/arrow/arrow.go @@ -38,7 +38,7 @@ import ( "google.golang.org/grpc/status" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/grpcutil" - "github.com/open-telemetry/opentelemetry-collector-contrib/internal/otelarrow/admission" + "github.com/open-telemetry/opentelemetry-collector-contrib/internal/otelarrow/admission2" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/otelarrow/netstats" internalmetadata "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/otelarrowreceiver/internal/metadata" ) @@ -76,7 +76,7 @@ type Receiver struct { newConsumer func() arrowRecord.ConsumerAPI netReporter netstats.Interface telemetryBuilder *internalmetadata.TelemetryBuilder - boundedQueue admission.Queue + boundedQueue admission2.Queue } // receiverStream holds the inFlightWG for a single stream. @@ -93,7 +93,7 @@ func New( gsettings configgrpc.ServerConfig, authServer auth.Server, newConsumer func() arrowRecord.ConsumerAPI, - bq admission.Queue, + bq admission2.Queue, netReporter netstats.Interface, ) (*Receiver, error) { tracer := set.TelemetrySettings.TracerProvider.Tracer("otel-arrow-receiver") @@ -452,6 +452,7 @@ type inFlightData struct { numItems int // how many items uncompSize int64 // uncompressed data size == how many bytes held in the semaphore + releaser admission2.ReleaseFunc } func (id *inFlightData) recvDone(ctx context.Context, recvErrPtr *error) { @@ -500,10 +501,8 @@ func (id *inFlightData) anyDone(ctx context.Context) { id.span.End() - if id.uncompSize != 0 { - if err := id.boundedQueue.Release(id.uncompSize); err != nil { - id.telemetry.Logger.Error("release error", zap.Error(err)) - } + if id.releaser != nil { + id.releaser() } if id.uncompSize != 0 { @@ -631,12 +630,13 @@ func (r *receiverStream) recvOne(streamCtx context.Context, serverStream anyStre // immediately if there are too many waiters, or will // otherwise block until timeout or enough memory becomes // available. 
- acquireErr := r.boundedQueue.Acquire(inflightCtx, uncompSize) + releaser, acquireErr := r.boundedQueue.Acquire(inflightCtx, uint64(uncompSize)) if acquireErr != nil { return acquireErr } flight.uncompSize = uncompSize flight.numItems = numItems + flight.releaser = releaser r.telemetryBuilder.OtelArrowReceiverInFlightBytes.Add(inflightCtx, uncompSize) r.telemetryBuilder.OtelArrowReceiverInFlightItems.Add(inflightCtx, int64(numItems)) diff --git a/receiver/otelarrowreceiver/internal/arrow/arrow_test.go b/receiver/otelarrowreceiver/internal/arrow/arrow_test.go index 65d314bdb88c..57a92c7d922e 100644 --- a/receiver/otelarrowreceiver/internal/arrow/arrow_test.go +++ b/receiver/otelarrowreceiver/internal/arrow/arrow_test.go @@ -43,7 +43,7 @@ import ( "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" - "github.com/open-telemetry/opentelemetry-collector-contrib/internal/otelarrow/admission" + "github.com/open-telemetry/opentelemetry-collector-contrib/internal/otelarrow/admission2" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/otelarrow/netstats" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/otelarrow/testdata" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/otelarrowreceiver/internal/arrow/mock" @@ -51,8 +51,8 @@ import ( var noopTelemetry = componenttest.NewNopTelemetrySettings() -func defaultBQ() admission.Queue { - return admission.NewBoundedQueue(noopTelemetry, 100000, 10) +func defaultBQ() admission2.Queue { + return admission2.NewBoundedQueue(noopTelemetry, 100000, 10) } type ( @@ -362,7 +362,7 @@ func (ctc *commonTestCase) newOOMConsumer() arrowRecord.ConsumerAPI { return mock } -func (ctc *commonTestCase) start(newConsumer func() arrowRecord.ConsumerAPI, bq admission.Queue, opts ...func(*configgrpc.ServerConfig, *auth.Server)) { +func (ctc *commonTestCase) start(newConsumer func() arrowRecord.ConsumerAPI, bq admission2.Queue, opts ...func(*configgrpc.ServerConfig, *auth.Server)) { var authServer auth.Server var gsettings configgrpc.ServerConfig for _, gf := range opts { @@ -452,13 +452,20 @@ func TestBoundedQueueLimits(t *testing.T) { batch, err := ctc.testProducer.BatchArrowRecordsFromTraces(td) require.NoError(t, err) - var bq admission.Queue + var bq admission2.Queue + // Note that this test exercises the case where there is or is not an + // error unrelated to pending data, thus we pass 0 in both cases as + // the WaitingLimitMiB below. + // + // There is an end-to-end test of admission control, including the + // ResourceExhausted status code we expect, in + // internal/otelarrow/test/e2e_test.go. 
if tt.expectErr { ctc.stream.EXPECT().Send(statusOKFor(batch.BatchId)).Times(0) - bq = admission.NewBoundedQueue(noopTelemetry, int64(sizer.TracesSize(td)-100), 10) + bq = admission2.NewBoundedQueue(noopTelemetry, uint64(sizer.TracesSize(td)-100), 0) } else { ctc.stream.EXPECT().Send(statusOKFor(batch.BatchId)).Times(1).Return(nil) - bq = admission.NewBoundedQueue(noopTelemetry, tt.admitLimit, 10) + bq = admission2.NewBoundedQueue(noopTelemetry, uint64(tt.admitLimit), 0) } ctc.start(ctc.newRealConsumer, bq) diff --git a/receiver/otelarrowreceiver/internal/logs/otlp.go b/receiver/otelarrowreceiver/internal/logs/otlp.go index 46bf68f3782a..6a4c76dbaeed 100644 --- a/receiver/otelarrowreceiver/internal/logs/otlp.go +++ b/receiver/otelarrowreceiver/internal/logs/otlp.go @@ -12,7 +12,7 @@ import ( "go.opentelemetry.io/collector/receiver/receiverhelper" "go.uber.org/zap" - "github.com/open-telemetry/opentelemetry-collector-contrib/internal/otelarrow/admission" + "github.com/open-telemetry/opentelemetry-collector-contrib/internal/otelarrow/admission2" ) const dataFormatProtobuf = "protobuf" @@ -22,13 +22,13 @@ type Receiver struct { plogotlp.UnimplementedGRPCServer nextConsumer consumer.Logs obsrecv *receiverhelper.ObsReport - boundedQueue admission.Queue + boundedQueue admission2.Queue sizer *plog.ProtoMarshaler logger *zap.Logger } // New creates a new Receiver reference. -func New(logger *zap.Logger, nextConsumer consumer.Logs, obsrecv *receiverhelper.ObsReport, bq admission.Queue) *Receiver { +func New(logger *zap.Logger, nextConsumer consumer.Logs, obsrecv *receiverhelper.ObsReport, bq admission2.Queue) *Receiver { return &Receiver{ nextConsumer: nextConsumer, obsrecv: obsrecv, @@ -49,11 +49,10 @@ func (r *Receiver) Export(ctx context.Context, req plogotlp.ExportRequest) (plog ctx = r.obsrecv.StartLogsOp(ctx) var err error - sizeBytes := int64(r.sizer.LogsSize(req.Logs())) - if acqErr := r.boundedQueue.Acquire(ctx, sizeBytes); acqErr == nil { + sizeBytes := uint64(r.sizer.LogsSize(req.Logs())) + if releaser, acqErr := r.boundedQueue.Acquire(ctx, sizeBytes); acqErr == nil { err = r.nextConsumer.ConsumeLogs(ctx, ld) - // Release() is not checked, see #36074. 
- _ = r.boundedQueue.Release(sizeBytes) // immediate release + releaser() // immediate release } else { err = acqErr } diff --git a/receiver/otelarrowreceiver/internal/logs/otlp_test.go b/receiver/otelarrowreceiver/internal/logs/otlp_test.go index a0bbb056f24c..d270ff138bfb 100644 --- a/receiver/otelarrowreceiver/internal/logs/otlp_test.go +++ b/receiver/otelarrowreceiver/internal/logs/otlp_test.go @@ -29,7 +29,7 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" - "github.com/open-telemetry/opentelemetry-collector-contrib/internal/otelarrow/admission" + "github.com/open-telemetry/opentelemetry-collector-contrib/internal/otelarrow/admission2" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/otelarrow/testdata" ) @@ -206,7 +206,7 @@ func otlpReceiverOnGRPCServer(t *testing.T, lc consumer.Logs) (net.Addr, *tracet ReceiverCreateSettings: set, }) require.NoError(t, err) - bq := admission.NewBoundedQueue(telset, maxBytes, 0) + bq := admission2.NewBoundedQueue(telset, maxBytes, 0) r := New(zap.NewNop(), lc, obsrecv, bq) // Now run it as a gRPC server srv := grpc.NewServer() diff --git a/receiver/otelarrowreceiver/internal/metrics/otlp.go b/receiver/otelarrowreceiver/internal/metrics/otlp.go index 3eabffd43f02..51ffdb377fea 100644 --- a/receiver/otelarrowreceiver/internal/metrics/otlp.go +++ b/receiver/otelarrowreceiver/internal/metrics/otlp.go @@ -12,7 +12,7 @@ import ( "go.opentelemetry.io/collector/receiver/receiverhelper" "go.uber.org/zap" - "github.com/open-telemetry/opentelemetry-collector-contrib/internal/otelarrow/admission" + "github.com/open-telemetry/opentelemetry-collector-contrib/internal/otelarrow/admission2" ) const dataFormatProtobuf = "protobuf" @@ -22,13 +22,13 @@ type Receiver struct { pmetricotlp.UnimplementedGRPCServer nextConsumer consumer.Metrics obsrecv *receiverhelper.ObsReport - boundedQueue admission.Queue + boundedQueue admission2.Queue sizer *pmetric.ProtoMarshaler logger *zap.Logger } // New creates a new Receiver reference. -func New(logger *zap.Logger, nextConsumer consumer.Metrics, obsrecv *receiverhelper.ObsReport, bq admission.Queue) *Receiver { +func New(logger *zap.Logger, nextConsumer consumer.Metrics, obsrecv *receiverhelper.ObsReport, bq admission2.Queue) *Receiver { return &Receiver{ nextConsumer: nextConsumer, obsrecv: obsrecv, @@ -49,11 +49,10 @@ func (r *Receiver) Export(ctx context.Context, req pmetricotlp.ExportRequest) (p ctx = r.obsrecv.StartMetricsOp(ctx) var err error - sizeBytes := int64(r.sizer.MetricsSize(req.Metrics())) - if acqErr := r.boundedQueue.Acquire(ctx, sizeBytes); acqErr == nil { + sizeBytes := uint64(r.sizer.MetricsSize(req.Metrics())) + if releaser, acqErr := r.boundedQueue.Acquire(ctx, sizeBytes); acqErr == nil { err = r.nextConsumer.ConsumeMetrics(ctx, md) - // Release() is not checked, see #36074. 
- _ = r.boundedQueue.Release(sizeBytes) // immediate release + releaser() // immediate release } else { err = acqErr } diff --git a/receiver/otelarrowreceiver/internal/metrics/otlp_test.go b/receiver/otelarrowreceiver/internal/metrics/otlp_test.go index 77a690963bac..a78378f54bd3 100644 --- a/receiver/otelarrowreceiver/internal/metrics/otlp_test.go +++ b/receiver/otelarrowreceiver/internal/metrics/otlp_test.go @@ -29,7 +29,7 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" - "github.com/open-telemetry/opentelemetry-collector-contrib/internal/otelarrow/admission" + "github.com/open-telemetry/opentelemetry-collector-contrib/internal/otelarrow/admission2" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/otelarrow/testdata" ) @@ -206,7 +206,7 @@ func otlpReceiverOnGRPCServer(t *testing.T, mc consumer.Metrics) (net.Addr, *tra ReceiverCreateSettings: set, }) require.NoError(t, err) - bq := admission.NewBoundedQueue(telset, maxBytes, 0) + bq := admission2.NewBoundedQueue(telset, maxBytes, 0) r := New(zap.NewNop(), mc, obsrecv, bq) // Now run it as a gRPC server srv := grpc.NewServer() diff --git a/receiver/otelarrowreceiver/internal/trace/otlp.go b/receiver/otelarrowreceiver/internal/trace/otlp.go index 00744a4c31b4..d4d94b404622 100644 --- a/receiver/otelarrowreceiver/internal/trace/otlp.go +++ b/receiver/otelarrowreceiver/internal/trace/otlp.go @@ -12,7 +12,7 @@ import ( "go.opentelemetry.io/collector/receiver/receiverhelper" "go.uber.org/zap" - "github.com/open-telemetry/opentelemetry-collector-contrib/internal/otelarrow/admission" + "github.com/open-telemetry/opentelemetry-collector-contrib/internal/otelarrow/admission2" ) const dataFormatProtobuf = "protobuf" @@ -22,13 +22,13 @@ type Receiver struct { ptraceotlp.UnimplementedGRPCServer nextConsumer consumer.Traces obsrecv *receiverhelper.ObsReport - boundedQueue admission.Queue + boundedQueue admission2.Queue sizer *ptrace.ProtoMarshaler logger *zap.Logger } // New creates a new Receiver reference. -func New(logger *zap.Logger, nextConsumer consumer.Traces, obsrecv *receiverhelper.ObsReport, bq admission.Queue) *Receiver { +func New(logger *zap.Logger, nextConsumer consumer.Traces, obsrecv *receiverhelper.ObsReport, bq admission2.Queue) *Receiver { return &Receiver{ nextConsumer: nextConsumer, obsrecv: obsrecv, @@ -49,11 +49,10 @@ func (r *Receiver) Export(ctx context.Context, req ptraceotlp.ExportRequest) (pt ctx = r.obsrecv.StartTracesOp(ctx) var err error - sizeBytes := int64(r.sizer.TracesSize(req.Traces())) - if acqErr := r.boundedQueue.Acquire(ctx, sizeBytes); acqErr == nil { + sizeBytes := uint64(r.sizer.TracesSize(req.Traces())) + if releaser, acqErr := r.boundedQueue.Acquire(ctx, sizeBytes); acqErr == nil { err = r.nextConsumer.ConsumeTraces(ctx, td) - // Release() is not checked, see #36074. 
- _ = r.boundedQueue.Release(sizeBytes) // immediate release + releaser() // immediate release } else { err = acqErr } diff --git a/receiver/otelarrowreceiver/internal/trace/otlp_test.go b/receiver/otelarrowreceiver/internal/trace/otlp_test.go index ce769f5451c4..251e194dde3c 100644 --- a/receiver/otelarrowreceiver/internal/trace/otlp_test.go +++ b/receiver/otelarrowreceiver/internal/trace/otlp_test.go @@ -29,7 +29,7 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" - "github.com/open-telemetry/opentelemetry-collector-contrib/internal/otelarrow/admission" + "github.com/open-telemetry/opentelemetry-collector-contrib/internal/otelarrow/admission2" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/otelarrow/testdata" ) @@ -206,7 +206,7 @@ func otlpReceiverOnGRPCServer(t *testing.T, tc consumer.Traces) (net.Addr, *trac ReceiverCreateSettings: set, }) require.NoError(t, err) - bq := admission.NewBoundedQueue(telset, maxBytes, 0) + bq := admission2.NewBoundedQueue(telset, maxBytes, 0) r := New(zap.NewNop(), tc, obsrecv, bq) // Now run it as a gRPC server srv := grpc.NewServer() diff --git a/receiver/otelarrowreceiver/otelarrow.go b/receiver/otelarrowreceiver/otelarrow.go index 664d7a7d9d73..b21bea395e96 100644 --- a/receiver/otelarrowreceiver/otelarrow.go +++ b/receiver/otelarrowreceiver/otelarrow.go @@ -23,7 +23,7 @@ import ( "go.uber.org/zap" "google.golang.org/grpc" - "github.com/open-telemetry/opentelemetry-collector-contrib/internal/otelarrow/admission" + "github.com/open-telemetry/opentelemetry-collector-contrib/internal/otelarrow/admission2" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/otelarrow/compression/zstd" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/otelarrow/netstats" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/otelarrowreceiver/internal/arrow" @@ -45,7 +45,7 @@ type otelArrowReceiver struct { obsrepGRPC *receiverhelper.ObsReport netReporter *netstats.NetworkReporter - boundedQueue admission.Queue + boundedQueue admission2.Queue settings receiver.Settings } @@ -66,11 +66,11 @@ func newOTelArrowReceiver(cfg *Config, set receiver.Settings) (*otelArrowReceive if err != nil { return nil, err } - var bq admission.Queue + var bq admission2.Queue if cfg.Admission.RequestLimitMiB == 0 { - bq = admission.NewUnboundedQueue() + bq = admission2.NewUnboundedQueue() } else { - bq = admission.NewBoundedQueue(set.TelemetrySettings, int64(cfg.Admission.RequestLimitMiB<<20), cfg.Admission.WaiterLimit) + bq = admission2.NewBoundedQueue(set.TelemetrySettings, cfg.Admission.RequestLimitMiB<<20, cfg.Admission.WaitingLimitMiB<<20) } r := &otelArrowReceiver{ cfg: cfg, diff --git a/receiver/otelarrowreceiver/testdata/config.yaml b/receiver/otelarrowreceiver/testdata/config.yaml index e911cafdd0c5..fee89ad789e8 100644 --- a/receiver/otelarrowreceiver/testdata/config.yaml +++ b/receiver/otelarrowreceiver/testdata/config.yaml @@ -29,4 +29,4 @@ protocols: memory_limit_mib: 123 admission: request_limit_mib: 80 - waiter_limit: 100 \ No newline at end of file + waiting_limit_mib: 100 From ff9c635edd1a30af2b74629fa000ba7b4ee438dd Mon Sep 17 00:00:00 2001 From: Matthieu MOREL Date: Thu, 14 Nov 2024 19:06:56 +0100 Subject: [PATCH 03/14] [chore]: enable gofumpt linter for connector (#36372) #### Description [gofumpt](https://golangci-lint.run/usage/linters/#gofumpt) enforces a stricter format than gofmt Part of https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/36363 
Signed-off-by: Matthieu MOREL --- connector/countconnector/config_test.go | 3 ++- connector/otlpjsonconnector/factory.go | 8 +++++--- connector/routingconnector/logs.go | 1 - connector/routingconnector/metrics.go | 1 - connector/routingconnector/request.go | 8 +++++--- connector/routingconnector/traces.go | 1 - connector/servicegraphconnector/config.go | 1 - connector/servicegraphconnector/connector_test.go | 1 - connector/servicegraphconnector/internal/store/store.go | 4 +--- connector/spanmetricsconnector/config_test.go | 3 ++- connector/spanmetricsconnector/connector_test.go | 1 + connector/spanmetricsconnector/internal/metrics/unit.go | 6 ++++-- 12 files changed, 20 insertions(+), 18 deletions(-) diff --git a/connector/countconnector/config_test.go b/connector/countconnector/config_test.go index d3f37776f846..02728e2ed18a 100644 --- a/connector/countconnector/config_test.go +++ b/connector/countconnector/config_test.go @@ -279,7 +279,8 @@ func TestLoadConfig(t *testing.T) { }, Metrics: map[string]MetricInfo{ "my.metric.count": { - Description: "My metric count."}, + Description: "My metric count.", + }, "limited.metric.count": { Description: "Limited metric count.", Conditions: []string{`IsMatch(resource.attributes["host.name"], "pod-m")`}, diff --git a/connector/otlpjsonconnector/factory.go b/connector/otlpjsonconnector/factory.go index 386b765b0e34..45d031ede216 100644 --- a/connector/otlpjsonconnector/factory.go +++ b/connector/otlpjsonconnector/factory.go @@ -14,9 +14,11 @@ import ( "github.com/open-telemetry/opentelemetry-collector-contrib/connector/otlpjsonconnector/internal/metadata" ) -var logRegex = regexp.MustCompile(`^\{\s*"resourceLogs"\s*:\s*\[`) -var metricRegex = regexp.MustCompile(`^\{\s*"resourceMetrics"\s*:\s*\[`) -var traceRegex = regexp.MustCompile(`^\{\s*"resourceSpans"\s*:\s*\[`) +var ( + logRegex = regexp.MustCompile(`^\{\s*"resourceLogs"\s*:\s*\[`) + metricRegex = regexp.MustCompile(`^\{\s*"resourceMetrics"\s*:\s*\[`) + traceRegex = regexp.MustCompile(`^\{\s*"resourceSpans"\s*:\s*\[`) +) // NewFactory returns a ConnectorFactory. func NewFactory() connector.Factory { diff --git a/connector/routingconnector/logs.go b/connector/routingconnector/logs.go index 20ef54979832..cb1cc213b06f 100644 --- a/connector/routingconnector/logs.go +++ b/connector/routingconnector/logs.go @@ -45,7 +45,6 @@ func newLogsConnector( cfg.DefaultPipelines, lr.Consumer, set.TelemetrySettings) - if err != nil { return nil, err } diff --git a/connector/routingconnector/metrics.go b/connector/routingconnector/metrics.go index e83f639453d7..7bdc81519b4b 100644 --- a/connector/routingconnector/metrics.go +++ b/connector/routingconnector/metrics.go @@ -45,7 +45,6 @@ func newMetricsConnector( cfg.DefaultPipelines, mr.Consumer, set.TelemetrySettings) - if err != nil { return nil, err } diff --git a/connector/routingconnector/request.go b/connector/routingconnector/request.go index 411c348e910d..ab7b70615025 100644 --- a/connector/routingconnector/request.go +++ b/connector/routingconnector/request.go @@ -18,9 +18,11 @@ import ( // future if needed. For now, it expects the condition to be in exactly the format: // 'request[""] ' where is either '==' or '!='. 
-var requestFieldRegex = regexp.MustCompile(`request\[".*"\]`) -var valueFieldRegex = regexp.MustCompile(`".*"`) -var comparatorRegex = regexp.MustCompile(`==|!=`) +var ( + requestFieldRegex = regexp.MustCompile(`request\[".*"\]`) + valueFieldRegex = regexp.MustCompile(`".*"`) + comparatorRegex = regexp.MustCompile(`==|!=`) +) type requestCondition struct { attributeName string diff --git a/connector/routingconnector/traces.go b/connector/routingconnector/traces.go index 9e0a5c9e0ce0..d679442c7f33 100644 --- a/connector/routingconnector/traces.go +++ b/connector/routingconnector/traces.go @@ -45,7 +45,6 @@ func newTracesConnector( cfg.DefaultPipelines, tr.Consumer, set.TelemetrySettings) - if err != nil { return nil, err } diff --git a/connector/servicegraphconnector/config.go b/connector/servicegraphconnector/config.go index 888fb5d4ddb0..847252d8dfb4 100644 --- a/connector/servicegraphconnector/config.go +++ b/connector/servicegraphconnector/config.go @@ -9,7 +9,6 @@ import ( // Config defines the configuration options for servicegraphprocessor. type Config struct { - // MetricsExporter is the name of the metrics exporter to use to ship metrics. // // Deprecated: The exporter is defined as part of the pipeline and this option is currently noop. diff --git a/connector/servicegraphconnector/connector_test.go b/connector/servicegraphconnector/connector_test.go index fbe15b3ef65e..3b6e1a4beda7 100644 --- a/connector/servicegraphconnector/connector_test.go +++ b/connector/servicegraphconnector/connector_test.go @@ -441,7 +441,6 @@ func TestUpdateDurationMetrics(t *testing.T) { caseStr string duration float64 }{ - { caseStr: "index 0 latency", duration: 0, diff --git a/connector/servicegraphconnector/internal/store/store.go b/connector/servicegraphconnector/internal/store/store.go index 62b551ca9786..3c095703984d 100644 --- a/connector/servicegraphconnector/internal/store/store.go +++ b/connector/servicegraphconnector/internal/store/store.go @@ -12,9 +12,7 @@ import ( "go.opentelemetry.io/collector/pdata/pcommon" ) -var ( - ErrTooManyItems = errors.New("too many items") -) +var ErrTooManyItems = errors.New("too many items") type Callback func(e *Edge) diff --git a/connector/spanmetricsconnector/config_test.go b/connector/spanmetricsconnector/config_test.go index cb2e45ff0a9c..d7142f84d9ad 100644 --- a/connector/spanmetricsconnector/config_test.go +++ b/connector/spanmetricsconnector/config_test.go @@ -67,7 +67,8 @@ func TestLoadConfig(t *testing.T) { }, }, }, - }}, + }, + }, { id: component.NewIDWithName(metadata.Type, "exponential_histogram"), expected: &Config{ diff --git a/connector/spanmetricsconnector/connector_test.go b/connector/spanmetricsconnector/connector_test.go index 35737a189dbb..5bd74c85f65a 100644 --- a/connector/spanmetricsconnector/connector_test.go +++ b/connector/spanmetricsconnector/connector_test.go @@ -1544,6 +1544,7 @@ func TestSpanMetrics_Events(t *testing.T) { }) } } + func TestExemplarsAreDiscardedAfterFlushing(t *testing.T) { tests := []struct { name string diff --git a/connector/spanmetricsconnector/internal/metrics/unit.go b/connector/spanmetricsconnector/internal/metrics/unit.go index b72423956ef1..14d726aaf9f4 100644 --- a/connector/spanmetricsconnector/internal/metrics/unit.go +++ b/connector/spanmetricsconnector/internal/metrics/unit.go @@ -20,8 +20,10 @@ const ( type Unit int8 -var _ encoding.TextMarshaler = (*Unit)(nil) -var _ encoding.TextUnmarshaler = (*Unit)(nil) +var ( + _ encoding.TextMarshaler = (*Unit)(nil) + _ encoding.TextUnmarshaler = (*Unit)(nil) 
+) func (u Unit) String() string { switch u { From 686d09914c1b6d50e2f08b60f8a1ba4dde8a1f39 Mon Sep 17 00:00:00 2001 From: Vihas Makwana <121151420+VihasMakwana@users.noreply.github.com> Date: Thu, 14 Nov 2024 23:52:20 +0530 Subject: [PATCH 04/14] [chore][allowlist] remove `emreyalvac` as he's already a member (#36377) This PR fixes the `make check` failure on `main`. [CI Link](https://github.com/open-telemetry/opentelemetry-collector-contrib/actions/runs/11830611046/job/32964502752) cc: @emreyalvac --- cmd/githubgen/allowlist.txt | 1 - 1 file changed, 1 deletion(-) diff --git a/cmd/githubgen/allowlist.txt b/cmd/githubgen/allowlist.txt index 8bc9f85352b9..f365aa1f6151 100644 --- a/cmd/githubgen/allowlist.txt +++ b/cmd/githubgen/allowlist.txt @@ -1,5 +1,4 @@ Caleb-Hurshman -emreyalvac cheempz jerrytfleung driverpt From 40dbf0b5487fd20e2024b66d72cd9945b90eca98 Mon Sep 17 00:00:00 2001 From: Daniel Jaglowski Date: Thu, 14 Nov 2024 14:01:24 -0500 Subject: [PATCH 05/14] [chore][pmetricutiltest] Use native structs in options (#36380) Same as #36354 but for metrics --- .../internal/plogutiltest/logs_test.go | 1 - .../internal/pmetricutil/metrics_test.go | 78 ++++++++++++++----- .../internal/pmetricutiltest/metrics.go | 76 ++++++++---------- .../internal/pmetricutiltest/metrics_test.go | 77 +++++++----------- connector/routingconnector/metrics_test.go | 19 +++-- 5 files changed, 133 insertions(+), 118 deletions(-) diff --git a/connector/routingconnector/internal/plogutiltest/logs_test.go b/connector/routingconnector/internal/plogutiltest/logs_test.go index 7b3378bbabf4..0f9d0de631aa 100644 --- a/connector/routingconnector/internal/plogutiltest/logs_test.go +++ b/connector/routingconnector/internal/plogutiltest/logs_test.go @@ -18,7 +18,6 @@ func TestNewLogs(t *testing.T) { expected := plog.NewLogs() assert.NoError(t, plogtest.CompareLogs(expected, plogutiltest.NewLogs("", "", ""))) assert.NoError(t, plogtest.CompareLogs(expected, plogutiltest.New())) - assert.NoError(t, plogtest.CompareLogs(expected, plogutiltest.New())) }) t.Run("simple", func(t *testing.T) { diff --git a/connector/routingconnector/internal/pmetricutil/metrics_test.go b/connector/routingconnector/internal/pmetricutil/metrics_test.go index 8c23b4232246..1eff9bc21201 100644 --- a/connector/routingconnector/internal/pmetricutil/metrics_test.go +++ b/connector/routingconnector/internal/pmetricutil/metrics_test.go @@ -130,12 +130,21 @@ func TestMoveMetricsWithContextIf(t *testing.T) { from: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"), to: pmetric.NewMetrics(), expectFrom: pmetricutiltest.NewMetricsFromOpts( - pmetricutiltest.WithResource('A', - pmetricutiltest.WithScope('C', pmetricutiltest.WithMetric('E', "GH"), pmetricutiltest.WithMetric('F', "GH")), - pmetricutiltest.WithScope('D', pmetricutiltest.WithMetric('E', "GH"), pmetricutiltest.WithMetric('F', "GH")), + pmetricutiltest.Resource("A", + pmetricutiltest.Scope("C", + pmetricutiltest.Metric("E", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")), + pmetricutiltest.Metric("F", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")), + ), + pmetricutiltest.Scope("D", + pmetricutiltest.Metric("E", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")), + pmetricutiltest.Metric("F", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")), + ), ), - pmetricutiltest.WithResource('B', - pmetricutiltest.WithScope('D', pmetricutiltest.WithMetric('E', "GH"), pmetricutiltest.WithMetric('F', "GH")), 
+ pmetricutiltest.Resource("B", + pmetricutiltest.Scope("D", + pmetricutiltest.Metric("E", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")), + pmetricutiltest.Metric("F", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")), + ), ), ), expectTo: pmetricutiltest.NewMetrics("B", "C", "EF", "GH"), @@ -159,13 +168,24 @@ func TestMoveMetricsWithContextIf(t *testing.T) { from: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"), to: pmetric.NewMetrics(), expectFrom: pmetricutiltest.NewMetricsFromOpts( - pmetricutiltest.WithResource('A', - pmetricutiltest.WithScope('C', pmetricutiltest.WithMetric('E', "GH"), pmetricutiltest.WithMetric('F', "GH")), - pmetricutiltest.WithScope('D', pmetricutiltest.WithMetric('E', "GH")), + pmetricutiltest.Resource("A", + pmetricutiltest.Scope("C", + pmetricutiltest.Metric("E", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")), + pmetricutiltest.Metric("F", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")), + ), + pmetricutiltest.Scope("D", + pmetricutiltest.Metric("E", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")), + ), ), - pmetricutiltest.WithResource('B', - pmetricutiltest.WithScope('C', pmetricutiltest.WithMetric('E', "GH"), pmetricutiltest.WithMetric('F', "GH")), - pmetricutiltest.WithScope('D', pmetricutiltest.WithMetric('E', "GH"), pmetricutiltest.WithMetric('F', "GH")), + pmetricutiltest.Resource("B", + pmetricutiltest.Scope("C", + pmetricutiltest.Metric("E", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")), + pmetricutiltest.Metric("F", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")), + ), + pmetricutiltest.Scope("D", + pmetricutiltest.Metric("E", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")), + pmetricutiltest.Metric("F", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")), + ), ), ), expectTo: pmetricutiltest.NewMetrics("A", "D", "F", "GH"), @@ -189,13 +209,23 @@ func TestMoveMetricsWithContextIf(t *testing.T) { from: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"), to: pmetric.NewMetrics(), expectFrom: pmetricutiltest.NewMetricsFromOpts( - pmetricutiltest.WithResource('A', - pmetricutiltest.WithScope('C', pmetricutiltest.WithMetric('E', "GH"), pmetricutiltest.WithMetric('F', "GH")), - pmetricutiltest.WithScope('D', pmetricutiltest.WithMetric('E', "GH"), pmetricutiltest.WithMetric('F', "GH")), + pmetricutiltest.Resource("A", + pmetricutiltest.Scope("C", + pmetricutiltest.Metric("E", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")), + pmetricutiltest.Metric("F", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")), + ), + pmetricutiltest.Scope("D", + pmetricutiltest.Metric("E", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")), + pmetricutiltest.Metric("F", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")), + ), ), - pmetricutiltest.WithResource('B', - pmetricutiltest.WithScope('C', pmetricutiltest.WithMetric('F', "GH")), - pmetricutiltest.WithScope('D', pmetricutiltest.WithMetric('F', "GH")), + pmetricutiltest.Resource("B", + pmetricutiltest.Scope("C", + pmetricutiltest.Metric("F", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")), + ), + pmetricutiltest.Scope("D", + pmetricutiltest.Metric("F", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")), + ), ), ), expectTo: 
pmetricutiltest.NewMetrics("B", "CD", "E", "GH"), @@ -209,9 +239,17 @@ func TestMoveMetricsWithContextIf(t *testing.T) { to: pmetricutiltest.NewMetrics("1", "2", "3", "4"), expectFrom: pmetricutiltest.NewMetrics("AB", "C", "EF", "GH"), expectTo: pmetricutiltest.NewMetricsFromOpts( - pmetricutiltest.WithResource('1', pmetricutiltest.WithScope('2', pmetricutiltest.WithMetric('3', "4"))), - pmetricutiltest.WithResource('A', pmetricutiltest.WithScope('D', pmetricutiltest.WithMetric('E', "GH"), pmetricutiltest.WithMetric('F', "GH"))), - pmetricutiltest.WithResource('B', pmetricutiltest.WithScope('D', pmetricutiltest.WithMetric('E', "GH"), pmetricutiltest.WithMetric('F', "GH"))), + pmetricutiltest.Resource("1", pmetricutiltest.Scope("2", + pmetricutiltest.Metric("3", pmetricutiltest.NumberDataPoint("4")), + )), + pmetricutiltest.Resource("A", pmetricutiltest.Scope("D", + pmetricutiltest.Metric("E", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")), + pmetricutiltest.Metric("F", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")), + )), + pmetricutiltest.Resource("B", pmetricutiltest.Scope("D", + pmetricutiltest.Metric("E", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")), + pmetricutiltest.Metric("F", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")), + )), ), }, } diff --git a/connector/routingconnector/internal/pmetricutiltest/metrics.go b/connector/routingconnector/internal/pmetricutiltest/metrics.go index a908e1638e63..18b59eb29cf8 100644 --- a/connector/routingconnector/internal/pmetricutiltest/metrics.go +++ b/connector/routingconnector/internal/pmetricutiltest/metrics.go @@ -44,58 +44,44 @@ func NewMetrics(resourceIDs, scopeIDs, metricIDs, dataPointIDs string) pmetric.M return md } -type Resource struct { - id byte - scopes []Scope -} - -type Scope struct { - id byte - metrics []Metric -} - -type Metric struct { - id byte - dataPoints string +func NewMetricsFromOpts(resources ...pmetric.ResourceMetrics) pmetric.Metrics { + md := pmetric.NewMetrics() + for _, resource := range resources { + resource.CopyTo(md.ResourceMetrics().AppendEmpty()) + } + return md } -func WithResource(id byte, scopes ...Scope) Resource { - r := Resource{id: id} - r.scopes = append(r.scopes, scopes...) - return r +func Resource(id string, scopes ...pmetric.ScopeMetrics) pmetric.ResourceMetrics { + rm := pmetric.NewResourceMetrics() + rm.Resource().Attributes().PutStr("resourceName", "resource"+id) + for _, scope := range scopes { + scope.CopyTo(rm.ScopeMetrics().AppendEmpty()) + } + return rm } -func WithScope(id byte, metrics ...Metric) Scope { - s := Scope{id: id} - s.metrics = append(s.metrics, metrics...) +func Scope(id string, metrics ...pmetric.Metric) pmetric.ScopeMetrics { + s := pmetric.NewScopeMetrics() + s.Scope().SetName("scope" + id) + for _, metric := range metrics { + metric.CopyTo(s.Metrics().AppendEmpty()) + } return s } -func WithMetric(id byte, dataPoints string) Metric { - return Metric{id: id, dataPoints: dataPoints} +func Metric(id string, dps ...pmetric.NumberDataPoint) pmetric.Metric { + m := pmetric.NewMetric() + m.SetName("metric" + id) + g := m.SetEmptyGauge() + for _, dp := range dps { + dp.CopyTo(g.DataPoints().AppendEmpty()) + } + return m } -// NewMetricsFromOpts creates a pmetric.Metrics with the specified resources, scopes, metrics, -// and data points. The general idea is the same as NewMetrics, but this function allows for -// more flexibility in creating non-uniform structures. 
-func NewMetricsFromOpts(resources ...Resource) pmetric.Metrics { - md := pmetric.NewMetrics() - for _, resource := range resources { - r := md.ResourceMetrics().AppendEmpty() - r.Resource().Attributes().PutStr("resourceName", "resource"+string(resource.id)) - for _, scope := range resource.scopes { - s := r.ScopeMetrics().AppendEmpty() - s.Scope().SetName("scope" + string(scope.id)) - for _, metric := range scope.metrics { - m := s.Metrics().AppendEmpty() - m.SetName("metric" + string(metric.id)) - dps := m.SetEmptyGauge().DataPoints() - for i := 0; i < len(metric.dataPoints); i++ { - dp := dps.AppendEmpty() - dp.Attributes().PutStr("dpName", "dp"+string(metric.dataPoints[i])) - } - } - } - } - return md +func NumberDataPoint(id string) pmetric.NumberDataPoint { + dp := pmetric.NewNumberDataPoint() + dp.Attributes().PutStr("dpName", "dp"+id) + return dp } diff --git a/connector/routingconnector/internal/pmetricutiltest/metrics_test.go b/connector/routingconnector/internal/pmetricutiltest/metrics_test.go index abfcde29d72c..9fcd2edebef6 100644 --- a/connector/routingconnector/internal/pmetricutiltest/metrics_test.go +++ b/connector/routingconnector/internal/pmetricutiltest/metrics_test.go @@ -34,15 +34,12 @@ func TestNewMetrics(t *testing.T) { dp.Attributes().PutStr("dpName", "dpD") // resourceA.scopeB.metricC.dpD return md }() - fromOpts := pmetricutiltest.NewMetricsFromOpts( - pmetricutiltest.WithResource('A', - pmetricutiltest.WithScope('B', - pmetricutiltest.WithMetric('C', "D"), - ), - ), - ) assert.NoError(t, pmetrictest.CompareMetrics(expected, pmetricutiltest.NewMetrics("A", "B", "C", "D"))) - assert.NoError(t, pmetrictest.CompareMetrics(expected, fromOpts)) + assert.NoError(t, pmetrictest.CompareMetrics(expected, pmetricutiltest.NewMetricsFromOpts( + pmetricutiltest.Resource("A", + pmetricutiltest.Scope("B", pmetricutiltest.Metric("C", pmetricutiltest.NumberDataPoint("D"))), + ), + ))) }) t.Run("two_resources", func(t *testing.T) { @@ -68,20 +65,15 @@ func TestNewMetrics(t *testing.T) { dp.Attributes().PutStr("dpName", "dpE") // resourceB.scopeC.metricD.dpE return md }() - fromOpts := pmetricutiltest.NewMetricsFromOpts( - pmetricutiltest.WithResource('A', - pmetricutiltest.WithScope('C', - pmetricutiltest.WithMetric('D', "E"), - ), + assert.NoError(t, pmetrictest.CompareMetrics(expected, pmetricutiltest.NewMetrics("AB", "C", "D", "E"))) + assert.NoError(t, pmetrictest.CompareMetrics(expected, pmetricutiltest.NewMetricsFromOpts( + pmetricutiltest.Resource("A", + pmetricutiltest.Scope("C", pmetricutiltest.Metric("D", pmetricutiltest.NumberDataPoint("E"))), ), - pmetricutiltest.WithResource('B', - pmetricutiltest.WithScope('C', - pmetricutiltest.WithMetric('D', "E"), - ), + pmetricutiltest.Resource("B", + pmetricutiltest.Scope("C", pmetricutiltest.Metric("D", pmetricutiltest.NumberDataPoint("E"))), ), - ) - assert.NoError(t, pmetrictest.CompareMetrics(expected, pmetricutiltest.NewMetrics("AB", "C", "D", "E"))) - assert.NoError(t, pmetrictest.CompareMetrics(expected, fromOpts)) + ))) }) t.Run("two_scopes", func(t *testing.T) { @@ -105,18 +97,13 @@ func TestNewMetrics(t *testing.T) { dp.Attributes().PutStr("dpName", "dpE") // resourceA.scopeC.metricD.dpE return md }() - fromOpts := pmetricutiltest.NewMetricsFromOpts( - pmetricutiltest.WithResource('A', - pmetricutiltest.WithScope('B', - pmetricutiltest.WithMetric('D', "E"), - ), - pmetricutiltest.WithScope('C', - pmetricutiltest.WithMetric('D', "E"), - ), - ), - ) assert.NoError(t, pmetrictest.CompareMetrics(expected, 
pmetricutiltest.NewMetrics("A", "BC", "D", "E"))) - assert.NoError(t, pmetrictest.CompareMetrics(expected, fromOpts)) + assert.NoError(t, pmetrictest.CompareMetrics(expected, pmetricutiltest.NewMetricsFromOpts( + pmetricutiltest.Resource("A", + pmetricutiltest.Scope("B", pmetricutiltest.Metric("D", pmetricutiltest.NumberDataPoint("E"))), + pmetricutiltest.Scope("C", pmetricutiltest.Metric("D", pmetricutiltest.NumberDataPoint("E"))), + ), + ))) }) t.Run("two_metrics", func(t *testing.T) { @@ -138,16 +125,15 @@ func TestNewMetrics(t *testing.T) { dp.Attributes().PutStr("dpName", "dpE") // resourceA.scopeB.metricD.dpE return md }() - fromOpts := pmetricutiltest.NewMetricsFromOpts( - pmetricutiltest.WithResource('A', - pmetricutiltest.WithScope('B', - pmetricutiltest.WithMetric('C', "E"), - pmetricutiltest.WithMetric('D', "E"), + assert.NoError(t, pmetrictest.CompareMetrics(expected, pmetricutiltest.NewMetrics("A", "B", "CD", "E"))) + assert.NoError(t, pmetrictest.CompareMetrics(expected, pmetricutiltest.NewMetricsFromOpts( + pmetricutiltest.Resource("A", + pmetricutiltest.Scope("B", + pmetricutiltest.Metric("C", pmetricutiltest.NumberDataPoint("E")), + pmetricutiltest.Metric("D", pmetricutiltest.NumberDataPoint("E")), ), ), - ) - assert.NoError(t, pmetrictest.CompareMetrics(expected, pmetricutiltest.NewMetrics("A", "B", "CD", "E"))) - assert.NoError(t, pmetrictest.CompareMetrics(expected, fromOpts)) + ))) }) t.Run("two_datapoints", func(t *testing.T) { @@ -166,14 +152,11 @@ func TestNewMetrics(t *testing.T) { dp.Attributes().PutStr("dpName", "dpE") // resourceA.scopeB.metricC.dpE return md }() - fromOpts := pmetricutiltest.NewMetricsFromOpts( - pmetricutiltest.WithResource('A', - pmetricutiltest.WithScope('B', - pmetricutiltest.WithMetric('C', "DE"), - ), - ), - ) assert.NoError(t, pmetrictest.CompareMetrics(expected, pmetricutiltest.NewMetrics("A", "B", "C", "DE"))) - assert.NoError(t, pmetrictest.CompareMetrics(expected, fromOpts)) + assert.NoError(t, pmetrictest.CompareMetrics(expected, pmetricutiltest.NewMetricsFromOpts( + pmetricutiltest.Resource("A", + pmetricutiltest.Scope("B", pmetricutiltest.Metric("C", pmetricutiltest.NumberDataPoint("D"), pmetricutiltest.NumberDataPoint("E"))), + ), + ))) }) } diff --git a/connector/routingconnector/metrics_test.go b/connector/routingconnector/metrics_test.go index f87a15ff613c..fbbf7b383381 100644 --- a/connector/routingconnector/metrics_test.go +++ b/connector/routingconnector/metrics_test.go @@ -842,12 +842,21 @@ func TestMetricsConnectorDetailed(t *testing.T) { expectSink0: pmetricutiltest.NewMetrics("B", "D", "EF", "GH"), expectSink1: pmetric.Metrics{}, expectSinkD: pmetricutiltest.NewMetricsFromOpts( - pmetricutiltest.WithResource('A', - pmetricutiltest.WithScope('C', pmetricutiltest.WithMetric('E', "GH"), pmetricutiltest.WithMetric('F', "GH")), - pmetricutiltest.WithScope('D', pmetricutiltest.WithMetric('E', "GH"), pmetricutiltest.WithMetric('F', "GH")), + pmetricutiltest.Resource("A", + pmetricutiltest.Scope("C", + pmetricutiltest.Metric("E", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")), + pmetricutiltest.Metric("F", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")), + ), + pmetricutiltest.Scope("D", + pmetricutiltest.Metric("E", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")), + pmetricutiltest.Metric("F", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")), + ), ), - pmetricutiltest.WithResource('B', - 
pmetricutiltest.WithScope('C', pmetricutiltest.WithMetric('E', "GH"), pmetricutiltest.WithMetric('F', "GH")), + pmetricutiltest.Resource("B", + pmetricutiltest.Scope("C", + pmetricutiltest.Metric("E", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")), + pmetricutiltest.Metric("F", pmetricutiltest.NumberDataPoint("G"), pmetricutiltest.NumberDataPoint("H")), + ), ), ), }, From c406f448efcccf1ce82cc4271d05a94bde517b71 Mon Sep 17 00:00:00 2001 From: Matthieu MOREL Date: Fri, 15 Nov 2024 01:31:53 +0100 Subject: [PATCH 06/14] [chore]: enable gofumpt linter for internal (#36369) #### Description [gofumpt](https://golangci-lint.run/usage/linters/#gofumpt) enforces a stricter format than gofmt Part of https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/36363 Signed-off-by: Matthieu MOREL --- internal/aws/awsutil/conn.go | 10 ++-- internal/aws/awsutil/conn_test.go | 2 +- internal/aws/containerinsight/utils_test.go | 3 +- internal/aws/cwlogs/cwlog_client.go | 12 ++-- internal/aws/cwlogs/cwlog_client_test.go | 60 ++++++++++++------- internal/aws/cwlogs/pusher.go | 17 +++--- internal/aws/cwlogs/pusher_test.go | 10 +++- internal/aws/ecsutil/metadata_provider.go | 2 - internal/aws/k8s/k8sclient/helpers_test.go | 6 +- internal/aws/k8s/k8sclient/job.go | 3 +- internal/aws/k8s/k8sclient/replicaset.go | 3 +- internal/aws/proxy/conn.go | 1 - internal/aws/proxy/conn_test.go | 7 ++- internal/aws/xray/telemetry/nop_sender.go | 3 +- .../aws/xray/testdata/sampleapp/sample.go | 3 +- internal/aws/xray/xray_client.go | 8 ++- internal/common/docker/images.go | 4 +- internal/common/testutil/testutil_test.go | 1 + .../coreinternal/aggregateutil/aggregate.go | 12 ++-- .../attraction/attraction_test.go | 7 ++- .../goldendataset/traces_generator.go | 6 +- .../timeutils/internal/ctimefmt/ctimefmt.go | 8 ++- .../internal/ctimefmt/ctimefmt_test.go | 14 +++-- .../coreinternal/timeutils/parser_test.go | 20 +++---- internal/docker/matcher.go | 1 - internal/filter/filterset/config.go | 4 +- .../filterset/regexp/regexpfilterset_test.go | 22 ++++--- .../filterset/strict/strictfilterset_test.go | 12 ++-- internal/k8sconfig/config.go | 1 - internal/k8stest/client.go | 3 +- internal/kubelet/client_test.go | 8 ++- .../aws/ec2/metadata_test.go | 3 +- internal/otelarrow/admission2/boundedqueue.go | 6 +- internal/otelarrow/test/e2e_test.go | 22 +++---- internal/pdatautil/logs_test.go | 14 +++-- internal/splunk/common.go | 4 +- internal/sqlquery/scraper_test.go | 23 +++---- 37 files changed, 188 insertions(+), 157 deletions(-) diff --git a/internal/aws/awsutil/conn.go b/internal/aws/awsutil/conn.go index 820b03c75c38..32963e811128 100644 --- a/internal/aws/awsutil/conn.go +++ b/internal/aws/awsutil/conn.go @@ -45,7 +45,8 @@ const ( // newHTTPClient returns new HTTP client instance with provided configuration. 
func newHTTPClient(logger *zap.Logger, maxIdle int, requestTimeout int, noVerify bool, - proxyAddress string) (*http.Client, error) { + proxyAddress string, +) (*http.Client, error) { logger.Debug("Using proxy address: ", zap.String("proxyAddr", proxyAddress), ) @@ -206,7 +207,6 @@ func (c *Conn) newAWSSession(logger *zap.Logger, roleArn string, region string) s, err = session.NewSession(&aws.Config{ Credentials: stsCreds, }) - if err != nil { logger.Error("Error in creating session object : ", zap.Error(err)) return s, err @@ -245,7 +245,8 @@ func getSTSCreds(logger *zap.Logger, region string, roleArn string) (*credential // AWS STS recommends that you provide both the Region and endpoint when you make calls to a Regional endpoint. // Reference: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html#id_credentials_temp_enable-regions_writing_code func getSTSCredsFromRegionEndpoint(logger *zap.Logger, sess *session.Session, region string, - roleArn string) *credentials.Credentials { + roleArn string, +) *credentials.Credentials { regionalEndpoint := getSTSRegionalEndpoint(region) // if regionalEndpoint is "", the STS endpoint is Global endpoint for classic regions except ap-east-1 - (HKG) // for other opt-in regions, region value will create STS regional endpoint. @@ -259,7 +260,8 @@ func getSTSCredsFromRegionEndpoint(logger *zap.Logger, sess *session.Session, re // getSTSCredsFromPrimaryRegionEndpoint fetches STS credentials for provided roleARN from primary region endpoint in // the respective partition. func getSTSCredsFromPrimaryRegionEndpoint(logger *zap.Logger, t *session.Session, roleArn string, - region string) *credentials.Credentials { + region string, +) *credentials.Credentials { logger.Info("Credentials for provided RoleARN being fetched from STS primary region endpoint.") partitionID := getPartition(region) switch partitionID { diff --git a/internal/aws/awsutil/conn_test.go b/internal/aws/awsutil/conn_test.go index 363b5ac5ec49..5946b36ff2b8 100644 --- a/internal/aws/awsutil/conn_test.go +++ b/internal/aws/awsutil/conn_test.go @@ -58,7 +58,7 @@ func TestRegionEnv(t *testing.T) { region := "us-east-1" t.Setenv("AWS_REGION", region) - var m = &mockConn{} + m := &mockConn{} var expectedSession *session.Session expectedSession, _ = session.NewSession() m.sn = expectedSession diff --git a/internal/aws/containerinsight/utils_test.go b/internal/aws/containerinsight/utils_test.go index e1fe4df48c2c..0a702d7f5d76 100644 --- a/internal/aws/containerinsight/utils_test.go +++ b/internal/aws/containerinsight/utils_test.go @@ -132,7 +132,8 @@ func convertToFloat64(value any) float64 { } func checkMetricsAreExpected(t *testing.T, md pmetric.Metrics, fields map[string]any, tags map[string]string, - expectedUnits map[string]string) { + expectedUnits map[string]string, +) { rms := md.ResourceMetrics() assert.Equal(t, 1, rms.Len()) diff --git a/internal/aws/cwlogs/cwlog_client.go b/internal/aws/cwlogs/cwlog_client.go index 106aca07390a..a8a17e243b52 100644 --- a/internal/aws/cwlogs/cwlog_client.go +++ b/internal/aws/cwlogs/cwlog_client.go @@ -26,9 +26,7 @@ const ( errCodeThrottlingException = "ThrottlingException" ) -var ( - containerInsightsRegexPattern = regexp.MustCompile(`^/aws/.*containerinsights/.*/(performance|prometheus)$`) -) +var containerInsightsRegexPattern = regexp.MustCompile(`^/aws/.*containerinsights/.*/(performance|prometheus)$`) // Possible exceptions are combination of common errors 
(https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/CommonErrors.html) // and API specific erros (e.g. https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutLogEvents.html#API_PutLogEvents_Errors) @@ -53,10 +51,12 @@ func WithUserAgentExtras(userAgentExtras ...string) ClientOption { // Create a log client based on the actual cloudwatch logs client. func newCloudWatchLogClient(svc cloudwatchlogsiface.CloudWatchLogsAPI, logRetention int64, tags map[string]*string, logger *zap.Logger) *Client { - logClient := &Client{svc: svc, + logClient := &Client{ + svc: svc, logRetention: logRetention, tags: tags, - logger: logger} + logger: logger, + } return logClient } @@ -124,7 +124,7 @@ func (client *Client) PutLogEvents(input *cloudwatchlogs.PutLogEventsInput, retr } } - //TODO: Should have metrics to provide visibility of these failures + // TODO: Should have metrics to provide visibility of these failures if response != nil { if response.RejectedLogEventsInfo != nil { rejectedLogEventsInfo := response.RejectedLogEventsInfo diff --git a/internal/aws/cwlogs/cwlog_client_test.go b/internal/aws/cwlogs/cwlog_client_test.go index 2832251e47e4..b3d430c2e5c9 100644 --- a/internal/aws/cwlogs/cwlog_client_test.go +++ b/internal/aws/cwlogs/cwlog_client_test.go @@ -27,7 +27,8 @@ func newAlwaysPassMockLogClient(putLogEventsFunc func(args mock.Arguments)) *Cli svc.On("PutLogEvents", mock.Anything).Return( &cloudwatchlogs.PutLogEventsOutput{ - NextSequenceToken: &expectedNextSequenceToken}, + NextSequenceToken: &expectedNextSequenceToken, + }, nil).Run(putLogEventsFunc) svc.On("CreateLogGroup", mock.Anything).Return(new(cloudwatchlogs.CreateLogGroupOutput), nil) @@ -36,7 +37,8 @@ func newAlwaysPassMockLogClient(putLogEventsFunc func(args mock.Arguments)) *Cli svc.On("DescribeLogStreams", mock.Anything).Return( &cloudwatchlogs.DescribeLogStreamsOutput{ - LogStreams: []*cloudwatchlogs.LogStream{{UploadSequenceToken: &expectedNextSequenceToken}}}, + LogStreams: []*cloudwatchlogs.LogStream{{UploadSequenceToken: &expectedNextSequenceToken}}, + }, nil) return newCloudWatchLogClient(svc, 0, nil, logger) } @@ -77,11 +79,13 @@ func (svc *mockCloudWatchLogsClient) TagResource(input *cloudwatchlogs.TagResour } // Tests -var previousSequenceToken = "0000" -var expectedNextSequenceToken = "1111" -var logGroup = "logGroup" -var logStreamName = "logStream" -var emptySequenceToken = "" +var ( + previousSequenceToken = "0000" + expectedNextSequenceToken = "1111" + logGroup = "logGroup" + logStreamName = "logStream" + emptySequenceToken = "" +) func TestPutLogEvents_HappyCase(t *testing.T) { logger := zap.NewNop() @@ -92,7 +96,8 @@ func TestPutLogEvents_HappyCase(t *testing.T) { SequenceToken: &previousSequenceToken, } putLogEventsOutput := &cloudwatchlogs.PutLogEventsOutput{ - NextSequenceToken: &expectedNextSequenceToken} + NextSequenceToken: &expectedNextSequenceToken, + } svc.On("PutLogEvents", putLogEventsInput).Return(putLogEventsOutput, nil) @@ -114,7 +119,8 @@ func TestPutLogEvents_HappyCase_SomeRejectedInfo(t *testing.T) { rejectedLogEventsInfo := &cloudwatchlogs.RejectedLogEventsInfo{ ExpiredLogEventEndIndex: aws.Int64(1), TooNewLogEventStartIndex: aws.Int64(2), - TooOldLogEventEndIndex: aws.Int64(3)} + TooOldLogEventEndIndex: aws.Int64(3), + } putLogEventsOutput := &cloudwatchlogs.PutLogEventsOutput{ NextSequenceToken: &expectedNextSequenceToken, RejectedLogEventsInfo: rejectedLogEventsInfo, @@ -138,7 +144,8 @@ func TestPutLogEvents_NonAWSError(t *testing.T) { SequenceToken: 
&previousSequenceToken, } putLogEventsOutput := &cloudwatchlogs.PutLogEventsOutput{ - NextSequenceToken: &expectedNextSequenceToken} + NextSequenceToken: &expectedNextSequenceToken, + } svc.On("PutLogEvents", putLogEventsInput).Return(putLogEventsOutput, errors.New("some random error")).Once() @@ -158,7 +165,8 @@ func TestPutLogEvents_InvalidParameterException(t *testing.T) { SequenceToken: &previousSequenceToken, } putLogEventsOutput := &cloudwatchlogs.PutLogEventsOutput{ - NextSequenceToken: &expectedNextSequenceToken} + NextSequenceToken: &expectedNextSequenceToken, + } invalidParameterException := &cloudwatchlogs.InvalidParameterException{} svc.On("PutLogEvents", putLogEventsInput).Return(putLogEventsOutput, invalidParameterException).Once() @@ -179,7 +187,8 @@ func TestPutLogEvents_OperationAbortedException(t *testing.T) { SequenceToken: &previousSequenceToken, } putLogEventsOutput := &cloudwatchlogs.PutLogEventsOutput{ - NextSequenceToken: &expectedNextSequenceToken} + NextSequenceToken: &expectedNextSequenceToken, + } operationAbortedException := &cloudwatchlogs.OperationAbortedException{} svc.On("PutLogEvents", putLogEventsInput).Return(putLogEventsOutput, operationAbortedException).Once() @@ -200,7 +209,8 @@ func TestPutLogEvents_ServiceUnavailableException(t *testing.T) { SequenceToken: &previousSequenceToken, } putLogEventsOutput := &cloudwatchlogs.PutLogEventsOutput{ - NextSequenceToken: &expectedNextSequenceToken} + NextSequenceToken: &expectedNextSequenceToken, + } serviceUnavailableException := &cloudwatchlogs.ServiceUnavailableException{} svc.On("PutLogEvents", putLogEventsInput).Return(putLogEventsOutput, serviceUnavailableException).Once() @@ -221,7 +231,8 @@ func TestPutLogEvents_UnknownException(t *testing.T) { SequenceToken: &previousSequenceToken, } putLogEventsOutput := &cloudwatchlogs.PutLogEventsOutput{ - NextSequenceToken: &expectedNextSequenceToken} + NextSequenceToken: &expectedNextSequenceToken, + } unknownException := awserr.New("unknownException", "", nil) svc.On("PutLogEvents", putLogEventsInput).Return(putLogEventsOutput, unknownException).Once() @@ -242,7 +253,8 @@ func TestPutLogEvents_ThrottlingException(t *testing.T) { SequenceToken: &previousSequenceToken, } putLogEventsOutput := &cloudwatchlogs.PutLogEventsOutput{ - NextSequenceToken: &expectedNextSequenceToken} + NextSequenceToken: &expectedNextSequenceToken, + } throttlingException := awserr.New(errCodeThrottlingException, "", nil) svc.On("PutLogEvents", putLogEventsInput).Return(putLogEventsOutput, throttlingException).Once() @@ -264,7 +276,8 @@ func TestPutLogEvents_ResourceNotFoundException(t *testing.T) { } putLogEventsOutput := &cloudwatchlogs.PutLogEventsOutput{ - NextSequenceToken: &expectedNextSequenceToken} + NextSequenceToken: &expectedNextSequenceToken, + } awsErr := &cloudwatchlogs.ResourceNotFoundException{} svc.On("PutLogEvents", putLogEventsInput).Return(putLogEventsOutput, awsErr).Once() @@ -291,7 +304,8 @@ func TestLogRetention_NeverExpire(t *testing.T) { } putLogEventsOutput := &cloudwatchlogs.PutLogEventsOutput{ - NextSequenceToken: &expectedNextSequenceToken} + NextSequenceToken: &expectedNextSequenceToken, + } awsErr := &cloudwatchlogs.ResourceNotFoundException{} svc.On("PutLogEvents", putLogEventsInput).Return(putLogEventsOutput, awsErr).Once() @@ -326,7 +340,8 @@ func TestLogRetention_RetentionDaysInputted(t *testing.T) { } putLogEventsOutput := &cloudwatchlogs.PutLogEventsOutput{ - NextSequenceToken: &expectedNextSequenceToken} + NextSequenceToken: &expectedNextSequenceToken, 
+ } awsErr := &cloudwatchlogs.ResourceNotFoundException{} svc.On("PutLogEvents", putLogEventsInput).Return(putLogEventsOutput, awsErr).Once() @@ -362,7 +377,8 @@ func TestSetTags_NotCalled(t *testing.T) { } putLogEventsOutput := &cloudwatchlogs.PutLogEventsOutput{ - NextSequenceToken: &expectedNextSequenceToken} + NextSequenceToken: &expectedNextSequenceToken, + } awsErr := &cloudwatchlogs.ResourceNotFoundException{} svc.On("PutLogEvents", putLogEventsInput).Return(putLogEventsOutput, awsErr).Once() @@ -397,7 +413,8 @@ func TestSetTags_Called(t *testing.T) { } putLogEventsOutput := &cloudwatchlogs.PutLogEventsOutput{ - NextSequenceToken: &expectedNextSequenceToken} + NextSequenceToken: &expectedNextSequenceToken, + } awsErr := &cloudwatchlogs.ResourceNotFoundException{} avalue := "avalue" @@ -433,7 +450,8 @@ func TestPutLogEvents_AllRetriesFail(t *testing.T) { } putLogEventsOutput := &cloudwatchlogs.PutLogEventsOutput{ - NextSequenceToken: nil} + NextSequenceToken: nil, + } awsErr := &cloudwatchlogs.ResourceNotFoundException{} svc.On("PutLogEvents", putLogEventsInput).Return(putLogEventsOutput, awsErr).Twice() diff --git a/internal/aws/cwlogs/pusher.go b/internal/aws/cwlogs/pusher.go index 93c68222e943..af8417dc8aab 100644 --- a/internal/aws/cwlogs/pusher.go +++ b/internal/aws/cwlogs/pusher.go @@ -29,9 +29,7 @@ const ( evenTimestampLimitInFuture = -2 * time.Hour // None of the log events in the batch can be more than 2 hours in the future. ) -var ( - maxEventPayloadBytes = defaultMaxEventPayloadBytes -) +var maxEventPayloadBytes = defaultMaxEventPayloadBytes // Event struct to present a log event. type Event struct { @@ -48,7 +46,8 @@ func NewEvent(timestampMs int64, message string) *Event { event := &Event{ InputLogEvent: &cloudwatchlogs.InputLogEvent{ Timestamp: aws.Int64(timestampMs), - Message: aws.String(message)}, + Message: aws.String(message), + }, } return event } @@ -115,7 +114,8 @@ func newEventBatch(key StreamKey) *eventBatch { putLogEventsInput: &cloudwatchlogs.PutLogEventsInput{ LogGroupName: aws.String(key.LogGroupName), LogStreamName: aws.String(key.LogStreamName), - LogEvents: make([]*cloudwatchlogs.InputLogEvent, 0, maxRequestEventCount)}, + LogEvents: make([]*cloudwatchlogs.InputLogEvent, 0, maxRequestEventCount), + }, } } @@ -194,7 +194,8 @@ type logPusher struct { // NewPusher creates a logPusher instance func NewPusher(streamKey StreamKey, retryCnt int, - svcStructuredLog Client, logger *zap.Logger) Pusher { + svcStructuredLog Client, logger *zap.Logger, +) Pusher { pusher := newLogPusher(streamKey, svcStructuredLog, logger) pusher.retryCnt = defaultRetryCount @@ -207,7 +208,8 @@ func NewPusher(streamKey StreamKey, retryCnt int, // Only create a logPusher, but not start the instance. 
func newLogPusher(streamKey StreamKey, - svcStructuredLog Client, logger *zap.Logger) *logPusher { + svcStructuredLog Client, logger *zap.Logger, +) *logPusher { pusher := &logPusher{ logGroupName: aws.String(streamKey.LogGroupName), logStreamName: aws.String(streamKey.LogStreamName), @@ -260,7 +262,6 @@ func (p *logPusher) pushEventBatch(req any) error { startTime := time.Now() err := p.svcStructuredLog.PutLogEvents(putLogEventsInput, p.retryCnt) - if err != nil { return err } diff --git a/internal/aws/cwlogs/pusher_test.go b/internal/aws/cwlogs/pusher_test.go index 5ba30bba0349..46249cc35515 100644 --- a/internal/aws/cwlogs/pusher_test.go +++ b/internal/aws/cwlogs/pusher_test.go @@ -83,7 +83,9 @@ func TestLogEventBatch_sortLogEvents(t *testing.T) { totalEvents := 10 logEventBatch := &eventBatch{ putLogEventsInput: &cloudwatchlogs.PutLogEventsInput{ - LogEvents: make([]*cloudwatchlogs.InputLogEvent, 0, totalEvents)}} + LogEvents: make([]*cloudwatchlogs.InputLogEvent, 0, totalEvents), + }, + } for i := 0; i < totalEvents; i++ { timestamp := rand.Int() @@ -120,8 +122,10 @@ func newMockPusher() *logPusher { // pusher Tests // -var timestampMs = time.Now().UnixNano() / 1e6 -var msg = "test log message" +var ( + timestampMs = time.Now().UnixNano() / 1e6 + msg = "test log message" +) func TestPusher_newLogEventBatch(t *testing.T) { p := newMockPusher() diff --git a/internal/aws/ecsutil/metadata_provider.go b/internal/aws/ecsutil/metadata_provider.go index 3968d88868ed..75698eedd26b 100644 --- a/internal/aws/ecsutil/metadata_provider.go +++ b/internal/aws/ecsutil/metadata_provider.go @@ -64,7 +64,6 @@ func (md *ecsMetadataProviderImpl) FetchTaskMetadata() (*TaskMetadata, error) { taskMetadata := &TaskMetadata{} err = json.NewDecoder(bytes.NewReader(resp)).Decode(taskMetadata) - if err != nil { return nil, fmt.Errorf("encountered unexpected error reading response from ECS Task Metadata Endpoint: %w", err) } @@ -82,7 +81,6 @@ func (md *ecsMetadataProviderImpl) FetchContainerMetadata() (*ContainerMetadata, containerMetadata := &ContainerMetadata{} err = json.NewDecoder(bytes.NewReader(resp)).Decode(containerMetadata) - if err != nil { return nil, fmt.Errorf("encountered unexpected error reading response from ECS Container Metadata Endpoint: %w", err) } diff --git a/internal/aws/k8s/k8sclient/helpers_test.go b/internal/aws/k8s/k8sclient/helpers_test.go index cce65edac1f5..fe15f1544cc6 100644 --- a/internal/aws/k8s/k8sclient/helpers_test.go +++ b/internal/aws/k8s/k8sclient/helpers_test.go @@ -10,11 +10,9 @@ import ( "k8s.io/apimachinery/pkg/runtime" ) -type mockReflectorSyncChecker struct { -} +type mockReflectorSyncChecker struct{} func (m *mockReflectorSyncChecker) Check(_ cacheReflector, _ string) { - } var kubeConfigPath string @@ -54,7 +52,7 @@ users: if err != nil { t.Error(err) } - if err := os.WriteFile(tmpfile.Name(), []byte(content), 0600); err != nil { + if err := os.WriteFile(tmpfile.Name(), []byte(content), 0o600); err != nil { t.Error(err) } // overwrite the default kube config path diff --git a/internal/aws/k8s/k8sclient/job.go b/internal/aws/k8s/k8sclient/job.go index 3076e3546433..b565482285bd 100644 --- a/internal/aws/k8s/k8sclient/job.go +++ b/internal/aws/k8s/k8sclient/job.go @@ -27,8 +27,7 @@ type JobClient interface { JobToCronJob() map[string]string } -type noOpJobClient struct { -} +type noOpJobClient struct{} func (nc *noOpJobClient) JobToCronJob() map[string]string { return map[string]string{} diff --git a/internal/aws/k8s/k8sclient/replicaset.go 
b/internal/aws/k8s/k8sclient/replicaset.go index cfa73d773e67..7d911c6a6532 100644 --- a/internal/aws/k8s/k8sclient/replicaset.go +++ b/internal/aws/k8s/k8sclient/replicaset.go @@ -27,8 +27,7 @@ type ReplicaSetClient interface { ReplicaSetToDeployment() map[string]string } -type noOpReplicaSetClient struct { -} +type noOpReplicaSetClient struct{} func (nc *noOpReplicaSetClient) ReplicaSetToDeployment() map[string]string { return map[string]string{} diff --git a/internal/aws/proxy/conn.go b/internal/aws/proxy/conn.go index e0e850428b10..c9eb13f2e34a 100644 --- a/internal/aws/proxy/conn.go +++ b/internal/aws/proxy/conn.go @@ -60,7 +60,6 @@ var newAWSSession = func(roleArn string, region string, log *zap.Logger) (*sessi sess, err := session.NewSession(&aws.Config{ Credentials: stsCreds, }) - if err != nil { return nil, err } diff --git a/internal/aws/proxy/conn_test.go b/internal/aws/proxy/conn_test.go index 97584a897205..6be4d76f6655 100644 --- a/internal/aws/proxy/conn_test.go +++ b/internal/aws/proxy/conn_test.go @@ -43,7 +43,8 @@ func logSetup() (*zap.Logger, *observer.ObservedLogs) { } func setupMock(sess *session.Session) (f1 func(s *session.Session) (string, error), - f2 func(roleArn string, region string, logger *zap.Logger) (*session.Session, error)) { + f2 func(roleArn string, region string, logger *zap.Logger) (*session.Session, error), +) { f1 = getEC2Region f2 = newAWSSession m := mock{sn: sess} @@ -399,8 +400,7 @@ func TestGetSTSCredsFromPrimaryRegionEndpoint(t *testing.T) { "expected error message") } -type mockAWSErr struct { -} +type mockAWSErr struct{} func (m *mockAWSErr) Error() string { return "mockAWSErr" @@ -433,6 +433,7 @@ func (m *mockProvider) Retrieve() (credentials.Value, error) { func (m *mockProvider) IsExpired() bool { return true } + func TestSTSRegionalEndpointDisabled(t *testing.T) { logger, recordedLogs := logSetup() diff --git a/internal/aws/xray/telemetry/nop_sender.go b/internal/aws/xray/telemetry/nop_sender.go index c87ac6edcaf5..698967e99657 100644 --- a/internal/aws/xray/telemetry/nop_sender.go +++ b/internal/aws/xray/telemetry/nop_sender.go @@ -12,8 +12,7 @@ func NewNopSender() Sender { var nopSenderInstance Sender = &nopSender{} -type nopSender struct { -} +type nopSender struct{} func (n nopSender) Rotate() *xray.TelemetryRecord { return nil diff --git a/internal/aws/xray/testdata/sampleapp/sample.go b/internal/aws/xray/testdata/sampleapp/sample.go index ab0bd311dd7d..7510a660dddc 100644 --- a/internal/aws/xray/testdata/sampleapp/sample.go +++ b/internal/aws/xray/testdata/sampleapp/sample.go @@ -28,7 +28,8 @@ type Record struct { func main() { dynamo = dynamodb.New(session.Must(session.NewSession( &aws.Config{ - Region: aws.String("us-west-2")}, + Region: aws.String("us-west-2"), + }, ))) xray.AWS(dynamo.Client) diff --git a/internal/aws/xray/xray_client.go b/internal/aws/xray/xray_client.go index adb8d0ba7262..072d267a0c5c 100644 --- a/internal/aws/xray/xray_client.go +++ b/internal/aws/xray/xray_client.go @@ -20,9 +20,11 @@ import ( ) // Constant prefixes used to identify information in user-agent -const agentPrefix = "xray-otel-exporter/" -const execEnvPrefix = " exec-env/" -const osPrefix = " OS/" +const ( + agentPrefix = "xray-otel-exporter/" + execEnvPrefix = " exec-env/" + osPrefix = " OS/" +) // XRayClient represents X-Ray client. 
type XRayClient interface { diff --git a/internal/common/docker/images.go b/internal/common/docker/images.go index 8e5b3353f071..b97c35aa8aee 100644 --- a/internal/common/docker/images.go +++ b/internal/common/docker/images.go @@ -10,9 +10,7 @@ import ( "go.uber.org/zap" ) -var ( - extractImageRegexp = regexp.MustCompile(`^(?P([^/\s]+/)?([^:\s]+))(:(?P[^@\s]+))?(@sha256:(?P\d+))?$`) -) +var extractImageRegexp = regexp.MustCompile(`^(?P([^/\s]+/)?([^:\s]+))(:(?P[^@\s]+))?(@sha256:(?P\d+))?$`) type ImageRef struct { Repository string diff --git a/internal/common/testutil/testutil_test.go b/internal/common/testutil/testutil_test.go index 2f662ca8afc1..0b43ad8407a6 100644 --- a/internal/common/testutil/testutil_test.go +++ b/internal/common/testutil/testutil_test.go @@ -26,6 +26,7 @@ func TestGetAvailableLocalAddress(t *testing.T) { require.Error(t, err) require.Nil(t, ln1) } + func TestGetAvailableLocalUDPAddress(t *testing.T) { addr := GetAvailableLocalNetworkAddress(t, "udp") // Endpoint should be free. diff --git a/internal/coreinternal/aggregateutil/aggregate.go b/internal/coreinternal/aggregateutil/aggregate.go index 0cd986a33037..198ca09bee0f 100644 --- a/internal/coreinternal/aggregateutil/aggregate.go +++ b/internal/coreinternal/aggregateutil/aggregate.go @@ -283,7 +283,8 @@ func mergeHistogramDataPoints(dpsMap map[string]pmetric.HistogramDataPointSlice, } func mergeExponentialHistogramDataPoints(dpsMap map[string]pmetric.ExponentialHistogramDataPointSlice, - to pmetric.ExponentialHistogramDataPointSlice) { + to pmetric.ExponentialHistogramDataPointSlice, +) { for _, dps := range dpsMap { dp := to.AppendEmpty() dps.At(0).MoveTo(dp) @@ -316,7 +317,8 @@ func mergeExponentialHistogramDataPoints(dpsMap map[string]pmetric.ExponentialHi } func groupNumberDataPoints(dps pmetric.NumberDataPointSlice, useStartTime bool, - dpsByAttrsAndTs map[string]pmetric.NumberDataPointSlice) { + dpsByAttrsAndTs map[string]pmetric.NumberDataPointSlice, +) { var keyHashParts []any for i := 0; i < dps.Len(); i++ { if useStartTime { @@ -331,7 +333,8 @@ func groupNumberDataPoints(dps pmetric.NumberDataPointSlice, useStartTime bool, } func groupHistogramDataPoints(dps pmetric.HistogramDataPointSlice, useStartTime bool, - dpsByAttrsAndTs map[string]pmetric.HistogramDataPointSlice) { + dpsByAttrsAndTs map[string]pmetric.HistogramDataPointSlice, +) { for i := 0; i < dps.Len(); i++ { dp := dps.At(i) keyHashParts := make([]any, 0, dp.ExplicitBounds().Len()+4) @@ -352,7 +355,8 @@ func groupHistogramDataPoints(dps pmetric.HistogramDataPointSlice, useStartTime } func groupExponentialHistogramDataPoints(dps pmetric.ExponentialHistogramDataPointSlice, useStartTime bool, - dpsByAttrsAndTs map[string]pmetric.ExponentialHistogramDataPointSlice) { + dpsByAttrsAndTs map[string]pmetric.ExponentialHistogramDataPointSlice, +) { for i := 0; i < dps.Len(); i++ { dp := dps.At(i) keyHashParts := make([]any, 0, 5) diff --git a/internal/coreinternal/attraction/attraction_test.go b/internal/coreinternal/attraction/attraction_test.go index a107462efed2..6d2b3cbe5441 100644 --- a/internal/coreinternal/attraction/attraction_test.go +++ b/internal/coreinternal/attraction/attraction_test.go @@ -400,7 +400,6 @@ func TestAttributes_Extract(t *testing.T) { cfg := &Settings{ Actions: []ActionKeyValue{ - {Key: "user_key", RegexPattern: "^\\/api\\/v1\\/document\\/(?P.*)\\/update\\/(?P.*)$", Action: EXTRACT}, }, } @@ -853,7 +852,8 @@ func TestInvalidConfig(t *testing.T) { }, errorString: "error creating AttrProc due to missing required field 
\"pattern\" for action \"extract\" at the 0-th action", }, - {name: "set value for extract", + { + name: "set value for extract", actionLists: []ActionKeyValue{ {Key: "Key", RegexPattern: "(?P.*?)$", Value: "value", Action: EXTRACT}, }, @@ -915,7 +915,8 @@ func TestValidConfiguration(t *testing.T) { compiledRegex := regexp.MustCompile(`^\/api\/v1\/document\/(?P.*)\/update$`) assert.Equal(t, []attributeAction{ {Key: "one", Action: DELETE}, - {Key: "two", Action: INSERT, + { + Key: "two", Action: INSERT, AttributeValue: &av, }, {Key: "three", FromAttribute: "two", Action: UPDATE}, diff --git a/internal/coreinternal/goldendataset/traces_generator.go b/internal/coreinternal/goldendataset/traces_generator.go index cc4a8baf439d..ebb093c0d636 100644 --- a/internal/coreinternal/goldendataset/traces_generator.go +++ b/internal/coreinternal/goldendataset/traces_generator.go @@ -50,7 +50,8 @@ func GenerateTraces(tracePairsFile string, spanPairsFile string) ([]ptrace.Trace // // The generated resource spans. If err is not nil, some or all of the resource spans fields will be nil. func appendResourceSpan(tracingInputs *PICTTracingInputs, spanPairsFile string, - random io.Reader, resourceSpansSlice ptrace.ResourceSpansSlice) error { + random io.Reader, resourceSpansSlice ptrace.ResourceSpansSlice, +) error { resourceSpan := resourceSpansSlice.AppendEmpty() err := appendScopeSpans(tracingInputs, spanPairsFile, random, resourceSpan.ScopeSpans()) if err != nil { @@ -61,7 +62,8 @@ func appendResourceSpan(tracingInputs *PICTTracingInputs, spanPairsFile string, } func appendScopeSpans(tracingInputs *PICTTracingInputs, spanPairsFile string, - random io.Reader, scopeSpansSlice ptrace.ScopeSpansSlice) error { + random io.Reader, scopeSpansSlice ptrace.ScopeSpansSlice, +) error { var count int switch tracingInputs.InstrumentationLibrary { case LibraryNone: diff --git a/internal/coreinternal/timeutils/internal/ctimefmt/ctimefmt.go b/internal/coreinternal/timeutils/internal/ctimefmt/ctimefmt.go index 921ebdc8c3c1..adb0c5dded54 100644 --- a/internal/coreinternal/timeutils/internal/ctimefmt/ctimefmt.go +++ b/internal/coreinternal/timeutils/internal/ctimefmt/ctimefmt.go @@ -16,9 +16,11 @@ import ( "time" ) -var ctimeRegexp = regexp.MustCompile(`%.`) -var invalidFractionalSecondsStrptime = regexp.MustCompile(`[^.,]%[Lfs]`) -var decimalsRegexp = regexp.MustCompile(`\d`) +var ( + ctimeRegexp = regexp.MustCompile(`%.`) + invalidFractionalSecondsStrptime = regexp.MustCompile(`[^.,]%[Lfs]`) + decimalsRegexp = regexp.MustCompile(`\d`) +) var ctimeSubstitutes = map[string]string{ "%Y": "2006", diff --git a/internal/coreinternal/timeutils/internal/ctimefmt/ctimefmt_test.go b/internal/coreinternal/timeutils/internal/ctimefmt/ctimefmt_test.go index 6605ec424d24..6e80091d6a5c 100644 --- a/internal/coreinternal/timeutils/internal/ctimefmt/ctimefmt_test.go +++ b/internal/coreinternal/timeutils/internal/ctimefmt/ctimefmt_test.go @@ -16,12 +16,14 @@ import ( "github.com/stretchr/testify/require" ) -var format1 = "%Y-%m-%d %H:%M:%S.%f" -var format2 = "%Y-%m-%d %l:%M:%S.%L %P, %a" -var value1 = "2019-01-02 15:04:05.666666" -var value2 = "2019-01-02 3:04:05.666 pm, Wed" -var dt1 = time.Date(2019, 1, 2, 15, 4, 5, 666666000, time.UTC) -var dt2 = time.Date(2019, 1, 2, 15, 4, 5, 666000000, time.UTC) +var ( + format1 = "%Y-%m-%d %H:%M:%S.%f" + format2 = "%Y-%m-%d %l:%M:%S.%L %P, %a" + value1 = "2019-01-02 15:04:05.666666" + value2 = "2019-01-02 3:04:05.666 pm, Wed" + dt1 = time.Date(2019, 1, 2, 15, 4, 5, 666666000, time.UTC) + dt2 = 
time.Date(2019, 1, 2, 15, 4, 5, 666000000, time.UTC) +) func TestFormat(t *testing.T) { s, err := Format(format1, dt1) diff --git a/internal/coreinternal/timeutils/parser_test.go b/internal/coreinternal/timeutils/parser_test.go index 80d572ef2c70..256154c30332 100644 --- a/internal/coreinternal/timeutils/parser_test.go +++ b/internal/coreinternal/timeutils/parser_test.go @@ -19,40 +19,40 @@ func TestParseGoTimeBadLocation(t *testing.T) { func Test_setTimestampYear(t *testing.T) { t.Run("Normal", func(t *testing.T) { Now = func() time.Time { - return time.Date(2020, 06, 16, 3, 31, 34, 525, time.UTC) + return time.Date(2020, 0o6, 16, 3, 31, 34, 525, time.UTC) } - noYear := time.Date(0, 06, 16, 3, 31, 34, 525, time.UTC) + noYear := time.Date(0, 0o6, 16, 3, 31, 34, 525, time.UTC) yearAdded := SetTimestampYear(noYear) - expected := time.Date(2020, 06, 16, 3, 31, 34, 525, time.UTC) + expected := time.Date(2020, 0o6, 16, 3, 31, 34, 525, time.UTC) require.Equal(t, expected, yearAdded) }) t.Run("FutureOneDay", func(t *testing.T) { Now = func() time.Time { - return time.Date(2020, 01, 16, 3, 31, 34, 525, time.UTC) + return time.Date(2020, 0o1, 16, 3, 31, 34, 525, time.UTC) } - noYear := time.Date(0, 01, 17, 3, 31, 34, 525, time.UTC) + noYear := time.Date(0, 0o1, 17, 3, 31, 34, 525, time.UTC) yearAdded := SetTimestampYear(noYear) - expected := time.Date(2020, 01, 17, 3, 31, 34, 525, time.UTC) + expected := time.Date(2020, 0o1, 17, 3, 31, 34, 525, time.UTC) require.Equal(t, expected, yearAdded) }) t.Run("FutureEightDays", func(t *testing.T) { Now = func() time.Time { - return time.Date(2020, 01, 16, 3, 31, 34, 525, time.UTC) + return time.Date(2020, 0o1, 16, 3, 31, 34, 525, time.UTC) } - noYear := time.Date(0, 01, 24, 3, 31, 34, 525, time.UTC) + noYear := time.Date(0, 0o1, 24, 3, 31, 34, 525, time.UTC) yearAdded := SetTimestampYear(noYear) - expected := time.Date(2019, 01, 24, 3, 31, 34, 525, time.UTC) + expected := time.Date(2019, 0o1, 24, 3, 31, 34, 525, time.UTC) require.Equal(t, expected, yearAdded) }) t.Run("RolloverYear", func(t *testing.T) { Now = func() time.Time { - return time.Date(2020, 01, 01, 3, 31, 34, 525, time.UTC) + return time.Date(2020, 0o1, 0o1, 3, 31, 34, 525, time.UTC) } noYear := time.Date(0, 12, 31, 3, 31, 34, 525, time.UTC) diff --git a/internal/docker/matcher.go b/internal/docker/matcher.go index c0c14911a337..e54e9c75773f 100644 --- a/internal/docker/matcher.go +++ b/internal/docker/matcher.go @@ -56,7 +56,6 @@ func newStringMatcher(items []string) (*stringMatcher, error) { // by definition this must lead and end with '/' chars reText := item[1 : len(item)-1] re, err = regexp.Compile(reText) - if err != nil { return nil, fmt.Errorf("invalid regex item: %w", err) } diff --git a/internal/filter/filterset/config.go b/internal/filter/filterset/config.go index 7a60b641651e..f6c6c757d549 100644 --- a/internal/filter/filterset/config.go +++ b/internal/filter/filterset/config.go @@ -22,9 +22,7 @@ const ( MatchTypeFieldName = "match_type" ) -var ( - validMatchTypes = []MatchType{Regexp, Strict} -) +var validMatchTypes = []MatchType{Regexp, Strict} // Config configures the matching behavior of a FilterSet. 
type Config struct { diff --git a/internal/filter/filterset/regexp/regexpfilterset_test.go b/internal/filter/filterset/regexp/regexpfilterset_test.go index 6e8dfbf4ff6e..b6e87d15fde7 100644 --- a/internal/filter/filterset/regexp/regexpfilterset_test.go +++ b/internal/filter/filterset/regexp/regexpfilterset_test.go @@ -10,18 +10,16 @@ import ( "github.com/stretchr/testify/require" ) -var ( - validRegexpFilters = []string{ - "prefix/.*", - "prefix_.*", - ".*/suffix", - ".*_suffix", - ".*/contains/.*", - ".*_contains_.*", - "full/name/match", - "full_name_match", - } -) +var validRegexpFilters = []string{ + "prefix/.*", + "prefix_.*", + ".*/suffix", + ".*_suffix", + ".*/contains/.*", + ".*_contains_.*", + "full/name/match", + "full_name_match", +} func TestNewRegexpFilterSet(t *testing.T) { tests := []struct { diff --git a/internal/filter/filterset/strict/strictfilterset_test.go b/internal/filter/filterset/strict/strictfilterset_test.go index 63317917c534..9947b58971fa 100644 --- a/internal/filter/filterset/strict/strictfilterset_test.go +++ b/internal/filter/filterset/strict/strictfilterset_test.go @@ -9,13 +9,11 @@ import ( "github.com/stretchr/testify/assert" ) -var ( - validStrictFilters = []string{ - "exact_string_match", - ".*/suffix", - "(a|b)", - } -) +var validStrictFilters = []string{ + "exact_string_match", + ".*/suffix", + "(a|b)", +} func TestNewStrictFilterSet(t *testing.T) { tests := []struct { diff --git a/internal/k8sconfig/config.go b/internal/k8sconfig/config.go index 8d57fef8709d..d3391166762d 100644 --- a/internal/k8sconfig/config.go +++ b/internal/k8sconfig/config.go @@ -101,7 +101,6 @@ func CreateRestConfig(apiConf APIConfig) (*rest.Config, error) { } authConf, err = clientcmd.NewNonInteractiveDeferredLoadingClientConfig( loadingRules, configOverrides).ClientConfig() - if err != nil { return nil, fmt.Errorf("error connecting to k8s with auth_type=%s: %w", AuthTypeKubeConfig, err) } diff --git a/internal/k8stest/client.go b/internal/k8stest/client.go index 787cd5f7a81a..3de4092c4d51 100644 --- a/internal/k8stest/client.go +++ b/internal/k8stest/client.go @@ -41,5 +41,6 @@ func NewK8sClient(kubeconfigPath string) (*K8sClient, error) { mapper := restmapper.NewDeferredDiscoveryRESTMapper(memory.NewMemCacheClient(discoveryClient)) return &K8sClient{ - DynamicClient: dynamicClient, DiscoveryClient: discoveryClient, Mapper: mapper}, nil + DynamicClient: dynamicClient, DiscoveryClient: discoveryClient, Mapper: mapper, + }, nil } diff --git a/internal/kubelet/client_test.go b/internal/kubelet/client_test.go index f1ac1be5b478..a177e3404361 100644 --- a/internal/kubelet/client_test.go +++ b/internal/kubelet/client_test.go @@ -28,9 +28,11 @@ import ( "github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig" ) -const certPath = "./testdata/testcert.crt" -const keyFile = "./testdata/testkey.key" -const errSignedByUnknownCA = "tls: failed to verify certificate: x509: certificate signed by unknown authority" +const ( + certPath = "./testdata/testcert.crt" + keyFile = "./testdata/testkey.key" + errSignedByUnknownCA = "tls: failed to verify certificate: x509: certificate signed by unknown authority" +) func TestClient(t *testing.T) { tr := &fakeRoundTripper{} diff --git a/internal/metadataproviders/aws/ec2/metadata_test.go b/internal/metadataproviders/aws/ec2/metadata_test.go index 39fb4c43f0a1..1cdb0f53c497 100644 --- a/internal/metadataproviders/aws/ec2/metadata_test.go +++ b/internal/metadataproviders/aws/ec2/metadata_test.go @@ -39,8 +39,7 @@ func 
TestMetadataProviderGetError(t *testing.T) { } func TestMetadataProvider_available(t *testing.T) { - type fields struct { - } + type fields struct{} type args struct { ctx context.Context sess *session.Session diff --git a/internal/otelarrow/admission2/boundedqueue.go b/internal/otelarrow/admission2/boundedqueue.go index e0cd95e3bb2c..026ad341c317 100644 --- a/internal/otelarrow/admission2/boundedqueue.go +++ b/internal/otelarrow/admission2/boundedqueue.go @@ -15,8 +15,10 @@ import ( "google.golang.org/grpc/status" ) -var ErrTooMuchWaiting = status.Error(grpccodes.ResourceExhausted, "rejecting request, too much pending data") -var ErrRequestTooLarge = status.Errorf(grpccodes.InvalidArgument, "rejecting request, request is too large") +var ( + ErrTooMuchWaiting = status.Error(grpccodes.ResourceExhausted, "rejecting request, too much pending data") + ErrRequestTooLarge = status.Errorf(grpccodes.InvalidArgument, "rejecting request, request is too large") +) // BoundedQueue is a LIFO-oriented admission-controlled Queue. type BoundedQueue struct { diff --git a/internal/otelarrow/test/e2e_test.go b/internal/otelarrow/test/e2e_test.go index 348956d2d681..9c5f75d64da0 100644 --- a/internal/otelarrow/test/e2e_test.go +++ b/internal/otelarrow/test/e2e_test.go @@ -78,13 +78,15 @@ type testConsumer struct { var _ consumer.Traces = &testConsumer{} -type ExpConfig = otelarrowexporter.Config -type RecvConfig = otelarrowreceiver.Config -type CfgFunc func(*ExpConfig, *RecvConfig) -type GenFunc func(int) ptrace.Traces -type MkGen func() GenFunc -type EndFunc func(t *testing.T, tp testParams, testCon *testConsumer, expect [][]ptrace.Traces) (rops, eops map[string]int) -type ConsumerErrFunc func(t *testing.T, err error) +type ( + ExpConfig = otelarrowexporter.Config + RecvConfig = otelarrowreceiver.Config + CfgFunc func(*ExpConfig, *RecvConfig) + GenFunc func(int) ptrace.Traces + MkGen func() GenFunc + EndFunc func(t *testing.T, tp testParams, testCon *testConsumer, expect [][]ptrace.Traces) (rops, eops map[string]int) + ConsumerErrFunc func(t *testing.T, err error) +) func (*testConsumer) Capabilities() consumer.Capabilities { return consumer.Capabilities{} @@ -462,7 +464,7 @@ func TestIntegrationTracesSimple(t *testing.T) { defer cancel() // until 10 threads can write 1000 spans - var params = testParams{ + params := testParams{ threadCount: 10, requestWhileTrue: func(test *testConsumer) bool { return test.sink.SpanCount() < 1000 @@ -483,7 +485,7 @@ func TestIntegrationDeadlinePropagation(t *testing.T) { defer cancel() // Until at least one span is written. 
- var params = testParams{ + params := testParams{ threadCount: 1, requestWhileTrue: func(test *testConsumer) bool { return test.sink.SpanCount() < 1 @@ -596,7 +598,7 @@ func TestIntegrationSelfTracing(t *testing.T) { defer cancel() // until 2 Arrow stream spans are received from self instrumentation - var params = testParams{ + params := testParams{ threadCount: 10, requestWhileTrue: func(test *testConsumer) bool { cnt := 0 diff --git a/internal/pdatautil/logs_test.go b/internal/pdatautil/logs_test.go index 48329fac7876..75b51344a24e 100644 --- a/internal/pdatautil/logs_test.go +++ b/internal/pdatautil/logs_test.go @@ -142,13 +142,15 @@ func TestGroupByResourceLogs(t *testing.T) { }, { name: "single", - input: []resourceLogs{newResourceLogs(1, - newScopeLogs(11, 101, 102, 103), - ), + input: []resourceLogs{ + newResourceLogs(1, + newScopeLogs(11, 101, 102, 103), + ), }, - expected: []resourceLogs{newResourceLogs(1, - newScopeLogs(11, 101, 102, 103), - ), + expected: []resourceLogs{ + newResourceLogs(1, + newScopeLogs(11, 101, 102, 103), + ), }, }, { diff --git a/internal/splunk/common.go b/internal/splunk/common.go index c6b4f67e6e4d..c7cc3c2ecb40 100644 --- a/internal/splunk/common.go +++ b/internal/splunk/common.go @@ -38,9 +38,7 @@ const ( metricNamePattern = "^metric_name:([A-Za-z\\.:][A-Za-z0-9_\\.:]*)$" ) -var ( - metricNameRegexp = regexp.MustCompile(metricNamePattern) -) +var metricNameRegexp = regexp.MustCompile(metricNamePattern) // AccessTokenPassthroughConfig configures passing through access tokens. type AccessTokenPassthroughConfig struct { diff --git a/internal/sqlquery/scraper_test.go b/internal/sqlquery/scraper_test.go index 208657c7c3e7..f7883589f9f9 100644 --- a/internal/sqlquery/scraper_test.go +++ b/internal/sqlquery/scraper_test.go @@ -378,17 +378,18 @@ func TestScraper_FakeDB_MultiRows_Error(t *testing.T) { Client: NewDbClient(db, "", logger, TelemetryConfig{}), Logger: logger, Query: Query{ - Metrics: []MetricCfg{{ - MetricName: "my.col.0", - ValueColumn: "col_0", - Description: "my description 0", - Unit: "my-unit-0", - }, { - MetricName: "my.col.1", - ValueColumn: "col_1", - Description: "my description 1", - Unit: "my-unit-1", - }, + Metrics: []MetricCfg{ + { + MetricName: "my.col.0", + ValueColumn: "col_0", + Description: "my description 0", + Unit: "my-unit-0", + }, { + MetricName: "my.col.1", + ValueColumn: "col_1", + Description: "my description 1", + Unit: "my-unit-1", + }, }, }, } From a7d32bfbf154285556cc0f4976489933129453ec Mon Sep 17 00:00:00 2001 From: Joshua MacDonald Date: Fri, 15 Nov 2024 11:49:01 -0800 Subject: [PATCH 07/14] Make otelarrow admission control metrics consistent (#36359) --- .chloggen/otelarrow-metrics.yaml | 31 +++ internal/otelarrow/admission2/boundedqueue.go | 39 +++- .../otelarrow/admission2/boundedqueue_test.go | 198 +++++++++++++----- internal/otelarrow/doc.go | 6 + internal/otelarrow/documentation.md | 23 ++ .../generated_component_telemetry_test.go | 11 +- internal/otelarrow/generated_package_test.go | 13 ++ internal/otelarrow/go.mod | 1 + .../internal/metadata/generated_telemetry.go | 97 +++++++++ .../metadata/generated_telemetry_test.go | 4 +- internal/otelarrow/metadata.yaml | 25 +++ receiver/otelarrowreceiver/README.md | 11 +- receiver/otelarrowreceiver/go.mod | 4 +- .../otelarrowreceiver/internal/arrow/arrow.go | 53 ++--- .../internal/arrow/arrow_test.go | 9 +- .../internal/logs/otlp_test.go | 3 +- .../internal/metadata/generated_telemetry.go | 77 ------- .../internal/metrics/otlp_test.go | 3 +- 
.../internal/trace/otlp_test.go | 3 +- receiver/otelarrowreceiver/metadata.yaml | 26 --- receiver/otelarrowreceiver/otelarrow.go | 5 +- 21 files changed, 425 insertions(+), 217 deletions(-) create mode 100644 .chloggen/otelarrow-metrics.yaml create mode 100644 internal/otelarrow/doc.go create mode 100644 internal/otelarrow/documentation.md rename {receiver/otelarrowreceiver => internal/otelarrow}/generated_component_telemetry_test.go (85%) create mode 100644 internal/otelarrow/generated_package_test.go create mode 100644 internal/otelarrow/internal/metadata/generated_telemetry.go rename {receiver/otelarrowreceiver => internal/otelarrow}/internal/metadata/generated_telemetry_test.go (95%) delete mode 100644 receiver/otelarrowreceiver/internal/metadata/generated_telemetry.go diff --git a/.chloggen/otelarrow-metrics.yaml b/.chloggen/otelarrow-metrics.yaml new file mode 100644 index 000000000000..b5531151dd7d --- /dev/null +++ b/.chloggen/otelarrow-metrics.yaml @@ -0,0 +1,31 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: breaking + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: otelarrowreceiver + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: New admission control metrics are consistent across Arrow and OTLP data paths. + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [36334] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: | + `otelcol_otelarrow_admission_in_flight_bytes` new, replaces `otelcol_otel_arrow_receiver_in_flight_bytes` + `otelcol_otelarrow_admission_waiting_bytes`: new, describes waiting requests + `otelcol_otel_arrow_receiver_in_flight_items`: removed + `otelcol_otel_arrow_receiver_in_flight_requests`: removed + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [user] diff --git a/internal/otelarrow/admission2/boundedqueue.go b/internal/otelarrow/admission2/boundedqueue.go index 026ad341c317..0b5efc67fb48 100644 --- a/internal/otelarrow/admission2/boundedqueue.go +++ b/internal/otelarrow/admission2/boundedqueue.go @@ -10,9 +10,13 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/trace" grpccodes "google.golang.org/grpc/codes" "google.golang.org/grpc/status" + + internalmetadata "github.com/open-telemetry/opentelemetry-collector-contrib/internal/otelarrow/internal/metadata" + "github.com/open-telemetry/opentelemetry-collector-contrib/internal/otelarrow/netstats" ) var ( @@ -22,9 +26,10 @@ var ( // BoundedQueue is a LIFO-oriented admission-controlled Queue. 
type BoundedQueue struct { - maxLimitAdmit uint64 - maxLimitWait uint64 - tracer trace.Tracer + maxLimitAdmit uint64 + maxLimitWait uint64 + tracer trace.Tracer + telemetryBuilder *internalmetadata.TelemetryBuilder // lock protects currentAdmitted, currentWaiting, and waiters @@ -45,13 +50,37 @@ type waiter struct { // NewBoundedQueue returns a LIFO-oriented Queue implementation which // admits `maxLimitAdmit` bytes concurrently and allows up to // `maxLimitWait` bytes to wait for admission. -func NewBoundedQueue(ts component.TelemetrySettings, maxLimitAdmit, maxLimitWait uint64) Queue { - return &BoundedQueue{ +func NewBoundedQueue(id component.ID, ts component.TelemetrySettings, maxLimitAdmit, maxLimitWait uint64) (Queue, error) { + bq := &BoundedQueue{ maxLimitAdmit: maxLimitAdmit, maxLimitWait: maxLimitWait, waiters: list.New(), tracer: ts.TracerProvider.Tracer("github.com/open-telemetry/opentelemetry-collector-contrib/internal/otelarrow"), } + attr := metric.WithAttributes(attribute.String(netstats.ReceiverKey, id.String())) + telemetryBuilder, err := internalmetadata.NewTelemetryBuilder(ts, + internalmetadata.WithOtelarrowAdmissionInFlightBytesCallback(bq.inFlightCB, attr), + internalmetadata.WithOtelarrowAdmissionWaitingBytesCallback(bq.waitingCB, attr), + ) + if err != nil { + return nil, err + } + bq.telemetryBuilder = telemetryBuilder + return bq, nil +} + +func (bq *BoundedQueue) inFlightCB() int64 { + // Note, see https://github.com/open-telemetry/otel-arrow/issues/270 + bq.lock.Lock() + defer bq.lock.Unlock() + return int64(bq.currentAdmitted) +} + +func (bq *BoundedQueue) waitingCB() int64 { + // Note, see https://github.com/open-telemetry/otel-arrow/issues/270 + bq.lock.Lock() + defer bq.lock.Unlock() + return int64(bq.currentWaiting) } // acquireOrGetWaiter returns with three distinct conditions depending diff --git a/internal/otelarrow/admission2/boundedqueue_test.go b/internal/otelarrow/admission2/boundedqueue_test.go index 8903e8b90773..354419f6893a 100644 --- a/internal/otelarrow/admission2/boundedqueue_test.go +++ b/internal/otelarrow/admission2/boundedqueue_test.go @@ -12,23 +12,53 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/config/configtelemetry" + "go.opentelemetry.io/otel/metric" + sdkmetric "go.opentelemetry.io/otel/sdk/metric" + "go.opentelemetry.io/otel/sdk/metric/metricdata" + "go.opentelemetry.io/otel/sdk/resource" "google.golang.org/grpc/codes" grpccodes "google.golang.org/grpc/codes" "google.golang.org/grpc/status" + + "github.com/open-telemetry/opentelemetry-collector-contrib/internal/otelarrow/netstats" +) + +const ( + expectScope = "github.com/open-telemetry/opentelemetry-collector-contrib/internal/otelarrow" + expectInFlightName = "otelcol_otelarrow_admission_in_flight_bytes" + expectWaitingName = "otelcol_otelarrow_admission_waiting_bytes" ) type bqTest struct { - t *testing.T + t *testing.T + reader *sdkmetric.ManualReader + provider *sdkmetric.MeterProvider *BoundedQueue } -var noopTelemetry = componenttest.NewNopTelemetrySettings() - func newBQTest(t *testing.T, maxAdmit, maxWait uint64) bqTest { + settings := componenttest.NewNopTelemetrySettings() + + reader := sdkmetric.NewManualReader() + provider := sdkmetric.NewMeterProvider( + sdkmetric.WithResource(resource.Empty()), + sdkmetric.WithReader(reader), + ) + settings.MeterProvider = provider + settings.LeveledMeterProvider 
= func(_ configtelemetry.Level) metric.MeterProvider { + return settings.MeterProvider + } + + bq, err := NewBoundedQueue(component.MustNewID("admission_testing"), settings, maxAdmit, maxWait) + require.NoError(t, err) return bqTest{ t: t, - BoundedQueue: NewBoundedQueue(noopTelemetry, maxAdmit, maxWait).(*BoundedQueue), + reader: reader, + provider: provider, + BoundedQueue: bq.(*BoundedQueue), } } @@ -67,35 +97,39 @@ func mkRange(from, to uint64) []uint64 { func TestBoundedQueueLimits(t *testing.T) { for _, test := range []struct { - name string - maxLimitAdmit uint64 - maxLimitWait uint64 - requestSizes []uint64 - timeout time.Duration - expectErrs map[string]int + name string + maxLimitAdmit uint64 + maxLimitWait uint64 + expectAcquired [2]int64 + requestSizes []uint64 + timeout time.Duration + expectErrs map[string]int }{ { - name: "simple_no_waiters_25", - maxLimitAdmit: 1000, - maxLimitWait: 0, - requestSizes: mkRepeat(25, 40), - timeout: 0, - expectErrs: map[string]int{}, + name: "simple_no_waiters_25", + maxLimitAdmit: 1000, + maxLimitWait: 0, + requestSizes: mkRepeat(25, 40), + expectAcquired: [2]int64{1000, 1000}, + timeout: 0, + expectErrs: map[string]int{}, }, { - name: "simple_no_waiters_1", - maxLimitAdmit: 1000, - maxLimitWait: 0, - requestSizes: mkRepeat(1, 1000), - timeout: 0, - expectErrs: map[string]int{}, + name: "simple_no_waiters_1", + maxLimitAdmit: 1000, + maxLimitWait: 0, + requestSizes: mkRepeat(1, 1000), + expectAcquired: [2]int64{1000, 1000}, + timeout: 0, + expectErrs: map[string]int{}, }, { - name: "without_waiting_remainder", - maxLimitAdmit: 1000, - maxLimitWait: 0, - requestSizes: mkRepeat(30, 40), - timeout: 0, + name: "without_waiting_remainder", + maxLimitAdmit: 1000, + maxLimitWait: 0, + requestSizes: mkRepeat(30, 40), + expectAcquired: [2]int64{990, 990}, + timeout: 0, expectErrs: map[string]int{ // 7 failures with a remainder of 10 // 30 * (40 - 7) = 990 @@ -103,51 +137,57 @@ func TestBoundedQueueLimits(t *testing.T) { }, }, { - name: "without_waiting_complete", - maxLimitAdmit: 1000, - maxLimitWait: 0, - requestSizes: append(mkRepeat(30, 40), 10), - timeout: 0, + name: "without_waiting_complete", + maxLimitAdmit: 1000, + maxLimitWait: 0, + requestSizes: append(mkRepeat(30, 40), 10), + expectAcquired: [2]int64{1000, 1000}, + timeout: 0, expectErrs: map[string]int{ // 30*33+10 succeed, 7 failures (as above) ErrTooMuchWaiting.Error(): 7, }, }, { - name: "with_waiters_timeout", - maxLimitAdmit: 1000, - maxLimitWait: 1000, - requestSizes: mkRepeat(20, 100), - timeout: time.Second, + name: "with_waiters_timeout", + maxLimitAdmit: 1000, + maxLimitWait: 1000, + requestSizes: mkRepeat(20, 100), + expectAcquired: [2]int64{1000, 1000}, + timeout: time.Second, expectErrs: map[string]int{ // 20*50=1000 is half of the requests timing out status.Error(grpccodes.Canceled, context.DeadlineExceeded.Error()).Error(): 50, }, }, { - name: "with_size_exceeded", - maxLimitAdmit: 1000, - maxLimitWait: 2000, - requestSizes: []uint64{1001}, - timeout: 0, + name: "with_size_exceeded", + maxLimitAdmit: 1000, + maxLimitWait: 2000, + requestSizes: []uint64{1001}, + expectAcquired: [2]int64{0, 0}, + timeout: 0, expectErrs: map[string]int{ ErrRequestTooLarge.Error(): 1, }, }, { - name: "mixed_sizes", - maxLimitAdmit: 45, // 45 is the exact sum of request sizes - maxLimitWait: 0, - requestSizes: mkRange(1, 9), - timeout: 0, - expectErrs: map[string]int{}, + name: "mixed_sizes", + maxLimitAdmit: 45, // 45 is the exact sum of request sizes + maxLimitWait: 0, + requestSizes: 
mkRange(1, 9), + expectAcquired: [2]int64{45, 45}, + timeout: 0, + expectErrs: map[string]int{}, }, { name: "too_many_mixed_sizes", maxLimitAdmit: 44, // all but one request will succeed maxLimitWait: 0, requestSizes: mkRange(1, 9), - timeout: 0, + // worst case is the size=9 fails, so minimum is 44-9+1 + expectAcquired: [2]int64{36, 44}, + timeout: 0, expectErrs: map[string]int{ ErrTooMuchWaiting.Error(): 1, }, @@ -190,6 +230,12 @@ func TestBoundedQueueLimits(t *testing.T) { wait1.Wait() + // The in-flight bytes are in-range, none waiting. + inflight, waiting := bq.verifyMetrics(t) + require.LessOrEqual(t, test.expectAcquired[0], inflight) + require.GreaterOrEqual(t, test.expectAcquired[1], inflight) + require.Equal(t, int64(0), waiting) + close(releaseChan) wait2.Wait() @@ -214,10 +260,55 @@ func TestBoundedQueueLimits(t *testing.T) { // and the final state is all 0. bq.waitForPending(0, 0) + + // metrics are zero + inflight, waiting = bq.verifyMetrics(t) + require.Equal(t, int64(0), inflight) + require.Equal(t, int64(0), waiting) }) } } +func (bq bqTest) verifyPoint(t *testing.T, m metricdata.Metrics) int64 { + switch a := m.Data.(type) { + case metricdata.Sum[int64]: + require.Len(t, a.DataPoints, 1) + dp := a.DataPoints[0] + for _, attr := range dp.Attributes.ToSlice() { + if attr.Key == netstats.ReceiverKey && attr.Value.AsString() == "admission_testing" { + return dp.Value + } + } + t.Errorf("point value not found: %v", m.Data) + default: + t.Errorf("incorrect metric data type: %T", m.Data) + } + return -1 +} + +func (bq bqTest) verifyMetrics(t *testing.T) (inflight int64, waiting int64) { + inflight = -1 + waiting = -1 + + var rm metricdata.ResourceMetrics + require.NoError(t, bq.reader.Collect(context.Background(), &rm)) + + for _, sm := range rm.ScopeMetrics { + if sm.Scope.Name != expectScope { + continue + } + for _, m := range sm.Metrics { + switch m.Name { + case expectInFlightName: + inflight = bq.verifyPoint(t, m) + case expectWaitingName: + waiting = bq.verifyPoint(t, m) + } + } + } + return +} + func TestBoundedQueueLIFO(t *testing.T) { const maxAdmit = 10 @@ -252,6 +343,11 @@ func TestBoundedQueueLIFO(t *testing.T) { notW1 := bq.startWaiter(ctx, secondWait, &relW1) bq.waitForPending(maxAdmit, maxAdmit) + // The in-flight and waiting bytes are counted. + inflight, waiting := bq.verifyMetrics(t) + require.Equal(t, int64(maxAdmit), inflight) + require.Equal(t, int64(maxAdmit), waiting) + relFirst() // early is true when releasing the first acquired @@ -278,6 +374,10 @@ func TestBoundedQueueLIFO(t *testing.T) { relW1() bq.waitForPending(0, 0) + + inflight, waiting = bq.verifyMetrics(t) + require.Equal(t, int64(0), inflight) + require.Equal(t, int64(0), waiting) }) } } diff --git a/internal/otelarrow/doc.go b/internal/otelarrow/doc.go new file mode 100644 index 000000000000..a3890efafb00 --- /dev/null +++ b/internal/otelarrow/doc.go @@ -0,0 +1,6 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +//go:generate mdatagen metadata.yaml + +package otelarrow // import "github.com/open-telemetry/opentelemetry-collector-contrib/internal/otelarrow" diff --git a/internal/otelarrow/documentation.md b/internal/otelarrow/documentation.md new file mode 100644 index 000000000000..eff8f632560b --- /dev/null +++ b/internal/otelarrow/documentation.md @@ -0,0 +1,23 @@ +[comment]: <> (Code generated by mdatagen. DO NOT EDIT.) + +# otelarrow + +## Internal Telemetry + +The following telemetry is emitted by this component. 
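Both admission metrics documented below are asynchronous (observable) up/down counters: their values are produced by a callback at collection time rather than recorded at call sites. A minimal sketch of that callback pattern, using the public OpenTelemetry Go metric API rather than the mdatagen-generated builder, follows; the meter name, the atomic state variable, and the `main` wrapper are assumptions made only to keep the example self-contained, while the metric name, unit, and description mirror the definitions in this patch.

```go
package main

import (
	"context"
	"sync/atomic"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
)

func main() {
	// State maintained by the admission controller; the callback reads it on demand.
	var inFlightBytes atomic.Int64

	meter := otel.Meter("example/otelarrow/admission") // illustrative scope name
	gauge, err := meter.Int64ObservableUpDownCounter(
		"otelcol_otelarrow_admission_in_flight_bytes",
		metric.WithDescription("Number of bytes that have started processing but are not finished."),
		metric.WithUnit("By"),
	)
	if err != nil {
		panic(err)
	}

	// Register the callback once; the SDK invokes it on every collection.
	if _, err := meter.RegisterCallback(func(_ context.Context, o metric.Observer) error {
		o.ObserveInt64(gauge, inFlightBytes.Load())
		return nil
	}, gauge); err != nil {
		panic(err)
	}

	inFlightBytes.Add(1024) // admission and release paths adjust this as requests enter and leave
}
```

In the change itself, the generated `TelemetryBuilder` performs the equivalent registration, and `BoundedQueue` supplies callbacks that read `currentAdmitted` and `currentWaiting` under its lock.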
+ +### otelcol_otelarrow_admission_in_flight_bytes + +Number of bytes that have started processing but are not finished. + +| Unit | Metric Type | Value Type | Monotonic | +| ---- | ----------- | ---------- | --------- | +| By | Sum | Int | false | + +### otelcol_otelarrow_admission_waiting_bytes + +Number of items waiting to start processing. + +| Unit | Metric Type | Value Type | Monotonic | +| ---- | ----------- | ---------- | --------- | +| By | Sum | Int | false | diff --git a/receiver/otelarrowreceiver/generated_component_telemetry_test.go b/internal/otelarrow/generated_component_telemetry_test.go similarity index 85% rename from receiver/otelarrowreceiver/generated_component_telemetry_test.go rename to internal/otelarrow/generated_component_telemetry_test.go index f738ca8f5696..15169a4838c9 100644 --- a/receiver/otelarrowreceiver/generated_component_telemetry_test.go +++ b/internal/otelarrow/generated_component_telemetry_test.go @@ -1,6 +1,6 @@ // Code generated by mdatagen. DO NOT EDIT. -package otelarrowreceiver +package otelarrow import ( "context" @@ -15,8 +15,6 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/component/componenttest" "go.opentelemetry.io/collector/config/configtelemetry" - "go.opentelemetry.io/collector/receiver" - "go.opentelemetry.io/collector/receiver/receivertest" ) type componentTestTelemetry struct { @@ -24,13 +22,6 @@ type componentTestTelemetry struct { meterProvider *sdkmetric.MeterProvider } -func (tt *componentTestTelemetry) NewSettings() receiver.Settings { - set := receivertest.NewNopSettings() - set.TelemetrySettings = tt.newTelemetrySettings() - set.ID = component.NewID(component.MustNewType("otelarrow")) - return set -} - func (tt *componentTestTelemetry) newTelemetrySettings() component.TelemetrySettings { set := componenttest.NewNopTelemetrySettings() set.MeterProvider = tt.meterProvider diff --git a/internal/otelarrow/generated_package_test.go b/internal/otelarrow/generated_package_test.go new file mode 100644 index 000000000000..ec77bf1fb2a0 --- /dev/null +++ b/internal/otelarrow/generated_package_test.go @@ -0,0 +1,13 @@ +// Code generated by mdatagen. DO NOT EDIT. + +package otelarrow + +import ( + "testing" + + "go.uber.org/goleak" +) + +func TestMain(m *testing.M) { + goleak.VerifyTestMain(m) +} diff --git a/internal/otelarrow/go.mod b/internal/otelarrow/go.mod index f42ea96a0fa6..0b883120dc36 100644 --- a/internal/otelarrow/go.mod +++ b/internal/otelarrow/go.mod @@ -23,6 +23,7 @@ require ( go.opentelemetry.io/otel/sdk v1.32.0 go.opentelemetry.io/otel/sdk/metric v1.32.0 go.opentelemetry.io/otel/trace v1.32.0 + go.uber.org/goleak v1.3.0 go.uber.org/multierr v1.11.0 go.uber.org/zap v1.27.0 google.golang.org/grpc v1.67.1 diff --git a/internal/otelarrow/internal/metadata/generated_telemetry.go b/internal/otelarrow/internal/metadata/generated_telemetry.go new file mode 100644 index 000000000000..64c1e40abefc --- /dev/null +++ b/internal/otelarrow/internal/metadata/generated_telemetry.go @@ -0,0 +1,97 @@ +// Code generated by mdatagen. DO NOT EDIT. + +package metadata + +import ( + "context" + "errors" + + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/trace" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/configtelemetry" +) + +// Deprecated: [v0.108.0] use LeveledMeter instead. 
+func Meter(settings component.TelemetrySettings) metric.Meter { + return settings.MeterProvider.Meter("github.com/open-telemetry/opentelemetry-collector-contrib/internal/otelarrow") +} + +func LeveledMeter(settings component.TelemetrySettings, level configtelemetry.Level) metric.Meter { + return settings.LeveledMeterProvider(level).Meter("github.com/open-telemetry/opentelemetry-collector-contrib/internal/otelarrow") +} + +func Tracer(settings component.TelemetrySettings) trace.Tracer { + return settings.TracerProvider.Tracer("github.com/open-telemetry/opentelemetry-collector-contrib/internal/otelarrow") +} + +// TelemetryBuilder provides an interface for components to report telemetry +// as defined in metadata and user config. +type TelemetryBuilder struct { + meter metric.Meter + OtelarrowAdmissionInFlightBytes metric.Int64ObservableUpDownCounter + observeOtelarrowAdmissionInFlightBytes func(context.Context, metric.Observer) error + OtelarrowAdmissionWaitingBytes metric.Int64ObservableUpDownCounter + observeOtelarrowAdmissionWaitingBytes func(context.Context, metric.Observer) error + meters map[configtelemetry.Level]metric.Meter +} + +// TelemetryBuilderOption applies changes to default builder. +type TelemetryBuilderOption interface { + apply(*TelemetryBuilder) +} + +type telemetryBuilderOptionFunc func(mb *TelemetryBuilder) + +func (tbof telemetryBuilderOptionFunc) apply(mb *TelemetryBuilder) { + tbof(mb) +} + +// WithOtelarrowAdmissionInFlightBytesCallback sets callback for observable OtelarrowAdmissionInFlightBytes metric. +func WithOtelarrowAdmissionInFlightBytesCallback(cb func() int64, opts ...metric.ObserveOption) TelemetryBuilderOption { + return telemetryBuilderOptionFunc(func(builder *TelemetryBuilder) { + builder.observeOtelarrowAdmissionInFlightBytes = func(_ context.Context, o metric.Observer) error { + o.ObserveInt64(builder.OtelarrowAdmissionInFlightBytes, cb(), opts...) + return nil + } + }) +} + +// WithOtelarrowAdmissionWaitingBytesCallback sets callback for observable OtelarrowAdmissionWaitingBytes metric. +func WithOtelarrowAdmissionWaitingBytesCallback(cb func() int64, opts ...metric.ObserveOption) TelemetryBuilderOption { + return telemetryBuilderOptionFunc(func(builder *TelemetryBuilder) { + builder.observeOtelarrowAdmissionWaitingBytes = func(_ context.Context, o metric.Observer) error { + o.ObserveInt64(builder.OtelarrowAdmissionWaitingBytes, cb(), opts...) 
+ return nil + } + }) +} + +// NewTelemetryBuilder provides a struct with methods to update all internal telemetry +// for a component +func NewTelemetryBuilder(settings component.TelemetrySettings, options ...TelemetryBuilderOption) (*TelemetryBuilder, error) { + builder := TelemetryBuilder{meters: map[configtelemetry.Level]metric.Meter{}} + for _, op := range options { + op.apply(&builder) + } + builder.meters[configtelemetry.LevelBasic] = LeveledMeter(settings, configtelemetry.LevelBasic) + var err, errs error + builder.OtelarrowAdmissionInFlightBytes, err = builder.meters[configtelemetry.LevelBasic].Int64ObservableUpDownCounter( + "otelcol_otelarrow_admission_in_flight_bytes", + metric.WithDescription("Number of bytes that have started processing but are not finished."), + metric.WithUnit("By"), + ) + errs = errors.Join(errs, err) + _, err = builder.meters[configtelemetry.LevelBasic].RegisterCallback(builder.observeOtelarrowAdmissionInFlightBytes, builder.OtelarrowAdmissionInFlightBytes) + errs = errors.Join(errs, err) + builder.OtelarrowAdmissionWaitingBytes, err = builder.meters[configtelemetry.LevelBasic].Int64ObservableUpDownCounter( + "otelcol_otelarrow_admission_waiting_bytes", + metric.WithDescription("Number of items waiting to start processing."), + metric.WithUnit("By"), + ) + errs = errors.Join(errs, err) + _, err = builder.meters[configtelemetry.LevelBasic].RegisterCallback(builder.observeOtelarrowAdmissionWaitingBytes, builder.OtelarrowAdmissionWaitingBytes) + errs = errors.Join(errs, err) + return &builder, errs +} diff --git a/receiver/otelarrowreceiver/internal/metadata/generated_telemetry_test.go b/internal/otelarrow/internal/metadata/generated_telemetry_test.go similarity index 95% rename from receiver/otelarrowreceiver/internal/metadata/generated_telemetry_test.go rename to internal/otelarrow/internal/metadata/generated_telemetry_test.go index 25002a5869ad..ab1411c26336 100644 --- a/receiver/otelarrowreceiver/internal/metadata/generated_telemetry_test.go +++ b/internal/otelarrow/internal/metadata/generated_telemetry_test.go @@ -54,14 +54,14 @@ func TestProviders(t *testing.T) { meter := Meter(set) if m, ok := meter.(mockMeter); ok { - require.Equal(t, "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/otelarrowreceiver", m.name) + require.Equal(t, "github.com/open-telemetry/opentelemetry-collector-contrib/internal/otelarrow", m.name) } else { require.Fail(t, "returned Meter not mockMeter") } tracer := Tracer(set) if m, ok := tracer.(mockTracer); ok { - require.Equal(t, "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/otelarrowreceiver", m.name) + require.Equal(t, "github.com/open-telemetry/opentelemetry-collector-contrib/internal/otelarrow", m.name) } else { require.Fail(t, "returned Meter not mockTracer") } diff --git a/internal/otelarrow/metadata.yaml b/internal/otelarrow/metadata.yaml index 0bd31377fc0d..56966ea6b25b 100644 --- a/internal/otelarrow/metadata.yaml +++ b/internal/otelarrow/metadata.yaml @@ -1,3 +1,28 @@ +type: otelarrow + status: + class: pkg + stability: + beta: [traces, metrics, logs] codeowners: active: [jmacd, moh-osman3] + +telemetry: + metrics: + otelarrow_admission_in_flight_bytes: + description: Number of bytes that have started processing but are not finished. + unit: By + enabled: true + sum: + monotonic: false + value_type: int + async: true + + otelarrow_admission_waiting_bytes: + description: Number of items waiting to start processing. 
+ enabled: true + unit: By + sum: + monotonic: false + value_type: int + async: true diff --git a/receiver/otelarrowreceiver/README.md b/receiver/otelarrowreceiver/README.md index 2088caec1bb0..2257de9d5ae9 100644 --- a/receiver/otelarrowreceiver/README.md +++ b/receiver/otelarrowreceiver/README.md @@ -173,9 +173,8 @@ exporters: In addition to the the standard [obsreport](https://pkg.go.dev/go.opentelemetry.io/collector/obsreport) -metrics, this component provides network-level measurement instruments -which we anticipate will become part of `obsreport` in the future. At -the `normal` level of metrics detail: +metrics, this component provides network-level measurement instruments +measuring bytes transported. At the `normal` level of metrics detail: - `otelcol_receiver_recv`: uncompressed bytes received, prior to compression - `otelcol_receiver_recv_wire`: compressed bytes received, on the wire. @@ -189,6 +188,12 @@ of data being returned from the receiver will be instrumented: - `otelcol_receiver_sent`: uncompressed bytes sent, prior to compression - `otelcol_receiver_sent_wire`: compressed bytes sent, on the wire. +Metric instruments expose the state of the admission controller at the +`normal` level of detail: + +- `otelcol_otelarrow_admission_in_flight_bytes`: number of uncompressed bytes admitted into the pipeline +- `otelcol_otelarrow_admission_waiting_bytes`: number of uncompressed bytes waiting for admission + There several OpenTelemetry Protocol with Apache Arrow-consumer related metrics available to help diagnose internal performance. These are disabled at the basic level of detail. At the normal level, diff --git a/receiver/otelarrowreceiver/go.mod b/receiver/otelarrowreceiver/go.mod index b406619d0a6d..c96e2a0e495d 100644 --- a/receiver/otelarrowreceiver/go.mod +++ b/receiver/otelarrowreceiver/go.mod @@ -25,9 +25,7 @@ require ( go.opentelemetry.io/collector/receiver v0.113.0 go.opentelemetry.io/collector/receiver/receivertest v0.113.0 go.opentelemetry.io/otel v1.32.0 - go.opentelemetry.io/otel/metric v1.32.0 go.opentelemetry.io/otel/sdk v1.32.0 - go.opentelemetry.io/otel/sdk/metric v1.32.0 go.opentelemetry.io/otel/trace v1.32.0 go.uber.org/goleak v1.3.0 go.uber.org/mock v0.5.0 @@ -78,6 +76,8 @@ require ( go.opentelemetry.io/collector/pipeline v0.113.0 // indirect go.opentelemetry.io/collector/receiver/receiverprofiles v0.113.0 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0 // indirect + go.opentelemetry.io/otel/metric v1.32.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.32.0 // indirect golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 // indirect golang.org/x/mod v0.18.0 // indirect golang.org/x/sync v0.8.0 // indirect diff --git a/receiver/otelarrowreceiver/internal/arrow/arrow.go b/receiver/otelarrowreceiver/internal/arrow/arrow.go index 6b28f52d356b..080b36e2a9ef 100644 --- a/receiver/otelarrowreceiver/internal/arrow/arrow.go +++ b/receiver/otelarrowreceiver/internal/arrow/arrow.go @@ -40,7 +40,6 @@ import ( "github.com/open-telemetry/opentelemetry-collector-contrib/internal/grpcutil" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/otelarrow/admission2" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/otelarrow/netstats" - internalmetadata "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/otelarrowreceiver/internal/metadata" ) const ( @@ -68,15 +67,14 @@ type Receiver struct { arrowpb.UnsafeArrowLogsServiceServer arrowpb.UnsafeArrowMetricsServiceServer - 
telemetry component.TelemetrySettings - tracer trace.Tracer - obsrecv *receiverhelper.ObsReport - gsettings configgrpc.ServerConfig - authServer auth.Server - newConsumer func() arrowRecord.ConsumerAPI - netReporter netstats.Interface - telemetryBuilder *internalmetadata.TelemetryBuilder - boundedQueue admission2.Queue + telemetry component.TelemetrySettings + tracer trace.Tracer + obsrecv *receiverhelper.ObsReport + gsettings configgrpc.ServerConfig + authServer auth.Server + newConsumer func() arrowRecord.ConsumerAPI + netReporter netstats.Interface + boundedQueue admission2.Queue } // receiverStream holds the inFlightWG for a single stream. @@ -97,21 +95,16 @@ func New( netReporter netstats.Interface, ) (*Receiver, error) { tracer := set.TelemetrySettings.TracerProvider.Tracer("otel-arrow-receiver") - telemetryBuilder, err := internalmetadata.NewTelemetryBuilder(set.TelemetrySettings) - if err != nil { - return nil, err - } return &Receiver{ - Consumers: cs, - obsrecv: obsrecv, - telemetry: set.TelemetrySettings, - tracer: tracer, - authServer: authServer, - newConsumer: newConsumer, - gsettings: gsettings, - netReporter: netReporter, - boundedQueue: bq, - telemetryBuilder: telemetryBuilder, + Consumers: cs, + obsrecv: obsrecv, + telemetry: set.TelemetrySettings, + tracer: tracer, + authServer: authServer, + newConsumer: newConsumer, + gsettings: gsettings, + netReporter: netReporter, + boundedQueue: bq, }, nil } @@ -423,7 +416,6 @@ func (r *receiverStream) newInFlightData(ctx context.Context, method string, bat _, span := r.tracer.Start(ctx, "otel_arrow_stream_inflight") r.inFlightWG.Add(1) - r.telemetryBuilder.OtelArrowReceiverInFlightRequests.Add(ctx, 1) id := &inFlightData{ receiverStream: r, method: method, @@ -505,13 +497,6 @@ func (id *inFlightData) anyDone(ctx context.Context) { id.releaser() } - if id.uncompSize != 0 { - id.telemetryBuilder.OtelArrowReceiverInFlightBytes.Add(ctx, -id.uncompSize) - } - if id.numItems != 0 { - id.telemetryBuilder.OtelArrowReceiverInFlightItems.Add(ctx, int64(-id.numItems)) - } - // The netstats code knows that uncompressed size is // unreliable for arrow transport, so we instrument it // directly here. 
Only the primary direction of transport @@ -521,7 +506,6 @@ func (id *inFlightData) anyDone(ctx context.Context) { sized.Length = id.uncompSize id.netReporter.CountReceive(ctx, sized) - id.telemetryBuilder.OtelArrowReceiverInFlightRequests.Add(ctx, -1) id.inFlightWG.Done() } @@ -638,9 +622,6 @@ func (r *receiverStream) recvOne(streamCtx context.Context, serverStream anyStre flight.numItems = numItems flight.releaser = releaser - r.telemetryBuilder.OtelArrowReceiverInFlightBytes.Add(inflightCtx, uncompSize) - r.telemetryBuilder.OtelArrowReceiverInFlightItems.Add(inflightCtx, int64(numItems)) - // Recognize that the request is still in-flight via consumeAndRespond() flight.refs.Add(1) diff --git a/receiver/otelarrowreceiver/internal/arrow/arrow_test.go b/receiver/otelarrowreceiver/internal/arrow/arrow_test.go index 57a92c7d922e..78ac585d5c23 100644 --- a/receiver/otelarrowreceiver/internal/arrow/arrow_test.go +++ b/receiver/otelarrowreceiver/internal/arrow/arrow_test.go @@ -50,9 +50,11 @@ import ( ) var noopTelemetry = componenttest.NewNopTelemetrySettings() +var testingID = component.MustNewID("testing") func defaultBQ() admission2.Queue { - return admission2.NewBoundedQueue(noopTelemetry, 100000, 10) + bq, _ := admission2.NewBoundedQueue(testingID, noopTelemetry, 100000, 10) + return bq } type ( @@ -462,11 +464,12 @@ func TestBoundedQueueLimits(t *testing.T) { // internal/otelarrow/test/e2e_test.go. if tt.expectErr { ctc.stream.EXPECT().Send(statusOKFor(batch.BatchId)).Times(0) - bq = admission2.NewBoundedQueue(noopTelemetry, uint64(sizer.TracesSize(td)-100), 0) + bq, err = admission2.NewBoundedQueue(testingID, noopTelemetry, uint64(sizer.TracesSize(td)-100), 0) } else { ctc.stream.EXPECT().Send(statusOKFor(batch.BatchId)).Times(1).Return(nil) - bq = admission2.NewBoundedQueue(noopTelemetry, uint64(tt.admitLimit), 0) + bq, err = admission2.NewBoundedQueue(testingID, noopTelemetry, uint64(tt.admitLimit), 0) } + require.NoError(t, err) ctc.start(ctc.newRealConsumer, bq) ctc.putBatch(batch, nil) diff --git a/receiver/otelarrowreceiver/internal/logs/otlp_test.go b/receiver/otelarrowreceiver/internal/logs/otlp_test.go index d270ff138bfb..d8c44d590542 100644 --- a/receiver/otelarrowreceiver/internal/logs/otlp_test.go +++ b/receiver/otelarrowreceiver/internal/logs/otlp_test.go @@ -206,7 +206,8 @@ func otlpReceiverOnGRPCServer(t *testing.T, lc consumer.Logs) (net.Addr, *tracet ReceiverCreateSettings: set, }) require.NoError(t, err) - bq := admission2.NewBoundedQueue(telset, maxBytes, 0) + bq, err := admission2.NewBoundedQueue(set.ID, telset, maxBytes, 0) + require.NoError(t, err) r := New(zap.NewNop(), lc, obsrecv, bq) // Now run it as a gRPC server srv := grpc.NewServer() diff --git a/receiver/otelarrowreceiver/internal/metadata/generated_telemetry.go b/receiver/otelarrowreceiver/internal/metadata/generated_telemetry.go deleted file mode 100644 index eb423c1975f8..000000000000 --- a/receiver/otelarrowreceiver/internal/metadata/generated_telemetry.go +++ /dev/null @@ -1,77 +0,0 @@ -// Code generated by mdatagen. DO NOT EDIT. - -package metadata - -import ( - "errors" - - "go.opentelemetry.io/otel/metric" - "go.opentelemetry.io/otel/trace" - - "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/config/configtelemetry" -) - -// Deprecated: [v0.108.0] use LeveledMeter instead. 
-func Meter(settings component.TelemetrySettings) metric.Meter { - return settings.MeterProvider.Meter("github.com/open-telemetry/opentelemetry-collector-contrib/receiver/otelarrowreceiver") -} - -func LeveledMeter(settings component.TelemetrySettings, level configtelemetry.Level) metric.Meter { - return settings.LeveledMeterProvider(level).Meter("github.com/open-telemetry/opentelemetry-collector-contrib/receiver/otelarrowreceiver") -} - -func Tracer(settings component.TelemetrySettings) trace.Tracer { - return settings.TracerProvider.Tracer("github.com/open-telemetry/opentelemetry-collector-contrib/receiver/otelarrowreceiver") -} - -// TelemetryBuilder provides an interface for components to report telemetry -// as defined in metadata and user config. -type TelemetryBuilder struct { - meter metric.Meter - OtelArrowReceiverInFlightBytes metric.Int64UpDownCounter - OtelArrowReceiverInFlightItems metric.Int64UpDownCounter - OtelArrowReceiverInFlightRequests metric.Int64UpDownCounter - meters map[configtelemetry.Level]metric.Meter -} - -// TelemetryBuilderOption applies changes to default builder. -type TelemetryBuilderOption interface { - apply(*TelemetryBuilder) -} - -type telemetryBuilderOptionFunc func(mb *TelemetryBuilder) - -func (tbof telemetryBuilderOptionFunc) apply(mb *TelemetryBuilder) { - tbof(mb) -} - -// NewTelemetryBuilder provides a struct with methods to update all internal telemetry -// for a component -func NewTelemetryBuilder(settings component.TelemetrySettings, options ...TelemetryBuilderOption) (*TelemetryBuilder, error) { - builder := TelemetryBuilder{meters: map[configtelemetry.Level]metric.Meter{}} - for _, op := range options { - op.apply(&builder) - } - builder.meters[configtelemetry.LevelBasic] = LeveledMeter(settings, configtelemetry.LevelBasic) - var err, errs error - builder.OtelArrowReceiverInFlightBytes, err = builder.meters[configtelemetry.LevelBasic].Int64UpDownCounter( - "otelcol_otel_arrow_receiver_in_flight_bytes", - metric.WithDescription("Number of bytes in flight"), - metric.WithUnit("By"), - ) - errs = errors.Join(errs, err) - builder.OtelArrowReceiverInFlightItems, err = builder.meters[configtelemetry.LevelBasic].Int64UpDownCounter( - "otelcol_otel_arrow_receiver_in_flight_items", - metric.WithDescription("Number of items in flight"), - metric.WithUnit("1"), - ) - errs = errors.Join(errs, err) - builder.OtelArrowReceiverInFlightRequests, err = builder.meters[configtelemetry.LevelBasic].Int64UpDownCounter( - "otelcol_otel_arrow_receiver_in_flight_requests", - metric.WithDescription("Number of requests in flight"), - metric.WithUnit("1"), - ) - errs = errors.Join(errs, err) - return &builder, errs -} diff --git a/receiver/otelarrowreceiver/internal/metrics/otlp_test.go b/receiver/otelarrowreceiver/internal/metrics/otlp_test.go index a78378f54bd3..b0059a059685 100644 --- a/receiver/otelarrowreceiver/internal/metrics/otlp_test.go +++ b/receiver/otelarrowreceiver/internal/metrics/otlp_test.go @@ -206,7 +206,8 @@ func otlpReceiverOnGRPCServer(t *testing.T, mc consumer.Metrics) (net.Addr, *tra ReceiverCreateSettings: set, }) require.NoError(t, err) - bq := admission2.NewBoundedQueue(telset, maxBytes, 0) + bq, err := admission2.NewBoundedQueue(set.ID, telset, maxBytes, 0) + require.NoError(t, err) r := New(zap.NewNop(), mc, obsrecv, bq) // Now run it as a gRPC server srv := grpc.NewServer() diff --git a/receiver/otelarrowreceiver/internal/trace/otlp_test.go b/receiver/otelarrowreceiver/internal/trace/otlp_test.go index 251e194dde3c..49c3ffa4c2b2 100644 --- 
a/receiver/otelarrowreceiver/internal/trace/otlp_test.go +++ b/receiver/otelarrowreceiver/internal/trace/otlp_test.go @@ -206,7 +206,8 @@ func otlpReceiverOnGRPCServer(t *testing.T, tc consumer.Traces) (net.Addr, *trac ReceiverCreateSettings: set, }) require.NoError(t, err) - bq := admission2.NewBoundedQueue(telset, maxBytes, 0) + bq, err := admission2.NewBoundedQueue(set.ID, telset, maxBytes, 0) + require.NoError(t, err) r := New(zap.NewNop(), tc, obsrecv, bq) // Now run it as a gRPC server srv := grpc.NewServer() diff --git a/receiver/otelarrowreceiver/metadata.yaml b/receiver/otelarrowreceiver/metadata.yaml index ab229b2e5f78..b9b3b23dcc04 100644 --- a/receiver/otelarrowreceiver/metadata.yaml +++ b/receiver/otelarrowreceiver/metadata.yaml @@ -7,29 +7,3 @@ status: distributions: [contrib, k8s] codeowners: active: [jmacd, moh-osman3] - -telemetry: - metrics: - otel_arrow_receiver_in_flight_bytes: - description: Number of bytes in flight - unit: By - enabled: true - sum: - monotonic: false - value_type: int - - otel_arrow_receiver_in_flight_items: - description: Number of items in flight - enabled: true - unit: "1" - sum: - monotonic: false - value_type: int - - otel_arrow_receiver_in_flight_requests: - description: Number of requests in flight - enabled: true - unit: "1" - sum: - monotonic: false - value_type: int diff --git a/receiver/otelarrowreceiver/otelarrow.go b/receiver/otelarrowreceiver/otelarrow.go index b21bea395e96..c53c85b32190 100644 --- a/receiver/otelarrowreceiver/otelarrow.go +++ b/receiver/otelarrowreceiver/otelarrow.go @@ -70,7 +70,10 @@ func newOTelArrowReceiver(cfg *Config, set receiver.Settings) (*otelArrowReceive if cfg.Admission.RequestLimitMiB == 0 { bq = admission2.NewUnboundedQueue() } else { - bq = admission2.NewBoundedQueue(set.TelemetrySettings, cfg.Admission.RequestLimitMiB<<20, cfg.Admission.WaitingLimitMiB<<20) + bq, err = admission2.NewBoundedQueue(set.ID, set.TelemetrySettings, cfg.Admission.RequestLimitMiB<<20, cfg.Admission.WaitingLimitMiB<<20) + if err != nil { + return nil, err + } } r := &otelArrowReceiver{ cfg: cfg, From 220fe1351f08e0acec2d4c5edc238654069688d3 Mon Sep 17 00:00:00 2001 From: Tyler Helmuth <12352919+TylerHelmuth@users.noreply.github.com> Date: Fri, 15 Nov 2024 15:11:37 -0700 Subject: [PATCH 08/14] [pkg/ottl] Add ottl cookbook talk (#36391) --- pkg/ottl/README.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pkg/ottl/README.md b/pkg/ottl/README.md index 738f15891410..50ec9d9449ea 100644 --- a/pkg/ottl/README.md +++ b/pkg/ottl/README.md @@ -161,4 +161,5 @@ service: These are previous conference presentations given about OTTL: - [OTTL Me Why Transforming Telemetry in the OpenTelemetry Collector Just Got Better](https://youtu.be/uVs0oUV72CE) -- [Managing Observability Data at the Edge with the OpenTelemetry Collector and OTTL](https://youtu.be/GO0ulYLxy_8) \ No newline at end of file +- [Managing Observability Data at the Edge with the OpenTelemetry Collector and OTTL](https://youtu.be/GO0ulYLxy_8) +- [The OTTL Cookbook: A Collection of Solutions to Common Problems](https://www.youtube.com/watch?v=UGTU0-KT_60) \ No newline at end of file From f6cebd2e4c49111cc0343c8c5e55fb0cb127ca74 Mon Sep 17 00:00:00 2001 From: Florian Bacher Date: Fri, 15 Nov 2024 23:51:58 +0100 Subject: [PATCH 09/14] [pkg/ottl] Fix handling of slice values in `flatten` function by considering `depth` option (#36198) #### Description This PR adapts the `flatten` function to also consider the `depth` option when handling slice values. 
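For illustration, and not as part of the change set, the sketch below builds the same kind of nested attributes the e2e test uses and notes in comments how `flatten(attributes, depth=1)` handles the slice before and after this fix; the `main` wrapper and the printing are assumptions made only so the snippet runs on its own.

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pcommon"
)

func main() {
	// Nested input: attributes["foo"]["slice"] = ["val"]
	attrs := pcommon.NewMap()
	foo := attrs.PutEmptyMap("foo")
	foo.PutEmptySlice("slice").AppendEmpty().SetStr("val")

	// With `flatten(attributes, depth=1)`:
	//   previously: the slice was expanded past the depth limit, yielding "foo.slice.0": "val"
	//   with this fix: depth=1 stops at the slice, yielding "foo.slice": ["val"]
	fmt.Println(attrs.AsRaw()) // map[foo:map[slice:[val]]]
}
```

The updated expectation in `pkg/ottl/e2e/e2e_test.go` encodes the same distinction by replacing `PutStr("foo.slice.0", "val")` with `PutEmptySlice("foo.slice")`.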
Before this change, the function flattened slice values beyond the given depth. #### Link to tracking issue Fixes #36161 #### Testing Added unit and e2e tests #### Documentation No changes here, as the docs already mention the expected behavior of the function when the `depth` option is set --------- Signed-off-by: Florian Bacher Co-authored-by: Evan Bradley <11745660+evan-bradley@users.noreply.github.com> --- ...-flatten-fix-top-level-slice-handling.yaml | 27 ++++++++ pkg/ottl/e2e/e2e_test.go | 15 +---- pkg/ottl/ottlfuncs/func_flatten.go | 6 +- pkg/ottl/ottlfuncs/func_flatten_test.go | 61 +++++++++++++++++-- 4 files changed, 87 insertions(+), 22 deletions(-) create mode 100644 .chloggen/ottl-flatten-fix-top-level-slice-handling.yaml diff --git a/.chloggen/ottl-flatten-fix-top-level-slice-handling.yaml b/.chloggen/ottl-flatten-fix-top-level-slice-handling.yaml new file mode 100644 index 000000000000..a625a284e379 --- /dev/null +++ b/.chloggen/ottl-flatten-fix-top-level-slice-handling.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: bug_fix + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: pkg/ottl + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Respect the `depth` option when flattening slices using `flatten` + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [36161] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: The `depth` option is also now required to be at least `1`. + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. 
+# Default: '[user]' +change_logs: [] diff --git a/pkg/ottl/e2e/e2e_test.go b/pkg/ottl/e2e/e2e_test.go index 88723b9b6a41..d1e7b0828fd7 100644 --- a/pkg/ottl/e2e/e2e_test.go +++ b/pkg/ottl/e2e/e2e_test.go @@ -105,19 +105,6 @@ func Test_e2e_editors(t *testing.T) { m.CopyTo(tCtx.GetLogRecord().Attributes()) }, }, - { - statement: `flatten(attributes, depth=0)`, - want: func(tCtx ottllog.TransformContext) { - tCtx.GetLogRecord().Attributes().Remove("things") - m1 := tCtx.GetLogRecord().Attributes().PutEmptyMap("things.0") - m1.PutStr("name", "foo") - m1.PutInt("value", 2) - - m2 := tCtx.GetLogRecord().Attributes().PutEmptyMap("things.1") - m2.PutStr("name", "bar") - m2.PutInt("value", 5) - }, - }, { statement: `flatten(attributes, depth=1)`, want: func(tCtx ottllog.TransformContext) { @@ -131,7 +118,7 @@ func Test_e2e_editors(t *testing.T) { m.PutStr("foo.flags", "pass") m.PutStr("foo.bar", "pass") m.PutStr("foo.flags", "pass") - m.PutStr("foo.slice.0", "val") + m.PutEmptySlice("foo.slice").AppendEmpty().SetStr("val") m1 := m.PutEmptyMap("things.0") m1.PutStr("name", "foo") diff --git a/pkg/ottl/ottlfuncs/func_flatten.go b/pkg/ottl/ottlfuncs/func_flatten.go index ebe30024612c..709eb1bef746 100644 --- a/pkg/ottl/ottlfuncs/func_flatten.go +++ b/pkg/ottl/ottlfuncs/func_flatten.go @@ -37,8 +37,8 @@ func flatten[K any](target ottl.PMapGetter[K], p ottl.Optional[string], d ottl.O depth := int64(math.MaxInt64) if !d.IsEmpty() { depth = d.Get() - if depth < 0 { - return nil, fmt.Errorf("invalid depth for flatten function, %d cannot be negative", depth) + if depth < 1 { + return nil, fmt.Errorf("invalid depth '%d' for flatten function, must be greater than 0", depth) } } @@ -69,7 +69,7 @@ func flattenHelper(m pcommon.Map, result pcommon.Map, prefix string, currentDept switch { case v.Type() == pcommon.ValueTypeMap && currentDepth < maxDepth: flattenHelper(v.Map(), result, prefix+k, currentDepth+1, maxDepth) - case v.Type() == pcommon.ValueTypeSlice: + case v.Type() == pcommon.ValueTypeSlice && currentDepth < maxDepth: for i := 0; i < v.Slice().Len(); i++ { v.Slice().At(i).CopyTo(result.PutEmpty(fmt.Sprintf("%v.%v", prefix+k, i))) } diff --git a/pkg/ottl/ottlfuncs/func_flatten_test.go b/pkg/ottl/ottlfuncs/func_flatten_test.go index 8b37355d3f56..39448e6e2fd0 100644 --- a/pkg/ottl/ottlfuncs/func_flatten_test.go +++ b/pkg/ottl/ottlfuncs/func_flatten_test.go @@ -144,6 +144,39 @@ func Test_flatten(t *testing.T) { }, }, }, + { + name: "max depth with slice", + target: map[string]any{ + "0": map[string]any{ + "1": map[string]any{ + "2": map[string]any{ + "3": "value", + }, + }, + }, + "1": map[string]any{ + "1": []any{ + map[string]any{ + "1": "value", + }, + }, + }, + }, + prefix: ottl.Optional[string]{}, + depth: ottl.NewTestingOptional[int64](1), + expected: map[string]any{ + "0.1": map[string]any{ + "2": map[string]any{ + "3": "value", + }, + }, + "1.1": []any{ + map[string]any{ + "1": "value", + }, + }, + }, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -179,11 +212,29 @@ func Test_flatten_bad_target(t *testing.T) { } func Test_flatten_bad_depth(t *testing.T) { - target := &ottl.StandardPMapGetter[any]{ - Getter: func(_ context.Context, _ any) (any, error) { - return pcommon.NewMap(), nil + tests := []struct { + name string + depth ottl.Optional[int64] + }{ + { + name: "negative depth", + depth: ottl.NewTestingOptional[int64](-1), + }, + { + name: "zero depth", + depth: ottl.NewTestingOptional[int64](0), }, } - _, err := flatten[any](target, ottl.Optional[string]{}, 
ottl.NewTestingOptional[int64](-1)) - assert.Error(t, err) + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + target := &ottl.StandardPMapGetter[any]{ + Getter: func(_ context.Context, _ any) (any, error) { + return pcommon.NewMap(), nil + }, + } + _, err := flatten[any](target, ottl.Optional[string]{}, tt.depth) + assert.Error(t, err) + }) + } } From 4711a64941665534e99faadbc1deaea8a5fd47ef Mon Sep 17 00:00:00 2001 From: Daniel Jaglowski Date: Fri, 15 Nov 2024 18:11:00 -0500 Subject: [PATCH 10/14] [chore][ptraceutiltest] Use native structs in options (#36382) --- .../internal/plogutil/logs_test.go | 10 +-- .../internal/plogutiltest/logs.go | 2 +- .../internal/plogutiltest/logs_test.go | 12 +-- .../internal/pmetricutiltest/metrics.go | 4 +- .../internal/ptraceutil/traces_test.go | 80 ++++++++++++++----- .../internal/ptraceutiltest/traces.go | 73 +++++++---------- .../internal/ptraceutiltest/traces_test.go | 73 ++++++----------- connector/routingconnector/logs_test.go | 2 +- connector/routingconnector/traces_test.go | 19 +++-- 9 files changed, 145 insertions(+), 130 deletions(-) diff --git a/connector/routingconnector/internal/plogutil/logs_test.go b/connector/routingconnector/internal/plogutil/logs_test.go index 56637d3f9a28..b428e2e8583e 100644 --- a/connector/routingconnector/internal/plogutil/logs_test.go +++ b/connector/routingconnector/internal/plogutil/logs_test.go @@ -63,7 +63,7 @@ func TestMoveResourcesIf(t *testing.T) { from: plogutiltest.NewLogs("AB", "CD", "EF"), to: plogutiltest.NewLogs("1", "2", "3"), expectFrom: plogutiltest.NewLogs("A", "CD", "EF"), - expectTo: plogutiltest.New( + expectTo: plogutiltest.NewLogsFromOpts( plogutiltest.Resource("1", plogutiltest.Scope("2", plogutiltest.LogRecord("3")), ), @@ -132,7 +132,7 @@ func TestMoveRecordsWithContextIf(t *testing.T) { }, from: plogutiltest.NewLogs("AB", "CD", "EF"), to: plog.NewLogs(), - expectFrom: plogutiltest.New( + expectFrom: plogutiltest.NewLogsFromOpts( plogutiltest.Resource("A", plogutiltest.Scope("C", plogutiltest.LogRecord("E"), plogutiltest.LogRecord("F")), plogutiltest.Scope("D", plogutiltest.LogRecord("E"), plogutiltest.LogRecord("F")), @@ -161,7 +161,7 @@ func TestMoveRecordsWithContextIf(t *testing.T) { }, from: plogutiltest.NewLogs("AB", "CD", "EF"), to: plog.NewLogs(), - expectFrom: plogutiltest.New( + expectFrom: plogutiltest.NewLogsFromOpts( plogutiltest.Resource("A", plogutiltest.Scope("C", plogutiltest.LogRecord("E"), plogutiltest.LogRecord("F")), plogutiltest.Scope("D", plogutiltest.LogRecord("E")), @@ -191,7 +191,7 @@ func TestMoveRecordsWithContextIf(t *testing.T) { }, from: plogutiltest.NewLogs("AB", "CD", "EF"), to: plog.NewLogs(), - expectFrom: plogutiltest.New( + expectFrom: plogutiltest.NewLogsFromOpts( plogutiltest.Resource("A", plogutiltest.Scope("C", plogutiltest.LogRecord("E"), plogutiltest.LogRecord("F")), plogutiltest.Scope("D", plogutiltest.LogRecord("E"), plogutiltest.LogRecord("F")), @@ -211,7 +211,7 @@ func TestMoveRecordsWithContextIf(t *testing.T) { from: plogutiltest.NewLogs("AB", "CD", "EF"), to: plogutiltest.NewLogs("1", "2", "3"), expectFrom: plogutiltest.NewLogs("AB", "C", "EF"), - expectTo: plogutiltest.New( + expectTo: plogutiltest.NewLogsFromOpts( plogutiltest.Resource("1", plogutiltest.Scope("2", plogutiltest.LogRecord("3")), ), diff --git a/connector/routingconnector/internal/plogutiltest/logs.go b/connector/routingconnector/internal/plogutiltest/logs.go index 675d5ad3eb85..81bd5761a3b2 100644 --- 
a/connector/routingconnector/internal/plogutiltest/logs.go +++ b/connector/routingconnector/internal/plogutiltest/logs.go @@ -37,7 +37,7 @@ func NewLogs(resourceIDs, scopeIDs, logRecordIDs string) plog.Logs { return ld } -func New(resources ...plog.ResourceLogs) plog.Logs { +func NewLogsFromOpts(resources ...plog.ResourceLogs) plog.Logs { ld := plog.NewLogs() for _, resource := range resources { resource.CopyTo(ld.ResourceLogs().AppendEmpty()) diff --git a/connector/routingconnector/internal/plogutiltest/logs_test.go b/connector/routingconnector/internal/plogutiltest/logs_test.go index 0f9d0de631aa..c00cef21bec4 100644 --- a/connector/routingconnector/internal/plogutiltest/logs_test.go +++ b/connector/routingconnector/internal/plogutiltest/logs_test.go @@ -17,7 +17,7 @@ func TestNewLogs(t *testing.T) { t.Run("empty", func(t *testing.T) { expected := plog.NewLogs() assert.NoError(t, plogtest.CompareLogs(expected, plogutiltest.NewLogs("", "", ""))) - assert.NoError(t, plogtest.CompareLogs(expected, plogutiltest.New())) + assert.NoError(t, plogtest.CompareLogs(expected, plogutiltest.NewLogsFromOpts())) }) t.Run("simple", func(t *testing.T) { @@ -32,7 +32,7 @@ func TestNewLogs(t *testing.T) { return ld }() assert.NoError(t, plogtest.CompareLogs(expected, plogutiltest.NewLogs("A", "B", "C"))) - assert.NoError(t, plogtest.CompareLogs(expected, plogutiltest.New( + assert.NoError(t, plogtest.CompareLogs(expected, plogutiltest.NewLogsFromOpts( plogutiltest.Resource("A", plogutiltest.Scope("B", plogutiltest.LogRecord("C"))), ))) }) @@ -55,7 +55,7 @@ func TestNewLogs(t *testing.T) { return ld }() assert.NoError(t, plogtest.CompareLogs(expected, plogutiltest.NewLogs("AB", "C", "D"))) - assert.NoError(t, plogtest.CompareLogs(expected, plogutiltest.New( + assert.NoError(t, plogtest.CompareLogs(expected, plogutiltest.NewLogsFromOpts( plogutiltest.Resource("A", plogutiltest.Scope("C", plogutiltest.LogRecord("D"))), plogutiltest.Resource("B", plogutiltest.Scope("C", plogutiltest.LogRecord("D"))), ))) @@ -77,7 +77,7 @@ func TestNewLogs(t *testing.T) { return ld }() assert.NoError(t, plogtest.CompareLogs(expected, plogutiltest.NewLogs("A", "BC", "D"))) - assert.NoError(t, plogtest.CompareLogs(expected, plogutiltest.New( + assert.NoError(t, plogtest.CompareLogs(expected, plogutiltest.NewLogsFromOpts( plogutiltest.Resource("A", plogutiltest.Scope("B", plogutiltest.LogRecord("D")), plogutiltest.Scope("C", plogutiltest.LogRecord("D")), @@ -99,7 +99,7 @@ func TestNewLogs(t *testing.T) { return ld }() assert.NoError(t, plogtest.CompareLogs(expected, plogutiltest.NewLogs("A", "B", "CD"))) - assert.NoError(t, plogtest.CompareLogs(expected, plogutiltest.New( + assert.NoError(t, plogtest.CompareLogs(expected, plogutiltest.NewLogsFromOpts( plogutiltest.Resource("A", plogutiltest.Scope("B", plogutiltest.LogRecord("C"), plogutiltest.LogRecord("D"))), ))) }) @@ -127,7 +127,7 @@ func TestNewLogs(t *testing.T) { l.Body().SetStr("logG") // resourceB.scopeD.logG return ld }() - assert.NoError(t, plogtest.CompareLogs(expected, plogutiltest.New( + assert.NoError(t, plogtest.CompareLogs(expected, plogutiltest.NewLogsFromOpts( plogutiltest.Resource("A", plogutiltest.Scope("C", plogutiltest.LogRecord("E")), plogutiltest.Scope("D", plogutiltest.LogRecord("E")), diff --git a/connector/routingconnector/internal/pmetricutiltest/metrics.go b/connector/routingconnector/internal/pmetricutiltest/metrics.go index 18b59eb29cf8..fb1902759e70 100644 --- a/connector/routingconnector/internal/pmetricutiltest/metrics.go +++ 
b/connector/routingconnector/internal/pmetricutiltest/metrics.go @@ -5,7 +5,7 @@ package pmetricutiltest // import "github.com/open-telemetry/opentelemetry-colle import "go.opentelemetry.io/collector/pdata/pmetric" -// TestMetrics returns a pmetric.Metrics with a uniform structure where resources, scopes, metrics, +// NewMetrics returns a pmetric.Metrics with a uniform structure where resources, scopes, metrics, // and datapoints are identical across all instances, except for one identifying field. // // Identifying fields: @@ -14,7 +14,7 @@ import "go.opentelemetry.io/collector/pdata/pmetric" // - Metrics have a name with a value of "metricN" and a single time series of data points. // - DataPoints have an attribute "dpName" with a value of "dpN". // -// Example: TestMetrics("AB", "XYZ", "MN", "1234") returns: +// Example: NewMetrics("AB", "XYZ", "MN", "1234") returns: // // resourceA, resourceB // each with scopeX, scopeY, scopeZ diff --git a/connector/routingconnector/internal/ptraceutil/traces_test.go b/connector/routingconnector/internal/ptraceutil/traces_test.go index 40d05c5bec8e..9baca4fda0a1 100644 --- a/connector/routingconnector/internal/ptraceutil/traces_test.go +++ b/connector/routingconnector/internal/ptraceutil/traces_test.go @@ -130,12 +130,21 @@ func TestMoveSpansWithContextIf(t *testing.T) { from: ptraceutiltest.NewTraces("AB", "CD", "EF", "GH"), to: ptrace.NewTraces(), expectFrom: ptraceutiltest.NewTracesFromOpts( - ptraceutiltest.WithResource('A', - ptraceutiltest.WithScope('C', ptraceutiltest.WithSpan('E', "GH"), ptraceutiltest.WithSpan('F', "GH")), - ptraceutiltest.WithScope('D', ptraceutiltest.WithSpan('E', "GH"), ptraceutiltest.WithSpan('F', "GH")), + ptraceutiltest.Resource("A", + ptraceutiltest.Scope("C", + ptraceutiltest.Span("E", ptraceutiltest.SpanEvent("G"), ptraceutiltest.SpanEvent("H")), + ptraceutiltest.Span("F", ptraceutiltest.SpanEvent("G"), ptraceutiltest.SpanEvent("H")), + ), + ptraceutiltest.Scope("D", + ptraceutiltest.Span("E", ptraceutiltest.SpanEvent("G"), ptraceutiltest.SpanEvent("H")), + ptraceutiltest.Span("F", ptraceutiltest.SpanEvent("G"), ptraceutiltest.SpanEvent("H")), + ), ), - ptraceutiltest.WithResource('B', - ptraceutiltest.WithScope('D', ptraceutiltest.WithSpan('E', "GH"), ptraceutiltest.WithSpan('F', "GH")), + ptraceutiltest.Resource("B", + ptraceutiltest.Scope("D", + ptraceutiltest.Span("E", ptraceutiltest.SpanEvent("G"), ptraceutiltest.SpanEvent("H")), + ptraceutiltest.Span("F", ptraceutiltest.SpanEvent("G"), ptraceutiltest.SpanEvent("H")), + ), ), ), expectTo: ptraceutiltest.NewTraces("B", "C", "EF", "GH"), @@ -159,13 +168,24 @@ func TestMoveSpansWithContextIf(t *testing.T) { from: ptraceutiltest.NewTraces("AB", "CD", "EF", "GH"), to: ptrace.NewTraces(), expectFrom: ptraceutiltest.NewTracesFromOpts( - ptraceutiltest.WithResource('A', - ptraceutiltest.WithScope('C', ptraceutiltest.WithSpan('E', "GH"), ptraceutiltest.WithSpan('F', "GH")), - ptraceutiltest.WithScope('D', ptraceutiltest.WithSpan('E', "GH")), + ptraceutiltest.Resource("A", + ptraceutiltest.Scope("C", + ptraceutiltest.Span("E", ptraceutiltest.SpanEvent("G"), ptraceutiltest.SpanEvent("H")), + ptraceutiltest.Span("F", ptraceutiltest.SpanEvent("G"), ptraceutiltest.SpanEvent("H")), + ), + ptraceutiltest.Scope("D", + ptraceutiltest.Span("E", ptraceutiltest.SpanEvent("G"), ptraceutiltest.SpanEvent("H")), + ), ), - ptraceutiltest.WithResource('B', - ptraceutiltest.WithScope('C', ptraceutiltest.WithSpan('E', "GH"), ptraceutiltest.WithSpan('F', "GH")), - 
ptraceutiltest.WithScope('D', ptraceutiltest.WithSpan('E', "GH"), ptraceutiltest.WithSpan('F', "GH")), + ptraceutiltest.Resource("B", + ptraceutiltest.Scope("C", + ptraceutiltest.Span("E", ptraceutiltest.SpanEvent("G"), ptraceutiltest.SpanEvent("H")), + ptraceutiltest.Span("F", ptraceutiltest.SpanEvent("G"), ptraceutiltest.SpanEvent("H")), + ), + ptraceutiltest.Scope("D", + ptraceutiltest.Span("E", ptraceutiltest.SpanEvent("G"), ptraceutiltest.SpanEvent("H")), + ptraceutiltest.Span("F", ptraceutiltest.SpanEvent("G"), ptraceutiltest.SpanEvent("H")), + ), ), ), expectTo: ptraceutiltest.NewTraces("A", "D", "F", "GH"), @@ -189,13 +209,23 @@ func TestMoveSpansWithContextIf(t *testing.T) { from: ptraceutiltest.NewTraces("AB", "CD", "EF", "GH"), to: ptrace.NewTraces(), expectFrom: ptraceutiltest.NewTracesFromOpts( - ptraceutiltest.WithResource('A', - ptraceutiltest.WithScope('C', ptraceutiltest.WithSpan('E', "GH"), ptraceutiltest.WithSpan('F', "GH")), - ptraceutiltest.WithScope('D', ptraceutiltest.WithSpan('E', "GH"), ptraceutiltest.WithSpan('F', "GH")), + ptraceutiltest.Resource("A", + ptraceutiltest.Scope("C", + ptraceutiltest.Span("E", ptraceutiltest.SpanEvent("G"), ptraceutiltest.SpanEvent("H")), + ptraceutiltest.Span("F", ptraceutiltest.SpanEvent("G"), ptraceutiltest.SpanEvent("H")), + ), + ptraceutiltest.Scope("D", + ptraceutiltest.Span("E", ptraceutiltest.SpanEvent("G"), ptraceutiltest.SpanEvent("H")), + ptraceutiltest.Span("F", ptraceutiltest.SpanEvent("G"), ptraceutiltest.SpanEvent("H")), + ), ), - ptraceutiltest.WithResource('B', - ptraceutiltest.WithScope('C', ptraceutiltest.WithSpan('F', "GH")), - ptraceutiltest.WithScope('D', ptraceutiltest.WithSpan('F', "GH")), + ptraceutiltest.Resource("B", + ptraceutiltest.Scope("C", + ptraceutiltest.Span("F", ptraceutiltest.SpanEvent("G"), ptraceutiltest.SpanEvent("H")), + ), + ptraceutiltest.Scope("D", + ptraceutiltest.Span("F", ptraceutiltest.SpanEvent("G"), ptraceutiltest.SpanEvent("H")), + ), ), ), expectTo: ptraceutiltest.NewTraces("B", "CD", "E", "GH"), @@ -209,9 +239,19 @@ func TestMoveSpansWithContextIf(t *testing.T) { to: ptraceutiltest.NewTraces("1", "2", "3", "4"), expectFrom: ptraceutiltest.NewTraces("AB", "C", "EF", "GH"), expectTo: ptraceutiltest.NewTracesFromOpts( - ptraceutiltest.WithResource('1', ptraceutiltest.WithScope('2', ptraceutiltest.WithSpan('3', "4"))), - ptraceutiltest.WithResource('A', ptraceutiltest.WithScope('D', ptraceutiltest.WithSpan('E', "GH"), ptraceutiltest.WithSpan('F', "GH"))), - ptraceutiltest.WithResource('B', ptraceutiltest.WithScope('D', ptraceutiltest.WithSpan('E', "GH"), ptraceutiltest.WithSpan('F', "GH"))), + ptraceutiltest.Resource("1", ptraceutiltest.Scope("2", ptraceutiltest.Span("3", ptraceutiltest.SpanEvent("4")))), + ptraceutiltest.Resource("A", + ptraceutiltest.Scope("D", + ptraceutiltest.Span("E", ptraceutiltest.SpanEvent("G"), ptraceutiltest.SpanEvent("H")), + ptraceutiltest.Span("F", ptraceutiltest.SpanEvent("G"), ptraceutiltest.SpanEvent("H")), + ), + ), + ptraceutiltest.Resource("B", + ptraceutiltest.Scope("D", + ptraceutiltest.Span("E", ptraceutiltest.SpanEvent("G"), ptraceutiltest.SpanEvent("H")), + ptraceutiltest.Span("F", ptraceutiltest.SpanEvent("G"), ptraceutiltest.SpanEvent("H")), + ), + ), ), }, } diff --git a/connector/routingconnector/internal/ptraceutiltest/traces.go b/connector/routingconnector/internal/ptraceutiltest/traces.go index 4317a113e34b..b6d4413cbc2c 100644 --- a/connector/routingconnector/internal/ptraceutiltest/traces.go +++ 
b/connector/routingconnector/internal/ptraceutiltest/traces.go @@ -43,56 +43,43 @@ func NewTraces(resourceIDs, scopeIDs, spanIDs, spanEventIDs string) ptrace.Trace return td } -type Resource struct { - id byte - scopes []Scope -} - -type Scope struct { - id byte - spans []Span -} - -type Span struct { - id byte - spanEvents string +func NewTracesFromOpts(resources ...ptrace.ResourceSpans) ptrace.Traces { + md := ptrace.NewTraces() + for _, resource := range resources { + resource.CopyTo(md.ResourceSpans().AppendEmpty()) + } + return md } -func WithResource(id byte, scopes ...Scope) Resource { - r := Resource{id: id} - r.scopes = append(r.scopes, scopes...) - return r +func Resource(id string, scopes ...ptrace.ScopeSpans) ptrace.ResourceSpans { + rm := ptrace.NewResourceSpans() + rm.Resource().Attributes().PutStr("resourceName", "resource"+id) + for _, scope := range scopes { + scope.CopyTo(rm.ScopeSpans().AppendEmpty()) + } + return rm } -func WithScope(id byte, spans ...Span) Scope { - s := Scope{id: id} - s.spans = append(s.spans, spans...) +func Scope(id string, spans ...ptrace.Span) ptrace.ScopeSpans { + s := ptrace.NewScopeSpans() + s.Scope().SetName("scope" + id) + for _, span := range spans { + span.CopyTo(s.Spans().AppendEmpty()) + } return s } -func WithSpan(id byte, spanEvents string) Span { - return Span{id: id, spanEvents: spanEvents} +func Span(id string, ses ...ptrace.SpanEvent) ptrace.Span { + m := ptrace.NewSpan() + m.SetName("span" + id) + for _, se := range ses { + se.CopyTo(m.Events().AppendEmpty()) + } + return m } -// NewTracesFromOpts creates a ptrace.Traces with the specified resources, scopes, metrics, -// and data points. The general idea is the same as NewMetrics, but this function allows for -// more flexibility in creating non-uniform structures. 
-func NewTracesFromOpts(resources ...Resource) ptrace.Traces { - td := ptrace.NewTraces() - for _, resource := range resources { - r := td.ResourceSpans().AppendEmpty() - r.Resource().Attributes().PutStr("resourceName", "resource"+string(resource.id)) - for _, scope := range resource.scopes { - ss := r.ScopeSpans().AppendEmpty() - ss.Scope().SetName("scope" + string(scope.id)) - for _, span := range scope.spans { - s := ss.Spans().AppendEmpty() - s.SetName("span" + string(span.id)) - for i := 0; i < len(span.spanEvents); i++ { - s.Events().AppendEmpty().Attributes().PutStr("spanEventName", "spanEvent"+string(span.spanEvents[i])) - } - } - } - } - return td +func SpanEvent(id string) ptrace.SpanEvent { + dp := ptrace.NewSpanEvent() + dp.Attributes().PutStr("spanEventName", "spanEvent"+id) + return dp } diff --git a/connector/routingconnector/internal/ptraceutiltest/traces_test.go b/connector/routingconnector/internal/ptraceutiltest/traces_test.go index b0bb12690f6d..0c72431638dc 100644 --- a/connector/routingconnector/internal/ptraceutiltest/traces_test.go +++ b/connector/routingconnector/internal/ptraceutiltest/traces_test.go @@ -33,15 +33,10 @@ func TestNewTraces(t *testing.T) { se.Attributes().PutStr("spanEventName", "spanEventD") // resourceA.scopeB.spanC.spanEventD return td }() - fromOpts := ptraceutiltest.NewTracesFromOpts( - ptraceutiltest.WithResource('A', - ptraceutiltest.WithScope('B', - ptraceutiltest.WithSpan('C', "D"), - ), - ), - ) assert.NoError(t, ptracetest.CompareTraces(expected, ptraceutiltest.NewTraces("A", "B", "C", "D"))) - assert.NoError(t, ptracetest.CompareTraces(expected, fromOpts)) + assert.NoError(t, ptracetest.CompareTraces(expected, ptraceutiltest.NewTracesFromOpts( + ptraceutiltest.Resource("A", ptraceutiltest.Scope("B", ptraceutiltest.Span("C", ptraceutiltest.SpanEvent("D")))), + ))) }) t.Run("two_resources", func(t *testing.T) { @@ -65,20 +60,11 @@ func TestNewTraces(t *testing.T) { se.Attributes().PutStr("spanEventName", "spanEventE") // resourceB.scopeC.spanD.spanEventE return td }() - fromOpts := ptraceutiltest.NewTracesFromOpts( - ptraceutiltest.WithResource('A', - ptraceutiltest.WithScope('C', - ptraceutiltest.WithSpan('D', "E"), - ), - ), - ptraceutiltest.WithResource('B', - ptraceutiltest.WithScope('C', - ptraceutiltest.WithSpan('D', "E"), - ), - ), - ) assert.NoError(t, ptracetest.CompareTraces(expected, ptraceutiltest.NewTraces("AB", "C", "D", "E"))) - assert.NoError(t, ptracetest.CompareTraces(expected, fromOpts)) + assert.NoError(t, ptracetest.CompareTraces(expected, ptraceutiltest.NewTracesFromOpts( + ptraceutiltest.Resource("A", ptraceutiltest.Scope("C", ptraceutiltest.Span("D", ptraceutiltest.SpanEvent("E")))), + ptraceutiltest.Resource("B", ptraceutiltest.Scope("C", ptraceutiltest.Span("D", ptraceutiltest.SpanEvent("E")))), + ))) }) t.Run("two_scopes", func(t *testing.T) { @@ -100,18 +86,13 @@ func TestNewTraces(t *testing.T) { se.Attributes().PutStr("spanEventName", "spanEventE") // resourceA.scopeC.spanD.spanEventE return td }() - fromOpts := ptraceutiltest.NewTracesFromOpts( - ptraceutiltest.WithResource('A', - ptraceutiltest.WithScope('B', - ptraceutiltest.WithSpan('D', "E"), - ), - ptraceutiltest.WithScope('C', - ptraceutiltest.WithSpan('D', "E"), - ), - ), - ) assert.NoError(t, ptracetest.CompareTraces(expected, ptraceutiltest.NewTraces("A", "BC", "D", "E"))) - assert.NoError(t, ptracetest.CompareTraces(expected, fromOpts)) + assert.NoError(t, ptracetest.CompareTraces(expected, ptraceutiltest.NewTracesFromOpts( + 
ptraceutiltest.Resource("A", + ptraceutiltest.Scope("B", ptraceutiltest.Span("D", ptraceutiltest.SpanEvent("E"))), + ptraceutiltest.Scope("C", ptraceutiltest.Span("D", ptraceutiltest.SpanEvent("E"))), + ), + ))) }) t.Run("two_spans", func(t *testing.T) { @@ -131,16 +112,15 @@ func TestNewTraces(t *testing.T) { se.Attributes().PutStr("spanEventName", "spanEventE") // resourceA.scopeB.spanD.spanEventE return td }() - fromOpts := ptraceutiltest.NewTracesFromOpts( - ptraceutiltest.WithResource('A', - ptraceutiltest.WithScope('B', - ptraceutiltest.WithSpan('C', "E"), - ptraceutiltest.WithSpan('D', "E"), + assert.NoError(t, ptracetest.CompareTraces(expected, ptraceutiltest.NewTraces("A", "B", "CD", "E"))) + assert.NoError(t, ptracetest.CompareTraces(expected, ptraceutiltest.NewTracesFromOpts( + ptraceutiltest.Resource("A", + ptraceutiltest.Scope("B", + ptraceutiltest.Span("C", ptraceutiltest.SpanEvent("E")), + ptraceutiltest.Span("D", ptraceutiltest.SpanEvent("E")), ), ), - ) - assert.NoError(t, ptracetest.CompareTraces(expected, ptraceutiltest.NewTraces("A", "B", "CD", "E"))) - assert.NoError(t, ptracetest.CompareTraces(expected, fromOpts)) + ))) }) t.Run("two_spanevents", func(t *testing.T) { @@ -158,14 +138,13 @@ func TestNewTraces(t *testing.T) { se.Attributes().PutStr("spanEventName", "spanEventE") // resourceA.scopeB.spanC.spanEventE return td }() - fromOpts := ptraceutiltest.NewTracesFromOpts( - ptraceutiltest.WithResource('A', - ptraceutiltest.WithScope('B', - ptraceutiltest.WithSpan('C', "DE"), + assert.NoError(t, ptracetest.CompareTraces(expected, ptraceutiltest.NewTraces("A", "B", "C", "DE"))) + assert.NoError(t, ptracetest.CompareTraces(expected, ptraceutiltest.NewTracesFromOpts( + ptraceutiltest.Resource("A", + ptraceutiltest.Scope("B", + ptraceutiltest.Span("C", ptraceutiltest.SpanEvent("D"), ptraceutiltest.SpanEvent("E")), ), ), - ) - assert.NoError(t, ptracetest.CompareTraces(expected, ptraceutiltest.NewTraces("A", "B", "C", "DE"))) - assert.NoError(t, ptracetest.CompareTraces(expected, fromOpts)) + ))) }) } diff --git a/connector/routingconnector/logs_test.go b/connector/routingconnector/logs_test.go index bcad290a28bc..3cf9ca03321a 100644 --- a/connector/routingconnector/logs_test.go +++ b/connector/routingconnector/logs_test.go @@ -811,7 +811,7 @@ func TestLogsConnectorDetailed(t *testing.T) { input: plogutiltest.NewLogs("AB", "CD", "EF"), expectSink0: plogutiltest.NewLogs("B", "D", "EF"), expectSink1: plog.Logs{}, - expectSinkD: plogutiltest.New( + expectSinkD: plogutiltest.NewLogsFromOpts( plogutiltest.Resource("A", plogutiltest.Scope("C", plogutiltest.LogRecord("E"), plogutiltest.LogRecord("F")), plogutiltest.Scope("D", plogutiltest.LogRecord("E"), plogutiltest.LogRecord("F")), diff --git a/connector/routingconnector/traces_test.go b/connector/routingconnector/traces_test.go index 78f7e46f414a..fb597e2a6968 100644 --- a/connector/routingconnector/traces_test.go +++ b/connector/routingconnector/traces_test.go @@ -766,12 +766,21 @@ func TestTracesConnectorDetailed(t *testing.T) { expectSink0: ptraceutiltest.NewTraces("B", "D", "EF", "GH"), expectSink1: ptrace.Traces{}, expectSinkD: ptraceutiltest.NewTracesFromOpts( - ptraceutiltest.WithResource('A', - ptraceutiltest.WithScope('C', ptraceutiltest.WithSpan('E', "GH"), ptraceutiltest.WithSpan('F', "GH")), - ptraceutiltest.WithScope('D', ptraceutiltest.WithSpan('E', "GH"), ptraceutiltest.WithSpan('F', "GH")), + ptraceutiltest.Resource("A", + ptraceutiltest.Scope("C", + ptraceutiltest.Span("E", ptraceutiltest.SpanEvent("G"), 
ptraceutiltest.SpanEvent("H")), + ptraceutiltest.Span("F", ptraceutiltest.SpanEvent("G"), ptraceutiltest.SpanEvent("H")), + ), + ptraceutiltest.Scope("D", + ptraceutiltest.Span("E", ptraceutiltest.SpanEvent("G"), ptraceutiltest.SpanEvent("H")), + ptraceutiltest.Span("F", ptraceutiltest.SpanEvent("G"), ptraceutiltest.SpanEvent("H")), + ), ), - ptraceutiltest.WithResource('B', - ptraceutiltest.WithScope('C', ptraceutiltest.WithSpan('E', "GH"), ptraceutiltest.WithSpan('F', "GH")), + ptraceutiltest.Resource("B", + ptraceutiltest.Scope("C", + ptraceutiltest.Span("E", ptraceutiltest.SpanEvent("G"), ptraceutiltest.SpanEvent("H")), + ptraceutiltest.Span("F", ptraceutiltest.SpanEvent("G"), ptraceutiltest.SpanEvent("H")), + ), ), ), }, From 09a70334e584fcba0ab2f417327cec8c5f7055c7 Mon Sep 17 00:00:00 2001 From: Andrzej Stencel Date: Fri, 15 Nov 2024 17:14:52 -0700 Subject: [PATCH 11/14] [processor/span] Add 'keep_original_name' (#36397) This is another version of https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/36120 with fixed conflicts. Thanks @lopes-felipe for creating it! #### Description Some tracing libraries add some attributes into the span name ([ex for the `db.system`](https://github.com/DataDog/dd-trace-java/blob/a420d2c66ade70af0ebc8c58ee2d4784bb0de38a/dd-java-agent/agent-otel/otel-shim/src/main/java/datadog/opentelemetry/shim/trace/OtelConventions.java#L168)), which can be further used to extract it as span attributes with this very processor. However, when that is done, the span name is replaced by a placeholder containing the attribute name. This PR adds the possibility of keeping the original span name unchanged with a `keep_original_name` config property, but keeping the existing behavior (renaming the span) as the default option. --------- Co-authored-by: Felipe Lopes --- .../span-rpocessor-keep-original-name.yaml | 27 +++++++++++++++++++ processor/spanprocessor/README.md | 14 +++++++++- processor/spanprocessor/config.go | 5 ++++ processor/spanprocessor/config_test.go | 14 +++++++++- processor/spanprocessor/span.go | 4 ++- processor/spanprocessor/span_test.go | 25 ++++++++++++++--- processor/spanprocessor/testdata/config.yaml | 9 +++++++ 7 files changed, 91 insertions(+), 7 deletions(-) create mode 100644 .chloggen/span-rpocessor-keep-original-name.yaml diff --git a/.chloggen/span-rpocessor-keep-original-name.yaml b/.chloggen/span-rpocessor-keep-original-name.yaml new file mode 100644 index 000000000000..d3e229b933a5 --- /dev/null +++ b/.chloggen/span-rpocessor-keep-original-name.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: enhancement + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: processor/spanprocessor + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: "Add a new configuration option to keep the original span name when extracting attributes from the span name." + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [36120] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. 
+subtext: + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [user] diff --git a/processor/spanprocessor/README.md b/processor/spanprocessor/README.md index 454b01fa0a07..10eae85904d3 100644 --- a/processor/spanprocessor/README.md +++ b/processor/spanprocessor/README.md @@ -81,6 +81,9 @@ span name that is the output after processing the previous rule. - `break_after_match` (default = false): specifies if processing of rules should stop after the first match. If it is false rule processing will continue to be performed over the modified span name. +- `keep_original_name` (default = false): specifies if the original span name should be kept after +processing the rules. If it is true, the original span name will be kept, +otherwise it will be replaced with the placeholders of the captured attributes. ```yaml span/to_attributes: @@ -92,7 +95,7 @@ span/to_attributes: - regexp-rule3 ... break_after_match: - + keep_original_name: ``` Example: @@ -106,6 +109,15 @@ span/to_attributes: to_attributes: rules: - ^\/api\/v1\/document\/(?P.*)\/update$ + +# This example will add the same new "documentId"="12345678" attribute, +# but now resulting in an unchanged span name (/api/v1/document/12345678/update). +span/to_attributes_keep_original_name: + name: + to_attributes: + keep_original_name: true + rules: + - ^\/api\/v1\/document\/(?P.*)\/update$ ``` ### Set status for span diff --git a/processor/spanprocessor/config.go b/processor/spanprocessor/config.go index 1f9c536a33de..b96d4dbbbc19 100644 --- a/processor/spanprocessor/config.go +++ b/processor/spanprocessor/config.go @@ -68,6 +68,11 @@ type ToAttributes struct { // match. If it is false rule processing will continue to be performed over the // modified span name. BreakAfterMatch bool `mapstructure:"break_after_match"` + + // KeepOriginalName specifies if the original span name should be kept after + // processing the rules. If it is true the original span name will be kept, + // otherwise it will be replaced with the placeholders of the captured attributes. 
+ KeepOriginalName bool `mapstructure:"keep_original_name"` } type Status struct { diff --git a/processor/spanprocessor/config_test.go b/processor/spanprocessor/config_test.go index f0488fb1c0df..b4f3aa0e1d24 100644 --- a/processor/spanprocessor/config_test.go +++ b/processor/spanprocessor/config_test.go @@ -46,7 +46,19 @@ func TestLoadingConfig(t *testing.T) { expected: &Config{ Rename: Name{ ToAttributes: &ToAttributes{ - Rules: []string{`^\/api\/v1\/document\/(?P.*)\/update$`}, + Rules: []string{`^\/api\/v1\/document\/(?P.*)\/update$`}, + KeepOriginalName: false, + }, + }, + }, + }, + { + id: component.MustNewIDWithName("span", "to_attributes_keep_original_name"), + expected: &Config{ + Rename: Name{ + ToAttributes: &ToAttributes{ + Rules: []string{`^\/api\/v1\/document\/(?P.*)\/update$`}, + KeepOriginalName: true, }, }, }, diff --git a/processor/spanprocessor/span.go b/processor/spanprocessor/span.go index ec629879703d..0f2992dc4abe 100644 --- a/processor/spanprocessor/span.go +++ b/processor/spanprocessor/span.go @@ -213,7 +213,9 @@ func (sp *spanProcessor) processToAttributes(span ptrace.Span) { } // Set new span name. - span.SetName(sb.String()) + if !sp.config.Rename.ToAttributes.KeepOriginalName { + span.SetName(sb.String()) + } if sp.config.Rename.ToAttributes.BreakAfterMatch { // Stop processing, break after first match is requested. diff --git a/processor/spanprocessor/span_test.go b/processor/spanprocessor/span_test.go index c759feb344ad..c25a059d1f6d 100644 --- a/processor/spanprocessor/span_test.go +++ b/processor/spanprocessor/span_test.go @@ -414,8 +414,9 @@ func TestSpanProcessor_NilName(t *testing.T) { // TestSpanProcessor_ToAttributes func TestSpanProcessor_ToAttributes(t *testing.T) { testCases := []struct { - rules []string - breakAfterMatch bool + rules []string + breakAfterMatch bool + keepOriginalName bool testCase }{ { @@ -460,9 +461,24 @@ func TestSpanProcessor_ToAttributes(t *testing.T) { { rules: []string{ - `^\/api\/v1\/document\/(?P.*)\/update\/4$`, - `^\/api\/(?P.*)\/document\/(?P.*)\/update\/4$`, + `^\/api\/.*\/document\/(?P.*)\/update\/3$`, + `^\/api\/(?P.*)\/document\/.*\/update\/3$`}, + testCase: testCase{ + inputName: "/api/v1/document/321083210/update/3", + outputName: "/api/v1/document/321083210/update/3", + outputAttributes: map[string]any{ + "documentId": "321083210", + "version": "v1", + }, }, + breakAfterMatch: false, + keepOriginalName: true, + }, + + { + rules: []string{ + `^\/api\/v1\/document\/(?P.*)\/update\/4$`, + `^\/api\/(?P.*)\/document\/(?P.*)\/update\/4$`}, testCase: testCase{ inputName: "/api/v1/document/321083210/update/4", outputName: "/api/v1/document/{documentId}/update/4", @@ -491,6 +507,7 @@ func TestSpanProcessor_ToAttributes(t *testing.T) { for _, tc := range testCases { oCfg.Rename.ToAttributes.Rules = tc.rules oCfg.Rename.ToAttributes.BreakAfterMatch = tc.breakAfterMatch + oCfg.Rename.ToAttributes.KeepOriginalName = tc.keepOriginalName tp, err := factory.CreateTraces(context.Background(), processortest.NewNopSettings(), oCfg, consumertest.NewNop()) require.NoError(t, err) require.NotNil(t, tp) diff --git a/processor/spanprocessor/testdata/config.yaml b/processor/spanprocessor/testdata/config.yaml index 26c1b3fd2f68..c86e4c7b5d48 100644 --- a/processor/spanprocessor/testdata/config.yaml +++ b/processor/spanprocessor/testdata/config.yaml @@ -61,6 +61,15 @@ span/to_attributes: rules: - ^\/api\/v1\/document\/(?P.*)\/update$ +# This example will add the same new "documentId"="12345678" attribute, +# but now resulting in an 
unchanged span name (/api/v1/document/12345678/update). +span/to_attributes_keep_original_name: + name: + to_attributes: + keep_original_name: true + rules: + - ^\/api\/v1\/document\/(?P.*)\/update$ + # The following demonstrates renaming the span name to `{operation_website}` # and adding the attribute {Key: operation_website, Value: } # when the span has the following properties From fbad29c8962bc2629f72ce72f1fb0517ce850d30 Mon Sep 17 00:00:00 2001 From: Anthony Mirabella Date: Fri, 15 Nov 2024 19:20:08 -0500 Subject: [PATCH 12/14] [cmd/githubgen] Add a flag to skip GitHub organization membership checks (#36398) #### Description Adds a `--skipgithub` flag to allow generating the `CODEOWNERS` file without requiring a PAT to verify membership in the OpenTelemetry GitHub organization. #### Link to tracking issue Fixes #36263 #### Testing Executed command without providing a PAT and confirmed it did not produce errors or unexpected changes to the `CODEOWNERS` file. #### Documentation Updated `README` Signed-off-by: Anthony J Mirabella --- .chloggen/feat_githubgen_skipmembercheck.yaml | 27 +++++++++++++++++++ cmd/githubgen/README.md | 2 +- cmd/githubgen/codeowners.go | 11 +++++--- cmd/githubgen/main.go | 5 ++-- 4 files changed, 39 insertions(+), 6 deletions(-) create mode 100644 .chloggen/feat_githubgen_skipmembercheck.yaml diff --git a/.chloggen/feat_githubgen_skipmembercheck.yaml b/.chloggen/feat_githubgen_skipmembercheck.yaml new file mode 100644 index 000000000000..930836158dfc --- /dev/null +++ b/.chloggen/feat_githubgen_skipmembercheck.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: enhancement + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: cmd/githubgen + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Adds a flag to skip checking GitHub organization membership for CODEOWNERS + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [36263] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [user] diff --git a/cmd/githubgen/README.md b/cmd/githubgen/README.md index 2703499eba8d..fa81f646815f 100644 --- a/cmd/githubgen/README.md +++ b/cmd/githubgen/README.md @@ -19,7 +19,7 @@ $> GITHUB_TOKEN= githubgen --folder . [--allowlist cmd/githubgen/all ## Checking codeowners against OpenTelemetry membership via Github API -To authenticate, set the environment variable `GITHUB_TOKEN` to a PAT token. +To authenticate, set the environment variable `GITHUB_TOKEN` to a PAT token. If a PAT is not available you can use the `--skipgithub` flag to avoid checking for membership in the GitHub organization. 
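For example, the CODEOWNERS generator can still be run without a PAT by skipping the membership check (a sketch; the flags other than `--skipgithub` are the tool's existing options and may vary with how you normally invoke it):

```sh
githubgen --skipgithub --folder . codeowners
```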
For each codeowner, the script will check if the user is registered as a member of the OpenTelemetry organization. diff --git a/cmd/githubgen/codeowners.go b/cmd/githubgen/codeowners.go index dbe8c64f4e85..e33ce9d65efd 100644 --- a/cmd/githubgen/codeowners.go +++ b/cmd/githubgen/codeowners.go @@ -71,6 +71,7 @@ const distributionCodeownersHeader = ` ` type codeownersGenerator struct { + skipGithub bool } func (cg codeownersGenerator) generate(data *githubData) error { @@ -91,7 +92,7 @@ func (cg codeownersGenerator) generate(data *githubData) error { } var missingCodeowners []string var duplicateCodeowners []string - members, err := getGithubMembers() + members, err := cg.getGithubMembers() if err != nil { return err } @@ -109,7 +110,7 @@ func (cg codeownersGenerator) generate(data *githubData) error { duplicateCodeowners = append(duplicateCodeowners, codeowner) } } - if len(missingCodeowners) > 0 { + if len(missingCodeowners) > 0 && !cg.skipGithub { sort.Strings(missingCodeowners) return fmt.Errorf("codeowners are not members: %s", strings.Join(missingCodeowners, ", ")) } @@ -189,7 +190,11 @@ LOOP: return nil } -func getGithubMembers() (map[string]struct{}, error) { +func (cg codeownersGenerator) getGithubMembers() (map[string]struct{}, error) { + if cg.skipGithub { + // don't try to get organization members if no token is expected + return map[string]struct{}{}, nil + } githubToken := os.Getenv("GITHUB_TOKEN") if githubToken == "" { return nil, fmt.Errorf("Set the environment variable `GITHUB_TOKEN` to a PAT token to authenticate") diff --git a/cmd/githubgen/main.go b/cmd/githubgen/main.go index 09431fef0fc0..10e87c4ecfc3 100644 --- a/cmd/githubgen/main.go +++ b/cmd/githubgen/main.go @@ -32,6 +32,7 @@ type generator interface { func main() { folder := flag.String("folder", ".", "folder investigated for codeowners") allowlistFilePath := flag.String("allowlist", "cmd/githubgen/allowlist.txt", "path to a file containing an allowlist of members outside the OpenTelemetry organization") + skipGithubCheck := flag.Bool("skipgithub", false, "skip checking GitHub membership check for CODEOWNERS generator") flag.Parse() var generators []generator for _, arg := range flag.Args() { @@ -39,7 +40,7 @@ func main() { case "issue-templates": generators = append(generators, issueTemplatesGenerator{}) case "codeowners": - generators = append(generators, codeownersGenerator{}) + generators = append(generators, codeownersGenerator{skipGithub: *skipGithubCheck}) case "distributions": generators = append(generators, distributionsGenerator{}) default: @@ -47,7 +48,7 @@ func main() { } } if len(generators) == 0 { - generators = []generator{issueTemplatesGenerator{}, codeownersGenerator{}} + generators = []generator{issueTemplatesGenerator{}, codeownersGenerator{skipGithub: *skipGithubCheck}} } if err := run(*folder, *allowlistFilePath, generators); err != nil { log.Fatal(err) From c7ecf2c02cec13afc871f67c5bc16aff43abe4fb Mon Sep 17 00:00:00 2001 From: Bogdan Drutu Date: Fri, 15 Nov 2024 16:51:17 -0800 Subject: [PATCH 13/14] [chore] Fix usage of setupTestTelemetry (#36394) Signed-off-by: Bogdan Drutu --- .../servicegraphconnector/connector_test.go | 1 + .../processor_decisions_test.go | 42 +++++++++---------- receiver/kafkareceiver/kafka_receiver_test.go | 9 ++++ receiver/solacereceiver/receiver_test.go | 7 ++++ 4 files changed, 38 insertions(+), 21 deletions(-) diff --git a/connector/servicegraphconnector/connector_test.go b/connector/servicegraphconnector/connector_test.go index 3b6e1a4beda7..8dba2717a06f 100644 
--- a/connector/servicegraphconnector/connector_test.go +++ b/connector/servicegraphconnector/connector_test.go @@ -606,6 +606,7 @@ func TestValidateOwnTelemetry(t *testing.T) { }, }, }) + require.NoError(t, set.Shutdown(context.Background())) } func TestExtraDimensionsLabels(t *testing.T) { diff --git a/processor/tailsamplingprocessor/processor_decisions_test.go b/processor/tailsamplingprocessor/processor_decisions_test.go index e50ea4f7ce59..c198316fc418 100644 --- a/processor/tailsamplingprocessor/processor_decisions_test.go +++ b/processor/tailsamplingprocessor/processor_decisions_test.go @@ -25,8 +25,7 @@ func TestSamplingPolicyTypicalPath(t *testing.T) { NumTraces: defaultNumTraces, } nextConsumer := new(consumertest.TracesSink) - s := setupTestTelemetry() - ct := s.NewSettings() + tel := setupTestTelemetry() idb := newSyncIDBatcher() mpe1 := &mockPolicyEvaluator{} @@ -35,7 +34,7 @@ func TestSamplingPolicyTypicalPath(t *testing.T) { {name: "mock-policy-1", evaluator: mpe1, attribute: metric.WithAttributes(attribute.String("policy", "mock-policy-1"))}, } - p, err := newTracesProcessor(context.Background(), ct, nextConsumer, cfg, withDecisionBatcher(idb), withPolicies(policies)) + p, err := newTracesProcessor(context.Background(), tel.NewSettings(), nextConsumer, cfg, withDecisionBatcher(idb), withPolicies(policies)) require.NoError(t, err) require.NoError(t, p.Start(context.Background(), componenttest.NewNopHost())) @@ -62,6 +61,7 @@ func TestSamplingPolicyTypicalPath(t *testing.T) { // The final decision SHOULD be Sampled. require.EqualValues(t, 1, nextConsumer.SpanCount()) + require.NoError(t, tel.Shutdown(context.Background())) } func TestSamplingPolicyInvertSampled(t *testing.T) { @@ -70,8 +70,7 @@ func TestSamplingPolicyInvertSampled(t *testing.T) { NumTraces: defaultNumTraces, } nextConsumer := new(consumertest.TracesSink) - s := setupTestTelemetry() - ct := s.NewSettings() + tel := setupTestTelemetry() idb := newSyncIDBatcher() mpe1 := &mockPolicyEvaluator{} @@ -80,7 +79,7 @@ func TestSamplingPolicyInvertSampled(t *testing.T) { {name: "mock-policy-1", evaluator: mpe1, attribute: metric.WithAttributes(attribute.String("policy", "mock-policy-1"))}, } - p, err := newTracesProcessor(context.Background(), ct, nextConsumer, cfg, withDecisionBatcher(idb), withPolicies(policies)) + p, err := newTracesProcessor(context.Background(), tel.NewSettings(), nextConsumer, cfg, withDecisionBatcher(idb), withPolicies(policies)) require.NoError(t, err) require.NoError(t, p.Start(context.Background(), componenttest.NewNopHost())) @@ -107,6 +106,7 @@ func TestSamplingPolicyInvertSampled(t *testing.T) { // The final decision SHOULD be Sampled. 
require.EqualValues(t, 1, nextConsumer.SpanCount()) + require.NoError(t, tel.Shutdown(context.Background())) } func TestSamplingMultiplePolicies(t *testing.T) { @@ -115,8 +115,7 @@ func TestSamplingMultiplePolicies(t *testing.T) { NumTraces: defaultNumTraces, } nextConsumer := new(consumertest.TracesSink) - s := setupTestTelemetry() - ct := s.NewSettings() + tel := setupTestTelemetry() idb := newSyncIDBatcher() mpe1 := &mockPolicyEvaluator{} @@ -127,7 +126,7 @@ func TestSamplingMultiplePolicies(t *testing.T) { {name: "mock-policy-2", evaluator: mpe2, attribute: metric.WithAttributes(attribute.String("policy", "mock-policy-2"))}, } - p, err := newTracesProcessor(context.Background(), ct, nextConsumer, cfg, withDecisionBatcher(idb), withPolicies(policies)) + p, err := newTracesProcessor(context.Background(), tel.NewSettings(), nextConsumer, cfg, withDecisionBatcher(idb), withPolicies(policies)) require.NoError(t, err) require.NoError(t, p.Start(context.Background(), componenttest.NewNopHost())) @@ -158,6 +157,7 @@ func TestSamplingMultiplePolicies(t *testing.T) { // The final decision SHOULD be Sampled. require.EqualValues(t, 1, nextConsumer.SpanCount()) + require.NoError(t, tel.Shutdown(context.Background())) } func TestSamplingPolicyDecisionNotSampled(t *testing.T) { @@ -166,8 +166,7 @@ func TestSamplingPolicyDecisionNotSampled(t *testing.T) { NumTraces: defaultNumTraces, } nextConsumer := new(consumertest.TracesSink) - s := setupTestTelemetry() - ct := s.NewSettings() + tel := setupTestTelemetry() idb := newSyncIDBatcher() mpe1 := &mockPolicyEvaluator{} @@ -176,7 +175,7 @@ func TestSamplingPolicyDecisionNotSampled(t *testing.T) { {name: "mock-policy-1", evaluator: mpe1, attribute: metric.WithAttributes(attribute.String("policy", "mock-policy-1"))}, } - p, err := newTracesProcessor(context.Background(), ct, nextConsumer, cfg, withDecisionBatcher(idb), withPolicies(policies)) + p, err := newTracesProcessor(context.Background(), tel.NewSettings(), nextConsumer, cfg, withDecisionBatcher(idb), withPolicies(policies)) require.NoError(t, err) require.NoError(t, p.Start(context.Background(), componenttest.NewNopHost())) @@ -204,6 +203,7 @@ func TestSamplingPolicyDecisionNotSampled(t *testing.T) { // The final decision SHOULD be NotSampled. require.EqualValues(t, 0, nextConsumer.SpanCount()) + require.NoError(t, tel.Shutdown(context.Background())) } func TestSamplingPolicyDecisionInvertNotSampled(t *testing.T) { @@ -212,8 +212,7 @@ func TestSamplingPolicyDecisionInvertNotSampled(t *testing.T) { NumTraces: defaultNumTraces, } nextConsumer := new(consumertest.TracesSink) - s := setupTestTelemetry() - ct := s.NewSettings() + tel := setupTestTelemetry() idb := newSyncIDBatcher() mpe1 := &mockPolicyEvaluator{} @@ -224,7 +223,7 @@ func TestSamplingPolicyDecisionInvertNotSampled(t *testing.T) { {name: "mock-policy-2", evaluator: mpe2, attribute: metric.WithAttributes(attribute.String("policy", "mock-policy-2"))}, } - p, err := newTracesProcessor(context.Background(), ct, nextConsumer, cfg, withDecisionBatcher(idb), withPolicies(policies)) + p, err := newTracesProcessor(context.Background(), tel.NewSettings(), nextConsumer, cfg, withDecisionBatcher(idb), withPolicies(policies)) require.NoError(t, err) require.NoError(t, p.Start(context.Background(), componenttest.NewNopHost())) @@ -255,6 +254,7 @@ func TestSamplingPolicyDecisionInvertNotSampled(t *testing.T) { // The final decision SHOULD be NotSampled. 
require.EqualValues(t, 0, nextConsumer.SpanCount()) + require.NoError(t, tel.Shutdown(context.Background())) } func TestLateArrivingSpansAssignedOriginalDecision(t *testing.T) { @@ -263,8 +263,7 @@ func TestLateArrivingSpansAssignedOriginalDecision(t *testing.T) { NumTraces: defaultNumTraces, } nextConsumer := new(consumertest.TracesSink) - s := setupTestTelemetry() - ct := s.NewSettings() + tel := setupTestTelemetry() idb := newSyncIDBatcher() mpe1 := &mockPolicyEvaluator{} @@ -275,7 +274,7 @@ func TestLateArrivingSpansAssignedOriginalDecision(t *testing.T) { {name: "mock-policy-2", evaluator: mpe2, attribute: metric.WithAttributes(attribute.String("policy", "mock-policy-2"))}, } - p, err := newTracesProcessor(context.Background(), ct, nextConsumer, cfg, withDecisionBatcher(idb), withPolicies(policies)) + p, err := newTracesProcessor(context.Background(), tel.NewSettings(), nextConsumer, cfg, withDecisionBatcher(idb), withPolicies(policies)) require.NoError(t, err) require.NoError(t, p.Start(context.Background(), componenttest.NewNopHost())) @@ -325,6 +324,7 @@ func TestLateArrivingSpansAssignedOriginalDecision(t *testing.T) { require.EqualValues(t, 1, mpe1.EvaluationCount) require.EqualValues(t, 1, mpe2.EvaluationCount) require.EqualValues(t, 0, nextConsumer.SpanCount(), "original final decision not honored") + require.NoError(t, tel.Shutdown(context.Background())) } func TestLateArrivingSpanUsesDecisionCache(t *testing.T) { @@ -333,8 +333,7 @@ func TestLateArrivingSpanUsesDecisionCache(t *testing.T) { NumTraces: defaultNumTraces, } nextConsumer := new(consumertest.TracesSink) - s := setupTestTelemetry() - ct := s.NewSettings() + tel := setupTestTelemetry() idb := newSyncIDBatcher() mpe := &mockPolicyEvaluator{} @@ -345,7 +344,7 @@ func TestLateArrivingSpanUsesDecisionCache(t *testing.T) { // Use this instead of the default no-op cache c, err := cache.NewLRUDecisionCache[bool](200) require.NoError(t, err) - p, err := newTracesProcessor(context.Background(), ct, nextConsumer, cfg, withDecisionBatcher(idb), withPolicies(policies), withSampledDecisionCache(c)) + p, err := newTracesProcessor(context.Background(), tel.NewSettings(), nextConsumer, cfg, withDecisionBatcher(idb), withPolicies(policies), withSampledDecisionCache(c)) require.NoError(t, err) require.NoError(t, p.Start(context.Background(), componenttest.NewNopHost())) @@ -399,4 +398,5 @@ func TestLateArrivingSpanUsesDecisionCache(t *testing.T) { require.NoError(t, p.ConsumeTraces(context.Background(), spanIndexToTraces(2))) require.EqualValues(t, 1, mpe.EvaluationCount) require.EqualValues(t, 2, nextConsumer.SpanCount(), "original final decision not honored") + require.NoError(t, tel.Shutdown(context.Background())) } diff --git a/receiver/kafkareceiver/kafka_receiver_test.go b/receiver/kafkareceiver/kafka_receiver_test.go index 61693d58eced..e353974acd91 100644 --- a/receiver/kafkareceiver/kafka_receiver_test.go +++ b/receiver/kafkareceiver/kafka_receiver_test.go @@ -189,6 +189,7 @@ func TestTracesConsumerGroupHandler(t *testing.T) { groupClaim.messageChan <- &sarama.ConsumerMessage{} close(groupClaim.messageChan) wg.Wait() + require.NoError(t, tel.Shutdown(context.Background())) } func TestTracesConsumerGroupHandler_session_done(t *testing.T) { @@ -233,6 +234,7 @@ func TestTracesConsumerGroupHandler_session_done(t *testing.T) { groupClaim.messageChan <- &sarama.ConsumerMessage{} cancelFunc() wg.Wait() + require.NoError(t, tel.Shutdown(context.Background())) } func TestTracesConsumerGroupHandler_error_unmarshal(t *testing.T) { @@ 
-331,6 +333,7 @@ func TestTracesConsumerGroupHandler_error_unmarshal(t *testing.T) { }, }, }) + require.NoError(t, tel.Shutdown(context.Background())) } func TestTracesConsumerGroupHandler_error_nextConsumer(t *testing.T) { @@ -531,6 +534,7 @@ func TestMetricsConsumerGroupHandler(t *testing.T) { groupClaim.messageChan <- &sarama.ConsumerMessage{} close(groupClaim.messageChan) wg.Wait() + require.NoError(t, tel.Shutdown(context.Background())) } func TestMetricsConsumerGroupHandler_session_done(t *testing.T) { @@ -574,6 +578,7 @@ func TestMetricsConsumerGroupHandler_session_done(t *testing.T) { groupClaim.messageChan <- &sarama.ConsumerMessage{} cancelFunc() wg.Wait() + require.NoError(t, tel.Shutdown(context.Background())) } func TestMetricsConsumerGroupHandler_error_unmarshal(t *testing.T) { @@ -672,6 +677,7 @@ func TestMetricsConsumerGroupHandler_error_unmarshal(t *testing.T) { }, }, }) + require.NoError(t, tel.Shutdown(context.Background())) } func TestMetricsConsumerGroupHandler_error_nextConsumer(t *testing.T) { @@ -887,6 +893,7 @@ func TestLogsConsumerGroupHandler(t *testing.T) { groupClaim.messageChan <- &sarama.ConsumerMessage{} close(groupClaim.messageChan) wg.Wait() + require.NoError(t, tel.Shutdown(context.Background())) } func TestLogsConsumerGroupHandler_session_done(t *testing.T) { @@ -930,6 +937,7 @@ func TestLogsConsumerGroupHandler_session_done(t *testing.T) { groupClaim.messageChan <- &sarama.ConsumerMessage{} cancelFunc() wg.Wait() + require.NoError(t, tel.Shutdown(context.Background())) } func TestLogsConsumerGroupHandler_error_unmarshal(t *testing.T) { @@ -1028,6 +1036,7 @@ func TestLogsConsumerGroupHandler_error_unmarshal(t *testing.T) { }, }, }) + require.NoError(t, tel.Shutdown(context.Background())) } func TestLogsConsumerGroupHandler_error_nextConsumer(t *testing.T) { diff --git a/receiver/solacereceiver/receiver_test.go b/receiver/solacereceiver/receiver_test.go index d6ce7913ce2a..e68850ae8c23 100644 --- a/receiver/solacereceiver/receiver_test.go +++ b/receiver/solacereceiver/receiver_test.go @@ -217,6 +217,7 @@ func TestReceiveMessage(t *testing.T) { if testCase.validation != nil { testCase.validation(t, tt) } + require.NoError(t, tt.Shutdown(context.Background())) }) } } @@ -281,6 +282,7 @@ func TestReceiveMessagesTerminateWithCtxDone(t *testing.T) { }, }, }) + require.NoError(t, tt.Shutdown(context.Background())) } func TestReceiverLifecycle(t *testing.T) { @@ -413,6 +415,7 @@ func TestReceiverLifecycle(t *testing.T) { }, }, }) + require.NoError(t, tt.Shutdown(context.Background())) } func TestReceiverDialFailureContinue(t *testing.T) { @@ -542,6 +545,7 @@ func TestReceiverDialFailureContinue(t *testing.T) { }, }, }) + require.NoError(t, tt.Shutdown(context.Background())) } func TestReceiverUnmarshalVersionFailureExpectingDisable(t *testing.T) { @@ -656,6 +660,7 @@ func TestReceiverUnmarshalVersionFailureExpectingDisable(t *testing.T) { }) err = receiver.Shutdown(context.Background()) assert.NoError(t, err) + require.NoError(t, tt.Shutdown(context.Background())) } func TestReceiverFlowControlDelayedRetry(t *testing.T) { @@ -935,6 +940,7 @@ func TestReceiverFlowControlDelayedRetry(t *testing.T) { }, }) } + require.NoError(t, tt.Shutdown(context.Background())) }) } } @@ -1153,6 +1159,7 @@ func TestReceiverFlowControlDelayedRetryMultipleRetries(t *testing.T) { }, }, }) + require.NoError(t, tt.Shutdown(context.Background())) } func newReceiver(t *testing.T) (*solaceTracesReceiver, *mockMessagingService, *mockUnmarshaller, componentTestTelemetry) { From 
97659b59ea8f56538760e9ca812e503fdcce0ed4 Mon Sep 17 00:00:00 2001 From: Howard Cheung Date: Sun, 17 Nov 2024 22:18:01 +0800 Subject: [PATCH 14/14] feat (processor/k8sattributes): wait for synced when starting k8sattributes processor. (#32622) **Description:** When starting `k8sattributes` processor, block until an initial synchronization has been completed. This solves #32556 **Link to tracking Issue:** fix #32556 **Testing:** Tested in a cluster with constant high span traffic, no more spans with unassociated k8s metadata after adding this pr. **Documentation:** --------- Co-authored-by: Christos Markou --- .chloggen/k8sattributes-block.yaml | 27 ++++++ processor/k8sattributesprocessor/README.md | 15 +++ .../k8sattributesprocessor/client_test.go | 9 +- processor/k8sattributesprocessor/config.go | 7 ++ .../k8sattributesprocessor/config_test.go | 5 + processor/k8sattributesprocessor/factory.go | 7 ++ .../generated_component_test.go | 38 -------- .../internal/kube/client.go | 97 +++++++++++++------ .../internal/kube/client_test.go | 77 +++++++++++---- .../internal/kube/fake_informer.go | 4 +- .../internal/kube/kube.go | 4 +- .../k8sattributesprocessor/metadata.yaml | 1 + processor/k8sattributesprocessor/options.go | 17 ++++ processor/k8sattributesprocessor/processor.go | 37 ++++--- .../k8sattributesprocessor/processor_test.go | 2 +- 15 files changed, 238 insertions(+), 109 deletions(-) create mode 100644 .chloggen/k8sattributes-block.yaml diff --git a/.chloggen/k8sattributes-block.yaml b/.chloggen/k8sattributes-block.yaml new file mode 100644 index 000000000000..2b05153af746 --- /dev/null +++ b/.chloggen/k8sattributes-block.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: bug_fix + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: processor/k8sattributes + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Block when starting until the metadata have been synced, to fix that some data couldn't be associated with metadata when the agent was just started. + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [32556] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [] diff --git a/processor/k8sattributesprocessor/README.md b/processor/k8sattributesprocessor/README.md index cdb82bc759aa..12a3e4d32f8a 100644 --- a/processor/k8sattributesprocessor/README.md +++ b/processor/k8sattributesprocessor/README.md @@ -198,6 +198,21 @@ the processor associates the received trace to the pod, based on the connection } ``` +By default, the processor will be ready as soon as it starts, even if no metadata has been fetched yet. 
+If data is sent to this processor before the metadata is synced, there will be no metadata to enrich the data with.
+
+To wait for the metadata to be synced before the processor is ready, set the `wait_for_metadata` option to `true`.
+The processor will then not be ready until the metadata is fully synced, so Collector start-up is blocked until the sync completes.
+If the sync does not complete before the timeout is reached, the processor fails to start and returns an error, which causes the Collector to exit.
+The timeout defaults to 10s and can be configured with the `wait_for_metadata_timeout` option.
+
+Example of configuring the processor to wait for the metadata to be synced before it is ready:
+
+```yaml
+wait_for_metadata: true
+wait_for_metadata_timeout: 10s
+```
+
 ## Extracting attributes from pod labels and annotations

 The k8sattributesprocessor can also set resource attributes from k8s labels and annotations of pods, namespaces and nodes.
diff --git a/processor/k8sattributesprocessor/client_test.go b/processor/k8sattributesprocessor/client_test.go
index 2a893e52790c..c69e0638301a 100644
--- a/processor/k8sattributesprocessor/client_test.go
+++ b/processor/k8sattributesprocessor/client_test.go
@@ -4,6 +4,8 @@
 package k8sattributesprocessor

 import (
+ "time"
+
 "go.opentelemetry.io/collector/component"
 "k8s.io/apimachinery/pkg/fields"
 "k8s.io/apimachinery/pkg/labels"
@@ -35,7 +37,7 @@ func selectors() (labels.Selector, fields.Selector) {
 }

 // newFakeClient instantiates a new FakeClient object and satisfies the ClientProvider type
-func newFakeClient(_ component.TelemetrySettings, _ k8sconfig.APIConfig, rules kube.ExtractionRules, filters kube.Filters, associations []kube.Association, _ kube.Excludes, _ kube.APIClientsetProvider, _ kube.InformerProvider, _ kube.InformerProviderNamespace, _ kube.InformerProviderReplicaSet) (kube.Client, error) {
+func newFakeClient(_ component.TelemetrySettings, _ k8sconfig.APIConfig, rules kube.ExtractionRules, filters kube.Filters, associations []kube.Association, _ kube.Excludes, _ kube.APIClientsetProvider, _ kube.InformerProvider, _ kube.InformerProviderNamespace, _ kube.InformerProviderReplicaSet, _ bool, _ time.Duration) (kube.Client, error) {
 cs := fake.NewSimpleClientset()
 ls, fs := selectors()
@@ -70,10 +72,11 @@ func (f *fakeClient) GetNode(nodeName string) (*kube.Node, bool) {
 }

 // Start is a noop for FakeClient.
-func (f *fakeClient) Start() {
+func (f *fakeClient) Start() error {
 if f.Informer != nil {
- f.Informer.Run(f.StopCh)
+ go f.Informer.Run(f.StopCh)
 }
+ return nil
 }

 // Stop is a noop for FakeClient.
diff --git a/processor/k8sattributesprocessor/config.go b/processor/k8sattributesprocessor/config.go
index 27b49cef5d63..e5651a087bf4 100644
--- a/processor/k8sattributesprocessor/config.go
+++ b/processor/k8sattributesprocessor/config.go
@@ -6,6 +6,7 @@ package k8sattributesprocessor // import "github.com/open-telemetry/opentelemetr
 import (
 "fmt"
 "regexp"
+ "time"

 "go.opentelemetry.io/collector/featuregate"
 conventions "go.opentelemetry.io/collector/semconv/v1.6.1"
@@ -46,6 +47,12 @@ type Config struct {
 // Exclude section allows to define names of pod that should be
 // ignored while tagging.
 Exclude ExcludeConfig `mapstructure:"exclude"`
+
+ // WaitForMetadata is a flag that determines if the processor should wait for k8s metadata to be synced when starting.
+ WaitForMetadata bool `mapstructure:"wait_for_metadata"` + + // WaitForMetadataTimeout is the maximum time the processor will wait for the k8s metadata to be synced. + WaitForMetadataTimeout time.Duration `mapstructure:"wait_for_metadata_timeout"` } func (cfg *Config) Validate() error { diff --git a/processor/k8sattributesprocessor/config_test.go b/processor/k8sattributesprocessor/config_test.go index 78826108016b..9510df99a3b7 100644 --- a/processor/k8sattributesprocessor/config_test.go +++ b/processor/k8sattributesprocessor/config_test.go @@ -6,6 +6,7 @@ package k8sattributesprocessor import ( "path/filepath" "testing" + "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -34,6 +35,7 @@ func TestLoadConfig(t *testing.T) { Extract: ExtractConfig{ Metadata: enabledAttributes(), }, + WaitForMetadataTimeout: 10 * time.Second, }, }, { @@ -105,6 +107,7 @@ func TestLoadConfig(t *testing.T) { {Name: "jaeger-collector"}, }, }, + WaitForMetadataTimeout: 10 * time.Second, }, }, { @@ -127,6 +130,7 @@ func TestLoadConfig(t *testing.T) { {Name: "jaeger-collector"}, }, }, + WaitForMetadataTimeout: 10 * time.Second, }, }, { @@ -149,6 +153,7 @@ func TestLoadConfig(t *testing.T) { {Name: "jaeger-collector"}, }, }, + WaitForMetadataTimeout: 10 * time.Second, }, }, { diff --git a/processor/k8sattributesprocessor/factory.go b/processor/k8sattributesprocessor/factory.go index 2c3eb39b2914..56d27a2b5102 100644 --- a/processor/k8sattributesprocessor/factory.go +++ b/processor/k8sattributesprocessor/factory.go @@ -5,6 +5,7 @@ package k8sattributesprocessor // import "github.com/open-telemetry/opentelemetr import ( "context" + "time" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/consumer" @@ -44,6 +45,7 @@ func createDefaultConfig() component.Config { Extract: ExtractConfig{ Metadata: enabledAttributes(), }, + WaitForMetadataTimeout: 10 * time.Second, } } @@ -202,5 +204,10 @@ func createProcessorOpts(cfg component.Config) []option { opts = append(opts, withExcludes(oCfg.Exclude)) + opts = append(opts, withWaitForMetadataTimeout(oCfg.WaitForMetadataTimeout)) + if oCfg.WaitForMetadata { + opts = append(opts, withWaitForMetadata(true)) + } + return opts } diff --git a/processor/k8sattributesprocessor/generated_component_test.go b/processor/k8sattributesprocessor/generated_component_test.go index 13918bbe7795..38c00260ae43 100644 --- a/processor/k8sattributesprocessor/generated_component_test.go +++ b/processor/k8sattributesprocessor/generated_component_test.go @@ -72,44 +72,6 @@ func TestComponentLifecycle(t *testing.T) { err = c.Shutdown(context.Background()) require.NoError(t, err) }) - t.Run(tt.name+"-lifecycle", func(t *testing.T) { - c, err := tt.createFn(context.Background(), processortest.NewNopSettings(), cfg) - require.NoError(t, err) - host := componenttest.NewNopHost() - err = c.Start(context.Background(), host) - require.NoError(t, err) - require.NotPanics(t, func() { - switch tt.name { - case "logs": - e, ok := c.(processor.Logs) - require.True(t, ok) - logs := generateLifecycleTestLogs() - if !e.Capabilities().MutatesData { - logs.MarkReadOnly() - } - err = e.ConsumeLogs(context.Background(), logs) - case "metrics": - e, ok := c.(processor.Metrics) - require.True(t, ok) - metrics := generateLifecycleTestMetrics() - if !e.Capabilities().MutatesData { - metrics.MarkReadOnly() - } - err = e.ConsumeMetrics(context.Background(), metrics) - case "traces": - e, ok := c.(processor.Traces) - require.True(t, ok) - traces := 
generateLifecycleTestTraces() - if !e.Capabilities().MutatesData { - traces.MarkReadOnly() - } - err = e.ConsumeTraces(context.Background(), traces) - } - }) - require.NoError(t, err) - err = c.Shutdown(context.Background()) - require.NoError(t, err) - }) } } diff --git a/processor/k8sattributesprocessor/internal/kube/client.go b/processor/k8sattributesprocessor/internal/kube/client.go index 9624fb250b22..bf8746b3e8ef 100644 --- a/processor/k8sattributesprocessor/internal/kube/client.go +++ b/processor/k8sattributesprocessor/internal/kube/client.go @@ -5,6 +5,7 @@ package kube // import "github.com/open-telemetry/opentelemetry-collector-contri import ( "context" + "errors" "fmt" "regexp" "strings" @@ -39,18 +40,20 @@ var enableRFC3339Timestamp = featuregate.GlobalRegistry().MustRegister( // WatchClient is the main interface provided by this package to a kubernetes cluster. type WatchClient struct { - m sync.RWMutex - deleteMut sync.Mutex - logger *zap.Logger - kc kubernetes.Interface - informer cache.SharedInformer - namespaceInformer cache.SharedInformer - nodeInformer cache.SharedInformer - replicasetInformer cache.SharedInformer - replicasetRegex *regexp.Regexp - cronJobRegex *regexp.Regexp - deleteQueue []deleteRequest - stopCh chan struct{} + m sync.RWMutex + deleteMut sync.Mutex + logger *zap.Logger + kc kubernetes.Interface + informer cache.SharedInformer + namespaceInformer cache.SharedInformer + nodeInformer cache.SharedInformer + replicasetInformer cache.SharedInformer + replicasetRegex *regexp.Regexp + cronJobRegex *regexp.Regexp + deleteQueue []deleteRequest + stopCh chan struct{} + waitForMetadata bool + waitForMetadataTimeout time.Duration // A map containing Pod related data, used to associate them with resources. // Key can be either an IP address or Pod UID @@ -84,21 +87,36 @@ var rRegex = regexp.MustCompile(`^(.*)-[0-9a-zA-Z]+$`) var cronJobRegex = regexp.MustCompile(`^(.*)-[0-9]+$`) // New initializes a new k8s Client. 
-func New(set component.TelemetrySettings, apiCfg k8sconfig.APIConfig, rules ExtractionRules, filters Filters, associations []Association, exclude Excludes, newClientSet APIClientsetProvider, newInformer InformerProvider, newNamespaceInformer InformerProviderNamespace, newReplicaSetInformer InformerProviderReplicaSet) (Client, error) { +func New( + set component.TelemetrySettings, + apiCfg k8sconfig.APIConfig, + rules ExtractionRules, + filters Filters, + associations []Association, + exclude Excludes, + newClientSet APIClientsetProvider, + newInformer InformerProvider, + newNamespaceInformer InformerProviderNamespace, + newReplicaSetInformer InformerProviderReplicaSet, + waitForMetadata bool, + waitForMetadataTimeout time.Duration, +) (Client, error) { telemetryBuilder, err := metadata.NewTelemetryBuilder(set) if err != nil { return nil, err } c := &WatchClient{ - logger: set.Logger, - Rules: rules, - Filters: filters, - Associations: associations, - Exclude: exclude, - replicasetRegex: rRegex, - cronJobRegex: cronJobRegex, - stopCh: make(chan struct{}), - telemetryBuilder: telemetryBuilder, + logger: set.Logger, + Rules: rules, + Filters: filters, + Associations: associations, + Exclude: exclude, + replicasetRegex: rRegex, + cronJobRegex: cronJobRegex, + stopCh: make(chan struct{}), + telemetryBuilder: telemetryBuilder, + waitForMetadata: waitForMetadata, + waitForMetadataTimeout: waitForMetadataTimeout, } go c.deleteLoop(time.Second*30, defaultPodDeleteGracePeriod) @@ -189,50 +207,67 @@ func New(set component.TelemetrySettings, apiCfg k8sconfig.APIConfig, rules Extr } // Start registers pod event handlers and starts watching the kubernetes cluster for pod changes. -func (c *WatchClient) Start() { - _, err := c.informer.AddEventHandler(cache.ResourceEventHandlerFuncs{ +func (c *WatchClient) Start() error { + synced := make([]cache.InformerSynced, 0) + reg, err := c.informer.AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: c.handlePodAdd, UpdateFunc: c.handlePodUpdate, DeleteFunc: c.handlePodDelete, }) if err != nil { - c.logger.Error("error adding event handler to pod informer", zap.Error(err)) + return err } + synced = append(synced, reg.HasSynced) go c.informer.Run(c.stopCh) - _, err = c.namespaceInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ + reg, err = c.namespaceInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: c.handleNamespaceAdd, UpdateFunc: c.handleNamespaceUpdate, DeleteFunc: c.handleNamespaceDelete, }) if err != nil { - c.logger.Error("error adding event handler to namespace informer", zap.Error(err)) + return err } + synced = append(synced, reg.HasSynced) go c.namespaceInformer.Run(c.stopCh) if c.Rules.DeploymentName || c.Rules.DeploymentUID { - _, err = c.replicasetInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ + reg, err = c.replicasetInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: c.handleReplicaSetAdd, UpdateFunc: c.handleReplicaSetUpdate, DeleteFunc: c.handleReplicaSetDelete, }) if err != nil { - c.logger.Error("error adding event handler to replicaset informer", zap.Error(err)) + return err } + synced = append(synced, reg.HasSynced) go c.replicasetInformer.Run(c.stopCh) } if c.nodeInformer != nil { - _, err = c.nodeInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ + reg, err = c.nodeInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: c.handleNodeAdd, UpdateFunc: c.handleNodeUpdate, DeleteFunc: c.handleNodeDelete, }) if err != nil { - c.logger.Error("error adding event handler to 
node informer", zap.Error(err)) + return err } + synced = append(synced, reg.HasSynced) go c.nodeInformer.Run(c.stopCh) } + + if c.waitForMetadata { + timeoutCh := make(chan struct{}) + t := time.AfterFunc(c.waitForMetadataTimeout, func() { + close(timeoutCh) + }) + defer t.Stop() + if !cache.WaitForCacheSync(timeoutCh, synced...) { + return errors.New("failed to wait for caches to sync") + } + } + return nil } // Stop signals the the k8s watcher/informer to stop watching for new events. diff --git a/processor/k8sattributesprocessor/internal/kube/client_test.go b/processor/k8sattributesprocessor/internal/kube/client_test.go index 7f6b85d195c7..f5e9cd03bfdc 100644 --- a/processor/k8sattributesprocessor/internal/kube/client_test.go +++ b/processor/k8sattributesprocessor/internal/kube/client_test.go @@ -18,6 +18,8 @@ import ( apps_v1 "k8s.io/api/apps/v1" api_v1 "k8s.io/api/core/v1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/selection" "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/fake" @@ -143,29 +145,18 @@ func nodeAddAndUpdateTest(t *testing.T, c *WatchClient, handler func(obj any)) { } func TestDefaultClientset(t *testing.T) { - c, err := New(componenttest.NewNopTelemetrySettings(), k8sconfig.APIConfig{}, ExtractionRules{}, Filters{}, []Association{}, Excludes{}, nil, nil, nil, nil) + c, err := New(componenttest.NewNopTelemetrySettings(), k8sconfig.APIConfig{}, ExtractionRules{}, Filters{}, []Association{}, Excludes{}, nil, nil, nil, nil, false, 10*time.Second) assert.Error(t, err) assert.Equal(t, "invalid authType for kubernetes: ", err.Error()) assert.Nil(t, c) - c, err = New(componenttest.NewNopTelemetrySettings(), k8sconfig.APIConfig{}, ExtractionRules{}, Filters{}, []Association{}, Excludes{}, newFakeAPIClientset, nil, nil, nil) + c, err = New(componenttest.NewNopTelemetrySettings(), k8sconfig.APIConfig{}, ExtractionRules{}, Filters{}, []Association{}, Excludes{}, newFakeAPIClientset, nil, nil, nil, false, 10*time.Second) assert.NoError(t, err) assert.NotNil(t, c) } func TestBadFilters(t *testing.T) { - c, err := New( - componenttest.NewNopTelemetrySettings(), - k8sconfig.APIConfig{}, - ExtractionRules{}, - Filters{Fields: []FieldFilter{{Op: selection.Exists}}}, - []Association{}, - Excludes{}, - newFakeAPIClientset, - NewFakeInformer, - NewFakeNamespaceInformer, - NewFakeReplicaSetInformer, - ) + c, err := New(componenttest.NewNopTelemetrySettings(), k8sconfig.APIConfig{}, ExtractionRules{}, Filters{Fields: []FieldFilter{{Op: selection.Exists}}}, []Association{}, Excludes{}, newFakeAPIClientset, NewFakeInformer, NewFakeNamespaceInformer, NewFakeReplicaSetInformer, false, 10*time.Second) assert.Error(t, err) assert.Nil(t, c) } @@ -180,7 +171,7 @@ func TestClientStartStop(t *testing.T) { done := make(chan struct{}) assert.False(t, fctr.HasStopped()) go func() { - c.Start() + assert.NoError(t, c.Start()) close(done) }() c.Stop() @@ -201,7 +192,7 @@ func TestConstructorErrors(t *testing.T) { gotAPIConfig = c return nil, fmt.Errorf("error creating k8s client") } - c, err := New(componenttest.NewNopTelemetrySettings(), apiCfg, er, ff, []Association{}, Excludes{}, clientProvider, NewFakeInformer, NewFakeNamespaceInformer, nil) + c, err := New(componenttest.NewNopTelemetrySettings(), apiCfg, er, ff, []Association{}, Excludes{}, clientProvider, NewFakeInformer, NewFakeNamespaceInformer, nil, false, 10*time.Second) assert.Nil(t, c) assert.Error(t, err) assert.Equal(t, "error creating 
k8s client", err.Error()) @@ -1923,7 +1914,7 @@ func newTestClientWithRulesAndFilters(t *testing.T, f Filters) (*WatchClient, *o }, }, } - c, err := New(set, k8sconfig.APIConfig{}, ExtractionRules{}, f, associations, exclude, newFakeAPIClientset, NewFakeInformer, NewFakeNamespaceInformer, NewFakeReplicaSetInformer) + c, err := New(set, k8sconfig.APIConfig{}, ExtractionRules{}, f, associations, exclude, newFakeAPIClientset, NewFakeInformer, NewFakeNamespaceInformer, NewFakeReplicaSetInformer, false, 10*time.Second) require.NoError(t, err) return c.(*WatchClient), logs } @@ -1931,3 +1922,55 @@ func newTestClientWithRulesAndFilters(t *testing.T, f Filters) (*WatchClient, *o func newTestClient(t *testing.T) (*WatchClient, *observer.ObservedLogs) { return newTestClientWithRulesAndFilters(t, Filters{}) } + +type neverSyncedFakeClient struct { + cache.SharedInformer +} + +type neverSyncedResourceEventHandlerRegistration struct { + cache.ResourceEventHandlerRegistration +} + +func (n *neverSyncedResourceEventHandlerRegistration) HasSynced() bool { + return false +} + +func (n *neverSyncedFakeClient) AddEventHandler(handler cache.ResourceEventHandler) (cache.ResourceEventHandlerRegistration, error) { + delegate, err := n.SharedInformer.AddEventHandler(handler) + if err != nil { + return nil, err + } + return &neverSyncedResourceEventHandlerRegistration{ResourceEventHandlerRegistration: delegate}, nil +} + +func TestWaitForMetadata(t *testing.T) { + testCases := []struct { + name string + informerProvider InformerProvider + err bool + }{{ + name: "no wait", + informerProvider: NewFakeInformer, + err: false, + }, { + name: "wait but never synced", + informerProvider: func(client kubernetes.Interface, namespace string, labelSelector labels.Selector, fieldSelector fields.Selector) cache.SharedInformer { + return &neverSyncedFakeClient{NewFakeInformer(client, namespace, labelSelector, fieldSelector)} + }, + err: true, + }} + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + c, err := New(componenttest.NewNopTelemetrySettings(), k8sconfig.APIConfig{}, ExtractionRules{}, Filters{}, []Association{}, Excludes{}, newFakeAPIClientset, tc.informerProvider, nil, nil, true, 1*time.Second) + require.NoError(t, err) + + err = c.Start() + if tc.err { + require.Error(t, err) + } else { + require.NoError(t, err) + } + }) + } +} diff --git a/processor/k8sattributesprocessor/internal/kube/fake_informer.go b/processor/k8sattributesprocessor/internal/kube/fake_informer.go index fc5f479a6ddd..15d63c4e9c0c 100644 --- a/processor/k8sattributesprocessor/internal/kube/fake_informer.go +++ b/processor/k8sattributesprocessor/internal/kube/fake_informer.go @@ -40,7 +40,7 @@ func (f *FakeInformer) AddEventHandler(handler cache.ResourceEventHandler) (cach } func (f *FakeInformer) AddEventHandlerWithResyncPeriod(_ cache.ResourceEventHandler, _ time.Duration) (cache.ResourceEventHandlerRegistration, error) { - return nil, nil + return f, nil } func (f *FakeInformer) RemoveEventHandler(_ cache.ResourceEventHandlerRegistration) error { @@ -165,7 +165,7 @@ func (f *NoOpInformer) AddEventHandler(handler cache.ResourceEventHandler) (cach } func (f *NoOpInformer) AddEventHandlerWithResyncPeriod(_ cache.ResourceEventHandler, _ time.Duration) (cache.ResourceEventHandlerRegistration, error) { - return nil, nil + return f, nil } func (f *NoOpInformer) RemoveEventHandler(_ cache.ResourceEventHandlerRegistration) error { diff --git a/processor/k8sattributesprocessor/internal/kube/kube.go 
b/processor/k8sattributesprocessor/internal/kube/kube.go index 6145cb972235..9faeee2452dd 100644 --- a/processor/k8sattributesprocessor/internal/kube/kube.go +++ b/processor/k8sattributesprocessor/internal/kube/kube.go @@ -91,12 +91,12 @@ type Client interface { GetPod(PodIdentifier) (*Pod, bool) GetNamespace(string) (*Namespace, bool) GetNode(string) (*Node, bool) - Start() + Start() error Stop() } // ClientProvider defines a func type that returns a new Client. -type ClientProvider func(component.TelemetrySettings, k8sconfig.APIConfig, ExtractionRules, Filters, []Association, Excludes, APIClientsetProvider, InformerProvider, InformerProviderNamespace, InformerProviderReplicaSet) (Client, error) +type ClientProvider func(component.TelemetrySettings, k8sconfig.APIConfig, ExtractionRules, Filters, []Association, Excludes, APIClientsetProvider, InformerProvider, InformerProviderNamespace, InformerProviderReplicaSet, bool, time.Duration) (Client, error) // APIClientsetProvider defines a func type that initializes and return a new kubernetes // Clientset object. diff --git a/processor/k8sattributesprocessor/metadata.yaml b/processor/k8sattributesprocessor/metadata.yaml index a388cfcb3dfd..edfb8bbc414d 100644 --- a/processor/k8sattributesprocessor/metadata.yaml +++ b/processor/k8sattributesprocessor/metadata.yaml @@ -114,6 +114,7 @@ resource_attributes: tests: config: + skip_lifecycle: true goleak: skip: true diff --git a/processor/k8sattributesprocessor/options.go b/processor/k8sattributesprocessor/options.go index ec39cdb9b827..4ccccb0d4638 100644 --- a/processor/k8sattributesprocessor/options.go +++ b/processor/k8sattributesprocessor/options.go @@ -7,6 +7,7 @@ import ( "fmt" "os" "regexp" + "time" conventions "go.opentelemetry.io/collector/semconv/v1.6.1" "k8s.io/apimachinery/pkg/selection" @@ -381,3 +382,19 @@ func withExcludes(podExclude ExcludeConfig) option { return nil } } + +// withWaitForMetadata allows specifying whether to wait for pod metadata to be synced. +func withWaitForMetadata(wait bool) option { + return func(p *kubernetesprocessor) error { + p.waitForMetadata = wait + return nil + } +} + +// withWaitForMetadataTimeout allows specifying the timeout for waiting for pod metadata to be synced. 
+func withWaitForMetadataTimeout(timeout time.Duration) option { + return func(p *kubernetesprocessor) error { + p.waitForMetadataTimeout = timeout + return nil + } +} diff --git a/processor/k8sattributesprocessor/processor.go b/processor/k8sattributesprocessor/processor.go index 5d63cbbd100a..98499f5e5473 100644 --- a/processor/k8sattributesprocessor/processor.go +++ b/processor/k8sattributesprocessor/processor.go @@ -7,6 +7,7 @@ import ( "context" "fmt" "strconv" + "time" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/component/componentstatus" @@ -27,17 +28,19 @@ const ( ) type kubernetesprocessor struct { - cfg component.Config - options []option - telemetrySettings component.TelemetrySettings - logger *zap.Logger - apiConfig k8sconfig.APIConfig - kc kube.Client - passthroughMode bool - rules kube.ExtractionRules - filters kube.Filters - podAssociations []kube.Association - podIgnore kube.Excludes + cfg component.Config + options []option + telemetrySettings component.TelemetrySettings + logger *zap.Logger + apiConfig k8sconfig.APIConfig + kc kube.Client + passthroughMode bool + rules kube.ExtractionRules + filters kube.Filters + podAssociations []kube.Association + podIgnore kube.Excludes + waitForMetadata bool + waitForMetadataTimeout time.Duration } func (kp *kubernetesprocessor) initKubeClient(set component.TelemetrySettings, kubeClient kube.ClientProvider) error { @@ -45,7 +48,7 @@ func (kp *kubernetesprocessor) initKubeClient(set component.TelemetrySettings, k kubeClient = kube.New } if !kp.passthroughMode { - kc, err := kubeClient(set, kp.apiConfig, kp.rules, kp.filters, kp.podAssociations, kp.podIgnore, nil, nil, nil, nil) + kc, err := kubeClient(set, kp.apiConfig, kp.rules, kp.filters, kp.podAssociations, kp.podIgnore, nil, nil, nil, nil, kp.waitForMetadata, kp.waitForMetadataTimeout) if err != nil { return err } @@ -60,7 +63,7 @@ func (kp *kubernetesprocessor) Start(_ context.Context, host component.Host) err for _, opt := range allOptions { if err := opt(kp); err != nil { componentstatus.ReportStatus(host, componentstatus.NewFatalErrorEvent(err)) - return nil + return err } } @@ -69,11 +72,15 @@ func (kp *kubernetesprocessor) Start(_ context.Context, host component.Host) err err := kp.initKubeClient(kp.telemetrySettings, kubeClientProvider) if err != nil { componentstatus.ReportStatus(host, componentstatus.NewFatalErrorEvent(err)) - return nil + return err } } if !kp.passthroughMode { - go kp.kc.Start() + err := kp.kc.Start() + if err != nil { + componentstatus.ReportStatus(host, componentstatus.NewFatalErrorEvent(err)) + return err + } } return nil } diff --git a/processor/k8sattributesprocessor/processor_test.go b/processor/k8sattributesprocessor/processor_test.go index b8cbf1ec66b0..790c612549b2 100644 --- a/processor/k8sattributesprocessor/processor_test.go +++ b/processor/k8sattributesprocessor/processor_test.go @@ -266,7 +266,7 @@ func TestNewProcessor(t *testing.T) { } func TestProcessorBadClientProvider(t *testing.T) { - clientProvider := func(_ component.TelemetrySettings, _ k8sconfig.APIConfig, _ kube.ExtractionRules, _ kube.Filters, _ []kube.Association, _ kube.Excludes, _ kube.APIClientsetProvider, _ kube.InformerProvider, _ kube.InformerProviderNamespace, _ kube.InformerProviderReplicaSet) (kube.Client, error) { + clientProvider := func(_ component.TelemetrySettings, _ k8sconfig.APIConfig, _ kube.ExtractionRules, _ kube.Filters, _ []kube.Association, _ kube.Excludes, _ kube.APIClientsetProvider, _ kube.InformerProvider, _ 
kube.InformerProviderNamespace, _ kube.InformerProviderReplicaSet, _ bool, _ time.Duration) (kube.Client, error) { return nil, fmt.Errorf("bad client error") }
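
For reference, the two options introduced by this patch sit under the `k8sattributes` processor in a Collector configuration. The snippet below is an illustrative sketch only: `wait_for_metadata` and `wait_for_metadata_timeout` come from this change, while the `otlp` receiver, `debug` exporter, pipeline wiring, and the 30s value are assumptions chosen for the example, not part of the patch.

```yaml
receivers:
  otlp:
    protocols:
      grpc:

processors:
  k8sattributes:
    # Block processor (and therefore Collector) start-up until the initial
    # k8s metadata sync completes.
    wait_for_metadata: true
    # Fail start-up with an error if the sync takes longer than this.
    wait_for_metadata_timeout: 30s

exporters:
  debug:

service:
  pipelines:
    traces:
      receivers: [otlp]
      processors: [k8sattributes]
      exporters: [debug]
```

With `wait_for_metadata: false` (the default), start-up behavior is unchanged and telemetry received before the first sync simply passes through without k8s attributes, as described in the README section above.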