diff --git a/CHANGELOG.md b/CHANGELOG.md
index d338104a927..fcc700b82d4 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -49,8 +49,9 @@
 * [ENHANCEMENT] Add disk caching in ingester SearchTagValuesV2 for completed blocks [#4069](https://github.com/grafana/tempo/pull/4069) (@electron0zero)
 * [ENHANCEMENT] chore: remove gofakeit dependency [#4274](https://github.com/grafana/tempo/pull/4274) (@javiermolinar)
 * [ENHANCEMENT] Add a max flush attempts and metric to the metrics generator [#4254](https://github.com/grafana/tempo/pull/4254) (@joe-elliott)
-* [ENHANCEMENT] Collection of query-frontend changes to reduce allocs. [#4242]https://github.com/grafana/tempo/pull/4242 (@joe-elliott)
+* [ENHANCEMENT] Collection of query-frontend changes to reduce allocs. [#4242](https://github.com/grafana/tempo/pull/4242) (@joe-elliott)
 * [ENHANCEMENT] Added `insecure-skip-verify` option in tempo-cli to skip SSL certificate validation when connecting to the S3 backend. [#44236](https://github.com/grafana/tempo/pull/4259) (@faridtmammadov)
+* [ENHANCEMENT] Chore: delete spanlogger. [#4312](https://github.com/grafana/tempo/pull/4312) (@javiermolinar)
 * [ENHANCEMENT] Add `invalid_utf8` to reasons spanmetrics will discard spans. [#4293](https://github.com/grafana/tempo/pull/4293) (@zalegrala)
 * [BUGFIX] Replace hedged requests roundtrips total with a counter. [#4063](https://github.com/grafana/tempo/pull/4063) [#4078](https://github.com/grafana/tempo/pull/4078) (@galalen)
 * [BUGFIX] Metrics generators: Correctly drop from the ring before stopping ingestion to reduce drops during a rollout. [#4101](https://github.com/grafana/tempo/pull/4101) (@joe-elliott)
diff --git a/pkg/cache/redis_cache.go b/pkg/cache/redis_cache.go
index c5494a92d79..4da1d5d1b38 100644
--- a/pkg/cache/redis_cache.go
+++ b/pkg/cache/redis_cache.go
@@ -13,9 +13,6 @@ import (
     "github.com/prometheus/client_golang/prometheus/promauto"
     "go.opentelemetry.io/otel/attribute"
     "go.opentelemetry.io/otel/trace"
-
-    util_log "github.com/grafana/tempo/pkg/util/log"
-    "github.com/grafana/tempo/pkg/util/spanlogger"
 )
 
 // RedisCache type caches chunks in redis
@@ -28,7 +25,6 @@ type RedisCache struct {
 
 // NewRedisCache creates a new RedisCache
 func NewRedisCache(name string, redisClient *RedisClient, reg prometheus.Registerer, logger log.Logger) *RedisCache {
-    util_log.WarnExperimentalUse("Redis cache")
     cache := &RedisCache{
         name:  name,
         redis: redisClient,
@@ -68,16 +64,14 @@ func (c *RedisCache) Fetch(ctx context.Context, keys []string) (found []string,
     var items [][]byte
     // Run a tracked request, using c.requestDuration to monitor requests.
     err := measureRequest(ctx, method, c.requestDuration, redisStatusCode, func(ctx context.Context) error {
-        log := spanlogger.FromContext(ctx)
+        t := trace.SpanFromContext(ctx)
         var err error
         items, err = c.redis.MGet(ctx, keys)
         if err != nil {
-            // nolint:errcheck
-            log.Error(err)
             level.Error(c.logger).Log("msg", "failed to get from redis", "name", c.name, "err", err)
             return err
         }
-        log.AddEvent("cache.keys.found", trace.WithAttributes(attribute.Int("keys", len(keys))))
+        t.AddEvent("cache.keys.found", trace.WithAttributes(attribute.Int("keys", len(keys))))
         return nil
     })
     if err != nil {
@@ -101,22 +95,20 @@ func (c *RedisCache) FetchKey(ctx context.Context, key string) (buf []byte, foun
     const method = "RedisCache.Get"
     // Run a tracked request, using c.requestDuration to monitor requests.
     err := measureRequest(ctx, method, c.requestDuration, redisStatusCode, func(ctx context.Context) error {
-        log := spanlogger.FromContext(ctx)
+        t := trace.SpanFromContext(ctx)
         var err error
         buf, err = c.redis.Get(ctx, key)
         if err != nil {
-            // nolint:errcheck
-            log.Error(err)
             if errors.Is(err, redis.Nil) {
                 level.Debug(c.logger).Log("msg", "failed to get key from redis", "name", c.name, "err", err, "key", key)
-                log.AddEvent("cache.key.missed", trace.WithAttributes(attribute.String("key", key)))
+                t.AddEvent("cache.key.missed", trace.WithAttributes(attribute.String("key", key)))
             } else {
                 level.Error(c.logger).Log("msg", "error requesting key from redis", "name", c.name, "err", err, "key", key)
             }
             return err
         }
-        log.AddEvent("cache.key.found", trace.WithAttributes(attribute.String("key", key)))
+        t.AddEvent("cache.key.found", trace.WithAttributes(attribute.String("key", key)))
         return nil
     })
     if err != nil {
diff --git a/pkg/util/spanlogger/spanlogger.go b/pkg/util/spanlogger/spanlogger.go
deleted file mode 100644
index 3336a185951..00000000000
--- a/pkg/util/spanlogger/spanlogger.go
+++ /dev/null
@@ -1,119 +0,0 @@
-package spanlogger
-
-import (
-    "context"
-    "fmt"
-
-    "github.com/go-kit/log"
-    "github.com/go-kit/log/level"
-    "go.opentelemetry.io/otel"
-    "go.opentelemetry.io/otel/attribute"
-    "go.opentelemetry.io/otel/codes"
-    "go.opentelemetry.io/otel/trace"
-    "go.opentelemetry.io/otel/trace/noop"
-
-    "github.com/grafana/dskit/tenant"
-    util_log "github.com/grafana/tempo/pkg/util/log"
-)
-
-type loggerCtxMarker struct{}
-
-const (
-    TenantIDTagName = "tenant_ids"
-)
-
-var (
-    tracer          = otel.Tracer("")
-    defaultNoopSpan = noop.Span{}
-    loggerCtxKey    = &loggerCtxMarker{}
-)
-
-// SpanLogger unifies tracing and logging, to reduce repetition.
-type SpanLogger struct {
-    log.Logger
-    trace.Span
-}
-
-// New makes a new SpanLogger, where logs will be sent to the global logger.
-func New(ctx context.Context, method string, kvps ...interface{}) (*SpanLogger, context.Context) {
-    return NewWithLogger(ctx, util_log.Logger, method, kvps...)
-}
-
-// NewWithLogger makes a new SpanLogger with a custom log.Logger to send logs
-// to. The provided context will have the logger attached to it and can be
-// retrieved with FromContext or FromContextWithFallback.
-func NewWithLogger(ctx context.Context, l log.Logger, method string, kvps ...interface{}) (*SpanLogger, context.Context) {
-    ctx, span := tracer.Start(ctx, method)
-    if ids, _ := tenant.TenantIDs(ctx); len(ids) > 0 {
-        span.SetAttributes(attribute.StringSlice(TenantIDTagName, ids))
-    }
-    logger := &SpanLogger{
-        Logger: log.With(util_log.WithContext(ctx, l), "method", method),
-        Span:   span,
-    }
-    if len(kvps) > 0 {
-        level.Debug(logger).Log(kvps...)
-    }
-
-    ctx = context.WithValue(ctx, loggerCtxKey, l)
-    return logger, ctx
-}
-
-// FromContext returns a span logger using the current parent span. If there
-// is no parent span, the SpanLogger will only log to the logger
-// in the context. If the context doesn't have a logger, the global logger
-// is used.
-func FromContext(ctx context.Context) *SpanLogger {
-    return FromContextWithFallback(ctx, util_log.Logger)
-}
-
-// FromContextWithFallback returns a span logger using the current parent span.
-// IF there is no parent span, the SpanLogger will only log to the logger
-// within the context. If the context doesn't have a logger, the fallback
-// logger is used.
-func FromContextWithFallback(ctx context.Context, fallback log.Logger) *SpanLogger {
-    logger, ok := ctx.Value(loggerCtxKey).(log.Logger)
-    if !ok {
-        logger = fallback
-    }
-    sp := trace.SpanFromContext(ctx)
-    if sp == nil {
-        sp = defaultNoopSpan
-    }
-    return &SpanLogger{
-        Logger: util_log.WithContext(ctx, logger),
-        Span:   sp,
-    }
-}
-
-// Log implements gokit's Logger interface; sends logs to underlying logger and
-// also puts the on the spans.
-func (s *SpanLogger) Log(kvps ...interface{}) error {
-    s.Logger.Log(kvps...)
-
-    for i := 0; i*2 < len(kvps); i++ {
-        key, ok := kvps[i*2].(string)
-        if !ok {
-            return fmt.Errorf("non-string key (pair #%d): %T", i, kvps[i*2])
-        }
-
-        switch t := kvps[i*2+1].(type) {
-        case bool:
-            s.Span.SetAttributes(attribute.Bool(key, t))
-        case string:
-            s.Span.SetAttributes(attribute.String(key, t))
-        }
-    }
-    return nil
-}
-
-// Error sets error flag and logs the error on the span, if non-nil. Returns the err passed in.
-func (s *SpanLogger) Error(err error) error {
-    if err == nil {
-        return nil
-    }
-
-    s.Span.SetStatus(codes.Error, "")
-    s.Span.RecordError(err)
-    return err
-}
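
For reference, below is a minimal sketch of the calling pattern the `redis_cache.go` hunks above move to: pull the span out of the context with the plain OpenTelemetry API and log through an explicit go-kit logger, instead of going through the combined `SpanLogger`. The package name and the `fetchFromBackend`/`fetchKeys` helpers are illustrative assumptions, not code from this PR; only the OTel and go-kit calls mirror what the diff does.

```go
package cachedemo

import (
	"context"
	"errors"

	"github.com/go-kit/log"
	"github.com/go-kit/log/level"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/codes"
	"go.opentelemetry.io/otel/trace"
)

// fetchFromBackend stands in for a real cache client call such as c.redis.MGet;
// it is purely illustrative and not part of the Tempo codebase.
func fetchFromBackend(_ context.Context, keys []string) ([][]byte, error) {
	if len(keys) == 0 {
		return nil, errors.New("no keys requested")
	}
	return make([][]byte, len(keys)), nil
}

// fetchKeys shows the post-spanlogger pattern: the span comes straight from
// the context, and logging goes through an explicit go-kit logger.
func fetchKeys(ctx context.Context, logger log.Logger, keys []string) ([][]byte, error) {
	span := trace.SpanFromContext(ctx) // replaces spanlogger.FromContext(ctx)

	items, err := fetchFromBackend(ctx, keys)
	if err != nil {
		// The deleted spanlogger.Error helper marked the span and recorded the
		// error; these two plain OTel calls do the same if that is still wanted.
		span.RecordError(err)
		span.SetStatus(codes.Error, "")
		level.Error(logger).Log("msg", "failed to fetch keys", "err", err)
		return nil, err
	}

	span.AddEvent("cache.keys.found", trace.WithAttributes(attribute.Int("keys", len(keys))))
	return items, nil
}
```

Compared with `SpanLogger`, this keeps tracing and logging decoupled; the cost is that errors must be written to the span and to the logger as separate calls.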