Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add unit test for block-builder #4289

Merged
merged 4 commits into from
Nov 12, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion cmd/tempo/app/modules.go
Original file line number Diff line number Diff line change
Expand Up @@ -658,7 +658,7 @@ func (t *App) setupModuleManager() error {
// composite targets
SingleBinary: {Compactor, QueryFrontend, Querier, Ingester, Distributor, MetricsGenerator},
ScalableSingleBinary: {SingleBinary},
//GeneratorBuilder: {MetricsGenerator, BlockBuilder},
// GeneratorBuilder: {MetricsGenerator, BlockBuilder},
}

for mod, targets := range deps {
Expand Down
1 change: 0 additions & 1 deletion integration/e2e/ingest/ingest_storage_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -104,5 +104,4 @@ func TestIngest(t *testing.T) {
require.NoError(t, err)

util.SearchStreamAndAssertTrace(t, context.Background(), grpcClient, info, now.Add(-20*time.Minute).Unix(), now.Unix())

}
2 changes: 1 addition & 1 deletion integration/e2e/ingest/kafka.go
Original file line number Diff line number Diff line change
Expand Up @@ -39,7 +39,7 @@ func NewKafka() *e2e.HTTPService {
"kafka",
kafkaImage,
e2e.NewCommand("/etc/confluent/docker/run"),
//e2e.NewCmdReadinessProbe(e2e.NewCommand("kafka-topics", "--bootstrap-server", "broker:29092", "--list")),
// e2e.NewCmdReadinessProbe(e2e.NewCommand("kafka-topics", "--bootstrap-server", "broker:29092", "--list")),
e2e.NewCmdReadinessProbe(e2e.NewCommand("sh", "-c", "nc -z localhost 9092 || exit 1")), // TODO: A bit unstable, sometimes it fails
9092,
29092,
Expand Down
159 changes: 159 additions & 0 deletions modules/blockbuilder/blockbuilder_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,159 @@
package blockbuilder

import (
"context"
"errors"
"testing"
"time"

"github.com/grafana/dskit/flagext"
"github.com/grafana/dskit/ring"
"github.com/grafana/dskit/services"
"github.com/grafana/tempo/modules/storage"
"github.com/grafana/tempo/pkg/ingest"
"github.com/grafana/tempo/pkg/ingest/testkafka"
"github.com/grafana/tempo/pkg/util"
"github.com/grafana/tempo/pkg/util/test"
"github.com/grafana/tempo/tempodb"
"github.com/grafana/tempo/tempodb/backend"
"github.com/grafana/tempo/tempodb/backend/local"
"github.com/grafana/tempo/tempodb/encoding"
"github.com/grafana/tempo/tempodb/encoding/common"
"github.com/grafana/tempo/tempodb/wal"
"github.com/stretchr/testify/require"
"github.com/twmb/franz-go/pkg/kgo"
"github.com/twmb/franz-go/pkg/kmsg"
"go.uber.org/atomic"
)

// testTopic is the Kafka topic used by the block-builder tests.
const testTopic = "test-topic"

// TestBlockbuilder exercises the happy path end to end: records produced to
// Kafka are consumed by the block-builder, the offsets are committed back to
// the broker, and the resulting block is flushed to the backing store.
func TestBlockbuilder(t *testing.T) {
	ctx, cancel := context.WithCancelCause(context.Background())
	t.Cleanup(func() { cancel(errors.New("test done")) })

	// Use the shared testTopic constant so the cluster and the
	// block-builder config (see blockbuilderConfig) stay in sync.
	k, address := testkafka.CreateClusterWithoutCustomConsumerGroupsSupport(t, 1, testTopic)
	t.Cleanup(k.Close)

	// Count offset commits so we can tell when the record has been consumed.
	kafkaCommits := atomic.NewInt32(0)
	k.ControlKey(kmsg.OffsetCommit.Int16(), func(kmsg.Request) (kmsg.Response, error, bool) {
		kafkaCommits.Add(1)
		return nil, nil, false // false: let the fake cluster also handle the request
	})

	store := newStore(t)
	store.EnablePolling(ctx, nil)
	cfg := blockbuilderConfig(t, address)

	b := New(cfg, test.NewTestingLogger(t), newPartitionRingReader(), &mockOverrides{}, store)
	require.NoError(t, services.StartAndAwaitRunning(ctx, b))
	t.Cleanup(func() {
		require.NoError(t, services.StopAndAwaitTerminated(ctx, b))
	})

	req := test.MakePushBytesRequest(t, 10, nil)
	records, err := ingest.Encode(0, util.FakeTenantID, req, 1_000_000)
	require.NoError(t, err)

	client := newKafkaClient(t, cfg.IngestStorageConfig.Kafka)

	res := client.ProduceSync(ctx, records...)
	require.NoError(t, res.FirstErr())

	// Wait for the record to be consumed and its offset committed.
	require.Eventually(t, func() bool {
		return kafkaCommits.Load() > 0
	}, time.Minute, time.Second)

	// Wait for the block to be flushed and discovered by blocklist polling.
	require.Eventually(t, func() bool {
		return len(store.BlockMetas(util.FakeTenantID)) == 1
	}, time.Minute, time.Second)
}

// blockbuilderConfig returns a block-builder Config suitable for tests,
// pointing the Kafka client at the given broker address and assigning only
// partition 0 with short cycle/lookback intervals.
func blockbuilderConfig(_ *testing.T, address string) Config {
	var cfg Config
	flagext.DefaultValues(&cfg)
	flagext.DefaultValues(&cfg.blockConfig)
	flagext.DefaultValues(&cfg.IngestStorageConfig.Kafka)

	kafka := &cfg.IngestStorageConfig.Kafka
	kafka.Address = address
	kafka.Topic = testTopic
	kafka.ConsumerGroup = "test-consumer-group"

	cfg.AssignedPartitions = []int32{0}
	cfg.LookbackOnNoCommit = time.Minute
	cfg.ConsumeCycleDuration = 5 * time.Second

	return cfg
}

// newStore creates a storage.Store backed by the local filesystem in a
// per-test temp directory, with fast blocklist polling so freshly flushed
// blocks become visible quickly.
func newStore(t *testing.T) storage.Store {
	dir := t.TempDir()

	cfg := storage.Config{
		Trace: tempodb.Config{
			Backend: backend.Local,
			Local:   &local.Config{Path: dir},
			Block: &common.BlockConfig{
				IndexDownsampleBytes: 2,
				BloomFP:              0.01,
				BloomShardSizeBytes:  100_000,
				Version:              encoding.LatestEncoding().Version(),
				Encoding:             backend.EncLZ4_1M,
				IndexPageSizeBytes:   1000,
			},
			WAL:                   &wal.Config{Filepath: dir},
			BlocklistPoll:         5 * time.Second,
			BlocklistPollFallback: true,
		},
	}

	s, err := storage.NewStore(cfg, nil, test.NewTestingLogger(t))
	require.NoError(t, err)
	return s
}

// mockPartitionRingReader is a ring.PartitionRingReader stub that serves a
// static ring containing a single active partition (partition 0).
type mockPartitionRingReader struct {
	pr *ring.PartitionRing
}

var _ ring.PartitionRingReader = (*mockPartitionRingReader)(nil)

// newPartitionRingReader builds a reader whose ring holds only active
// partition 0, matching the partition assigned in blockbuilderConfig.
func newPartitionRingReader() *mockPartitionRingReader {
	desc := ring.PartitionRingDesc{
		Partitions: map[int32]ring.PartitionDesc{
			0: {State: ring.PartitionActive},
		},
	}
	return &mockPartitionRingReader{pr: ring.NewPartitionRing(desc)}
}

// PartitionRing implements ring.PartitionRingReader.
func (m *mockPartitionRingReader) PartitionRing() *ring.PartitionRing {
	return m.pr
}

// mockOverrides is a minimal Overrides implementation for tests. Its zero
// value reports no dedicated columns for any tenant.
type mockOverrides struct {
	dc backend.DedicatedColumns
}

var _ Overrides = (*mockOverrides)(nil)

// DedicatedColumns returns the configured columns regardless of tenant.
func (m *mockOverrides) DedicatedColumns(string) backend.DedicatedColumns {
	return m.dc
}

// newKafkaClient returns a franz-go producer wired to the test cluster. The
// client is closed automatically when the test finishes.
func newKafkaClient(t *testing.T, config ingest.KafkaConfig) *kgo.Client {
	opts := []kgo.Opt{
		kgo.SeedBrokers(config.Address),
		kgo.AllowAutoTopicCreation(),
		kgo.DefaultProduceTopic(config.Topic),
		// The test chooses the partition of each record explicitly.
		kgo.RecordPartitioner(kgo.ManualPartitioner()),
	}

	producer, err := kgo.NewClient(opts...)
	require.NoError(t, err)
	t.Cleanup(producer.Close)

	return producer
}
8 changes: 8 additions & 0 deletions modules/blockbuilder/config.go
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,10 @@ type BlockConfig struct {
BlockCfg common.BlockConfig `yaml:"-,inline"`
}

// RegisterFlags registers the block flags with no prefix. It allows a
// BlockConfig to be used with helpers that expect a plain RegisterFlags
// method, such as flagext.DefaultValues.
func (c *BlockConfig) RegisterFlags(f *flag.FlagSet) {
	c.RegisterFlagsAndApplyDefaults("", f)
}

func (c *BlockConfig) RegisterFlagsAndApplyDefaults(prefix string, f *flag.FlagSet) {
f.Uint64Var(&c.MaxBlockBytes, prefix+".max-block-bytes", 20*1024*1024, "Maximum size of a block.") // TODO - Review default

Expand All @@ -38,6 +42,10 @@ func (c *Config) Validate() error {
return nil
}

// RegisterFlags registers the block-builder flags with no prefix. It allows a
// Config to be used with helpers that expect a plain RegisterFlags method,
// such as flagext.DefaultValues.
func (c *Config) RegisterFlags(f *flag.FlagSet) {
	c.RegisterFlagsAndApplyDefaults("", f)
}

func (c *Config) RegisterFlagsAndApplyDefaults(prefix string, f *flag.FlagSet) {
f.Var(newPartitionAssignmentVar(&c.AssignedPartitions), prefix+".assigned-partitions", "List of partitions assigned to this block builder.")
f.DurationVar(&c.ConsumeCycleDuration, prefix+".consume-cycle-duration", 5*time.Minute, "Interval between consumption cycles.")
Expand Down
6 changes: 5 additions & 1 deletion modules/blockbuilder/partition_writer.go
Original file line number Diff line number Diff line change
Expand Up @@ -43,7 +43,11 @@ func newPartitionProcessor(logger log.Logger, blockCfg BlockConfig, overrides Ov
}

func (p *writer) PushBytes(tenant string, req *tempopb.PushBytesRequest) error {
level.Info(p.logger).Log("msg", "pushing bytes", "tenant", tenant, "num_traces", len(req.Traces))
level.Info(p.logger).Log(
"msg", "pushing bytes",
"tenant", tenant,
"num_traces", len(req.Traces),
)

i, err := p.instanceForTenant(tenant)
if err != nil {
Expand Down
23 changes: 17 additions & 6 deletions modules/blockbuilder/tenant_store.go
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,17 @@ import (
"github.com/grafana/tempo/tempodb/encoding"
"github.com/grafana/tempo/tempodb/encoding/common"
"github.com/grafana/tempo/tempodb/wal"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
)

// metricBlockBuilderFlushedBlocks counts blocks written to the backend by the
// block-builder, labeled by tenant (tempo_block_builder_flushed_blocks).
var metricBlockBuilderFlushedBlocks = promauto.NewCounterVec(
	prometheus.CounterOpts{
		Namespace: "tempo",
		Subsystem: "block_builder",
		Name:      "flushed_blocks",
	},
	[]string{"tenant_id"},
)

type tenantStore struct {
Expand All @@ -27,7 +38,6 @@ type tenantStore struct {
}

func newTenantStore(tenantID string, cfg BlockConfig, logger log.Logger, wal *wal.WAL, enc encoding.VersionedEncoding, o Overrides) (*tenantStore, error) {

i := &tenantStore{
tenantID: tenantID,
cfg: cfg,
Expand All @@ -49,14 +59,11 @@ func (i *tenantStore) cutHeadBlock() error {
i.walBlocks = append(i.walBlocks, i.headBlock)
i.headBlock = nil
}

return nil
}

func (i *tenantStore) resetHeadBlock() error {
if err := i.cutHeadBlock(); err != nil {
return err
}

meta := &backend.BlockMeta{
BlockID: backend.NewUUID(), // TODO - Deterministic UUID
TenantID: i.tenantID,
Expand Down Expand Up @@ -106,9 +113,13 @@ func (i *tenantStore) Flush(ctx context.Context, store tempodb.Writer) error {
if err := store.WriteBlock(ctx, block); err != nil {
return err
}
metricBlockBuilderFlushedBlocks.WithLabelValues(i.tenantID).Inc()
}

return nil
// Clear the blocks
i.walBlocks = i.walBlocks[:0]

return i.resetHeadBlock()
}

func (i *tenantStore) buildWriteableBlock(ctx context.Context, b common.WALBlock) (tempodb.WriteableBlock, error) {
Expand Down
2 changes: 1 addition & 1 deletion modules/distributor/distributor_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -1623,7 +1623,7 @@ func prepare(t *testing.T, limits overrides.Config, logger kitlog.Logger) (*Dist
l := dslog.Level{}
_ = l.Set("error")
mw := receiver.MultiTenancyMiddleware()
d, err := New(distributorConfig, clientConfig, ingestersRing, generator_client.Config{}, nil, overrides, mw, logger, l, prometheus.NewPedanticRegistry())
d, err := New(distributorConfig, clientConfig, ingestersRing, generator_client.Config{}, nil, nil, overrides, mw, logger, l, prometheus.NewPedanticRegistry())
require.NoError(t, err)

return d, ingesters
Expand Down
2 changes: 1 addition & 1 deletion modules/ingester/instance_search_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -340,7 +340,7 @@ func TestInstanceSearchTagAndValuesV2(t *testing.T) {

// test that we are creating cache files for search tag values v2
// check that we have cache files for all complete blocks for all the cache keys
limit := i.limiter.limits.MaxBytesPerTagValuesQuery("fake")
limit := i.limiter.Limits().MaxBytesPerTagValuesQuery("fake")
cacheKeys := cacheKeysForTestSearchTagValuesV2(tagKey, queryThatMatches, limit)
for _, cacheKey := range cacheKeys {
for _, b := range i.completeBlocks {
Expand Down
3 changes: 1 addition & 2 deletions pkg/ingest/blockbuilder/consumer.go
Original file line number Diff line number Diff line change
Expand Up @@ -10,8 +10,7 @@ import (
"github.com/twmb/franz-go/plugin/kprom"
)

type Consumer struct {
}
// Consumer carries no state of its own; presumably all dependencies are
// passed per call. NOTE(review): full usage is not visible in this diff —
// confirm its responsibilities before documenting further.
type Consumer struct{}

type PartitionsFn func(context.Context, *kgo.Client, map[string][]int32)

Expand Down
3 changes: 2 additions & 1 deletion pkg/ingest/encoding.go
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,8 @@ func Encode(partitionID int32, tenantID string, req *tempopb.PushBytesRequest, m

var records []*kgo.Record
batch := encoderPool.Get().(*tempopb.PushBytesRequest)
defer encoderPool.Put(req)
batch.Reset()
defer encoderPool.Put(batch)

if batch.Traces == nil {
batch.Traces = make([]tempopb.PreallocBytes, 0, 1024) // TODO - Why 1024? Lucky number?
Expand Down
Loading
Loading