[pkg/stanza/fileconsumer] Add ability to read files asynchronously #25884

Closed
27 changes: 27 additions & 0 deletions .chloggen/add-threadpool-featuregate.yaml
@@ -0,0 +1,27 @@
# Use this changelog template to create an entry for release notes.

# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
change_type: 'enhancement'

# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver)
component: fileconsumer

# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
note: Added a new feature gate that enables a thread pool mechanism so that the `poll_interval` setting is respected.

# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists.
issues: [18908]

# (Optional) One or more lines of additional information to render under the primary note.
# These lines will be padded with 2 spaces and then inserted directly into the document.
# Use pipe (|) for multiline entries.
subtext:

# If your change doesn't affect end users or the exported elements of any package,
# you should instead start your pull request title with [chore] or use the "Skip Changelog" label.
# Optional: The change log or logs in which this entry should be included.
# e.g. '[user]' or '[user, api]'
# Include 'user' if the change is relevant to end users.
# Include 'api' if there is a change to a library API.
# Default: '[user]'
change_logs: []
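
For end users the gate is opt-in. A typical way to enable it is the collector's --feature-gates flag (the binary name here is illustrative):

otelcol-contrib --feature-gates=filelog.useThreadPool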
124 changes: 124 additions & 0 deletions pkg/stanza/fileconsumer/benchmark_test.go
@@ -4,10 +4,15 @@
package fileconsumer

import (
"context"
"fmt"
"os"
"path/filepath"
"strings"
"sync"
"testing"

"github.com/google/uuid"
"github.com/stretchr/testify/require"

"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/internal/fingerprint"
@@ -20,6 +25,13 @@ type fileInputBenchmark struct {
config func() *Config
}

type fileSizeBenchmark struct {
logs []int
maxConcurrent int
name string
desiredFiles int
}

type benchFile struct {
*os.File
log func(int)
@@ -180,3 +192,115 @@ func BenchmarkFileInput(b *testing.B) {
})
}
}

// max returns the largest value in nums (zero if nums is empty or all values are negative).
func max(nums ...int) int {
_max := 0
for _, n := range nums {
if _max < n {
_max = n
}
}
return _max
}

func (fileSize fileSizeBenchmark) createFiles(b *testing.B, rootDir string) int {
// The number of logs written to each file is selected in round-robin fashion from fileSize.logs.
// e.g. fileSize.logs = [10,100] creates one half of the files with 10*b.N lines and the other half with 100*b.N lines;
// fileSize.logs = [10,100,500] creates one third with 10*b.N lines, one third with 100*b.N, and the remaining third with 500*b.N.
getMessage := func(m int) string { return fmt.Sprintf("message %d", m) }
logs := make([]string, 0, max(fileSize.logs...)*b.N) // collect all the logs up front and reuse them when writing the files
for i := 0; i < max(fileSize.logs...)*b.N; i++ {
logs = append(logs, getMessage(i))
}
totalLogs := 0
for i := 0; i < fileSize.desiredFiles; i++ {
// uuid.NewString() adds randomness to the file name and to the first line of each file,
// so the file consumer does not treat the files as duplicates based on fingerprint.
file := openFile(b, filepath.Join(rootDir, fmt.Sprintf("file_%s.log", uuid.NewString())))
linesToWrite := b.N * fileSize.logs[i%len(fileSize.logs)]
file.WriteString(uuid.NewString() + strings.Join(logs[:linesToWrite], "\n") + "\n")
totalLogs += linesToWrite
}
return totalLogs
}
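
As a concrete example of the round-robin above: with b.N = 1, logs = [10, 1000], and desiredFiles = 100, createFiles writes 50 files with 10 lines each and 50 files with 1000 lines each, so it returns totalLogs = 50*10 + 50*1000 = 50,500.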

func BenchmarkFileSizeVarying(b *testing.B) {
testCases := []fileSizeBenchmark{
{
name: "varying_sizes",
logs: []int{10, 1000},
maxConcurrent: 50,
desiredFiles: 100,
},
{
name: "varying_sizes_more_files",
logs: []int{10, 1000},
maxConcurrent: 50,
desiredFiles: 200,
},
{
name: "varying_sizes_more_files_throttled",
logs: []int{10, 1000},
maxConcurrent: 30,
desiredFiles: 200,
},
{
name: "same_size_small",
logs: []int{10},
maxConcurrent: 50,
desiredFiles: 50,
},
{
name: "same_size_small_throttled",
logs: []int{10},
maxConcurrent: 10,
desiredFiles: 100,
},
}
for _, fileSize := range testCases {
b.Run(fileSize.name, func(b *testing.B) {
rootDir := b.TempDir()
cfg := NewConfig().includeDir(rootDir)
cfg.StartAt = "beginning"
cfg.MaxConcurrentFiles = fileSize.maxConcurrent
emitCalls := make(chan *emitParams, max(fileSize.logs...)*b.N) // large enough to hold all the logs

operator, _ := buildTestManager(b, cfg, withEmitChan(emitCalls))
operator.persister = testutil.NewMockPersister("test")
defer func() {
require.NoError(b, operator.Stop())
}()

// create first set of files
totalLogs := fileSize.createFiles(b, rootDir)
var wg sync.WaitGroup
wg.Add(1)
go func() {
defer wg.Done()
var once sync.Once
// wait for logs from two poll() cycles
for i := 0; i < totalLogs*2; i++ {
once.Do(func() {
// Reset once we get the first log
b.ResetTimer()
})
<-emitCalls
}
// Stop the timer, as we're measuring log throughput
b.StopTimer()
}()
operator.poll(context.Background())

// create new set of files, call poll() again
fileSize.createFiles(b, rootDir)
operator.poll(context.Background())

wg.Wait()
})
}
}
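
These benchmarks run with the standard Go tooling; a typical invocation (flags illustrative) is:

go test -run='^$' -bench=BenchmarkFileSizeVarying ./pkg/stanza/fileconsumer/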
17 changes: 15 additions & 2 deletions pkg/stanza/fileconsumer/config.go
@@ -18,6 +18,7 @@ import (
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/internal/fingerprint"
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/internal/header"
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/internal/splitter"
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/internal/trie"
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/matcher"
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator"
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper"
@@ -40,6 +41,13 @@ var allowFileDeletion = featuregate.GlobalRegistry().MustRegister(
featuregate.WithRegisterReferenceURL("https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/16314"),
)

var useThreadPool = featuregate.GlobalRegistry().MustRegister(
"filelog.useThreadPool",
featuregate.StageAlpha,
featuregate.WithRegisterDescription("When enabled, log collection switches to a thread pool model, respecting the `poll_interval` config."),
// featuregate.WithRegisterReferenceURL("https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/16314"),
)

var AllowHeaderMetadataParsing = featuregate.GlobalRegistry().MustRegister(
"filelog.allowHeaderMetadataParsing",
featuregate.StageBeta,
@@ -160,7 +168,7 @@ func (c Config) buildManager(logger *zap.SugaredLogger, emit emit.Callback, fact
return nil, err
}

return &Manager{
manager := Manager{
SugaredLogger: logger.With("component", "fileconsumer"),
cancel: func() {},
readerFactory: readerFactory{
@@ -187,7 +195,12 @@
deleteAfterRead: c.DeleteAfterRead,
knownFiles: make([]*reader, 0, 10),
seenPaths: make(map[string]struct{}, 100),
}, nil
}
if useThreadPool.IsEnabled() {
manager.readerChan = make(chan readerEnvelope, c.MaxConcurrentFiles/2)
manager.trie = trie.NewTrie()
}
return &manager, nil
}

func (c Config) validate() error {
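startConsumers and readerEnvelope are not shown in this hunk. As a rough sketch of the model the gate enables (the worker count and the envelope field name are assumptions inferred from the channel capacity above, not the verbatim implementation), the workers simply drain readerChan until it is closed:

// Illustrative sketch only, not the PR's verbatim code.
func (m *Manager) startConsumersSketch(ctx context.Context, workers int) {
	for i := 0; i < workers; i++ {
		m.workerWg.Add(1)
		go func() {
			defer m.workerWg.Done()
			for env := range m.readerChan { // channel is closed during shutdown by stopConsumers
				env.reader.ReadToEnd(ctx) // assumed envelope field; ReadToEnd is the existing reader entry point
			}
		}()
	}
}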
32 changes: 32 additions & 0 deletions pkg/stanza/fileconsumer/file.go
@@ -15,6 +15,7 @@ import (
"go.uber.org/zap"

"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/internal/fingerprint"
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/internal/trie"
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/matcher"
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator"
)
@@ -45,6 +46,17 @@ type Manager struct {
seenPaths map[string]struct{}

currentFps []*fingerprint.Fingerprint

// The following fields are used only when the useThreadPool feature gate is enabled
knownFilesLock sync.RWMutex
trieLock sync.RWMutex
once sync.Once

workerWg sync.WaitGroup
readerChan chan readerEnvelope

// trie stores the fingerprints of the files that are currently being consumed
trie *trie.Trie
}

func (m *Manager) Start(persister operator.Persister) error {
@@ -61,6 +73,13 @@
m.Warnf("finding files: %v", err)
}

// If useThreadPool is enabled, start the worker threads
if useThreadPool.IsEnabled() {
m.once.Do(func() {
m.startConsumers(ctx)
})
}

// Start polling goroutine
m.startPoller(ctx)

@@ -71,6 +90,10 @@
func (m *Manager) Stop() error {
m.cancel()
m.wg.Wait()
if useThreadPool.IsEnabled() {
m.stopConsumers()
}

m.roller.cleanup()
for _, reader := range m.knownFiles {
reader.Close()
@@ -103,6 +126,15 @@

// poll checks all the watched paths for new entries
func (m *Manager) poll(ctx context.Context) {
if useThreadPool.IsEnabled() {
m.pollConcurrent(ctx)
} else {
m.pollRegular(ctx)
}
}

// pollRegular checks all the watched paths for new entries (the original, synchronous code path)
func (m *Manager) pollRegular(ctx context.Context) {
// Increment the generation on all known readers
// This is done here because the next generation is about to start
for i := 0; i < len(m.knownFiles); i++ {
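pollConcurrent is also outside this hunk. Per the struct comment above, the trie's job is to skip files whose fingerprints are already being consumed by a worker; a minimal sketch of that check (the HasKey/Put method names and the envelope field are assumptions for illustration):

// Illustrative sketch only, not the PR's verbatim code.
func (m *Manager) tryEnqueueSketch(r *reader) {
	m.trieLock.Lock()
	defer m.trieLock.Unlock()
	if m.trie.HasKey(r.Fingerprint.FirstBytes) { // assumed trie API
		return // a worker is already consuming this file
	}
	m.trie.Put(r.Fingerprint.FirstBytes, true) // assumed trie API
	m.readerChan <- readerEnvelope{reader: r} // assumed envelope field
}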
28 changes: 28 additions & 0 deletions pkg/stanza/fileconsumer/file_test.go
@@ -27,6 +27,20 @@ import (
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/testutil"
)

func TestMain(m *testing.M) {
// Run once with thread pool featuregate enabled
featuregate.GlobalRegistry().Set(useThreadPool.ID(), true) //nolint:all
if code := m.Run(); code > 0 {
os.Exit(code)
}
featuregate.GlobalRegistry().Set(useThreadPool.ID(), false) //nolint:all

// Run once with thread pool featuregate disabled
if code := m.Run(); code > 0 {
os.Exit(code)
}
}

// TestDefaultBehaviors
// - Files are read starting from the end.
// - Logs are tokenized based on newlines.
@@ -1383,6 +1397,7 @@ func TestDeleteAfterRead(t *testing.T) {
cfg.DeleteAfterRead = true
emitCalls := make(chan *emitParams, totalLines)
operator, _ := buildTestManager(t, cfg, withEmitChan(emitCalls))
operator.persister = testutil.NewMockPersister("test")

operator.poll(context.Background())
actualTokens = append(actualTokens, waitForNTokens(t, emitCalls, totalLines)...)
Expand Down Expand Up @@ -1572,6 +1587,14 @@ func TestDeleteAfterRead_SkipPartials(t *testing.T) {

// Stop consuming before long file has been fully consumed
cancel()

// Stop the worker threads.
// The following section is a no-op if the threadpool feature gate is disabled, so there is no need to check it explicitly.
operator.cancel()
if operator.readerChan != nil {
close(operator.readerChan)
}
operator.workerWg.Wait()
wg.Wait()

// short file was fully consumed and should have been deleted
@@ -1654,6 +1677,11 @@ func TestHeaderPersistanceInHeader(t *testing.T) {
// one poll operation occurs between now and when we stop.
op1.poll(context.Background())

// For the threadpool mode, poll is asynchronous, so allow it to complete one poll cycle
if useThreadPool.IsEnabled() {
time.Sleep(500 * time.Millisecond)
}

require.NoError(t, op1.Stop())

writeString(t, temp, "|headerField2: headerValue2\nlog line\n")