[chore]: use testify instead of testing.Fatal or testing.Error in processor (#36374)

mmorel-35 authored Dec 4, 2024
1 parent 386b75e commit ed96f74
Showing 16 changed files with 52 additions and 93 deletions.
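
The files below all follow the same migration: hand-rolled checks built on t.Fatal, t.Fatalf, t.Error, and t.Errorf are replaced with the equivalent testify require and assert helpers. A minimal sketch of the before/after shape, using illustrative names rather than code taken from this diff:

package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// doWork stands in for whatever the real tests call.
func doWork() (int, error) { return 42, nil }

func TestBefore(t *testing.T) {
	got, err := doWork()
	// Old style: manual checks via testing.Fatal / testing.Error.
	if err != nil {
		t.Fatal(err)
	}
	if got != 42 {
		t.Errorf("doWork() = %v, want %v", got, 42)
	}
}

func TestAfter(t *testing.T) {
	got, err := doWork()
	// New style: require aborts the test on failure, assert records the
	// failure and lets the test continue.
	require.NoError(t, err)
	assert.Equal(t, 42, got, "doWork()")
}
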
3 changes: 1 addition & 2 deletions processor/attributesprocessor/factory_test.go
@@ -52,8 +52,7 @@ func TestFactoryCreateTraces_InvalidActions(t *testing.T) {
{Key: "http.status_code", ConvertedType: "array", Action: attraction.CONVERT},
}
ap2, err2 := factory.CreateTraces(context.Background(), processortest.NewNopSettings(), cfg, consumertest.NewNop())
assert.Error(t, err2)
assert.Equal(t, "error creating AttrProc due to invalid value \"array\" in field \"converted_type\" for action \"convert\" at the 0-th action", err2.Error())
require.EqualError(t, err2, "error creating AttrProc due to invalid value \"array\" in field \"converted_type\" for action \"convert\" at the 0-th action")
assert.Nil(t, ap2)
}

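
The hunk above shows the most common shape in this commit: an assert.Error followed by assert.Equal on err.Error() collapses into a single require.EqualError, and substring checks on err.Error() become ErrorContains. A small sketch, again with an invented constructor rather than code from the diff:

package example

import (
	"errors"
	"testing"

	"github.com/stretchr/testify/require"
)

// newClient is illustrative; it stands in for the constructors exercised
// by the real tests.
func newClient(authType string) (*struct{}, error) {
	return nil, errors.New("invalid authType for kubernetes: " + authType)
}

func TestEqualErrorPattern(t *testing.T) {
	c, err := newClient("test-auth-type")

	// Old style (two calls, and err.Error() panics if err is nil):
	//   assert.Error(t, err)
	//   assert.Equal(t, "invalid authType for kubernetes: test-auth-type", err.Error())
	//
	// New style: one call that compares the message and fails cleanly
	// when err is nil.
	require.EqualError(t, err, "invalid authType for kubernetes: test-auth-type")

	// When only a substring matters, ErrorContains replaces
	// Contains(t, err.Error(), ...).
	require.ErrorContains(t, err, "invalid authType")

	require.Nil(t, c)
}
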
@@ -8,6 +8,7 @@ import (
"strings"
"testing"

"github.com/stretchr/testify/assert"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/pmetric"

@@ -137,9 +138,7 @@ func TestMetricIdentity_IsFloatVal(t *testing.T) {
MetricType: pmetric.MetricTypeSum,
MetricValueType: tt.fields.MetricValueType,
}
if got := mi.IsFloatVal(); got != tt.want {
t.Errorf("MetricIdentity.IsFloatVal() = %v, want %v", got, tt.want)
}
assert.Equal(t, tt.want, mi.IsFloatVal(), "MetricIdentity.IsFloatVal()")
})
}
}
@@ -204,9 +203,7 @@ func TestMetricIdentity_IsSupportedMetricType(t *testing.T) {
Attributes: pcommon.NewMap(),
MetricType: tt.fields.MetricType,
}
if got := mi.IsSupportedMetricType(); got != tt.want {
t.Errorf("MetricIdentity.IsSupportedMetricType() = %v, want %v", got, tt.want)
}
assert.Equal(t, tt.want, mi.IsSupportedMetricType(), "MetricIdentity.IsSupportedMetricType()")
})
}
}
@@ -242,9 +242,7 @@ func TestMetricTracker_Convert(t *testing.T) {
IntValue: 100,
},
})
if valid {
t.Error("Expected invalid for non cumulative metric")
}
assert.False(t, valid, "Expected invalid for non cumulative metric")
})
}

@@ -338,7 +336,5 @@ func Test_metricTracker_sweeper(t *testing.T) {
cancel()
for range sweepEvent { // nolint
}
if !closed.Load() {
t.Errorf("Sweeper did not terminate.")
}
assert.True(t, closed.Load(), "Sweeper did not terminate.")
}
4 changes: 1 addition & 3 deletions processor/cumulativetodeltaprocessor/processor_test.go
@@ -623,9 +623,7 @@ func BenchmarkConsumeMetrics(b *testing.B) {
}
cfg := createDefaultConfig().(*Config)
p, err := createMetricsProcessor(context.Background(), params, cfg, c)
if err != nil {
b.Fatal(err)
}
require.NoError(b, err)

metrics := pmetric.NewMetrics()
rms := metrics.ResourceMetrics().AppendEmpty()
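
The benchmark hunk above passes a *testing.B rather than a *testing.T. That works because testify's helpers take an interface (require.TestingT needs only Errorf and FailNow), which both *testing.T and *testing.B satisfy. A sketch, with an invented constructor:

package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// newProcessor is illustrative; it stands in for createMetricsProcessor.
func newProcessor() (func(), error) { return func() {}, nil }

func BenchmarkConsume(b *testing.B) {
	p, err := newProcessor()
	// Old style:
	//   if err != nil {
	//       b.Fatal(err)
	//   }
	// *testing.B satisfies require.TestingT, so require works here too.
	require.NoError(b, err)

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		p()
	}
}
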
@@ -9,6 +9,8 @@ import (
"strings"
"testing"

"github.com/stretchr/testify/require"

"github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/testing/testar/crlf"
)

@@ -71,16 +73,12 @@ func TestCRLF(t *testing.T) {
}

err := Read(data, &into)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)

must(t, into.String, "foobar\n")
}

func must[T string | int](t *testing.T, v, want T) {
t.Helper()
if v != want {
t.Fatalf("got '%q' != '%q' want", v, want)
}
require.Equal(t, want, v, "got '%q' != '%q' want", v, want)
}
4 changes: 1 addition & 3 deletions processor/deltatocumulativeprocessor/processor_test.go
@@ -160,7 +160,5 @@ func TestTelemetry(t *testing.T) {
require.NoError(t, err)

var rm metricdata.ResourceMetrics
if err := tt.reader.Collect(context.Background(), &rm); err != nil {
t.Fatal(err)
}
require.NoError(t, tt.reader.Collect(context.Background(), &rm))
}
10 changes: 3 additions & 7 deletions processor/k8sattributesprocessor/internal/kube/client_test.go
@@ -146,8 +146,7 @@ func nodeAddAndUpdateTest(t *testing.T, c *WatchClient, handler func(obj any)) {

func TestDefaultClientset(t *testing.T) {
c, err := New(componenttest.NewNopTelemetrySettings(), k8sconfig.APIConfig{}, ExtractionRules{}, Filters{}, []Association{}, Excludes{}, nil, nil, nil, nil, false, 10*time.Second)
assert.Error(t, err)
assert.Equal(t, "invalid authType for kubernetes: ", err.Error())
require.EqualError(t, err, "invalid authType for kubernetes: ")
assert.Nil(t, c)

c, err = New(componenttest.NewNopTelemetrySettings(), k8sconfig.APIConfig{}, ExtractionRules{}, Filters{}, []Association{}, Excludes{}, newFakeAPIClientset, nil, nil, nil, false, 10*time.Second)
@@ -194,8 +193,7 @@ func TestConstructorErrors(t *testing.T) {
}
c, err := New(componenttest.NewNopTelemetrySettings(), apiCfg, er, ff, []Association{}, Excludes{}, clientProvider, NewFakeInformer, NewFakeNamespaceInformer, nil, false, 10*time.Second)
assert.Nil(t, c)
assert.Error(t, err)
assert.Equal(t, "error creating k8s client", err.Error())
require.EqualError(t, err, "error creating k8s client")
assert.Equal(t, apiCfg, gotAPIConfig)
})
}
@@ -1834,9 +1832,7 @@ func Test_extractField(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := tt.args.r.extractField(tt.args.v); got != tt.want {
t.Errorf("extractField() = %v, want %v", got, tt.want)
}
assert.Equal(t, tt.want, tt.args.r.extractField(tt.args.v), "extractField()")
})
}
}
26 changes: 10 additions & 16 deletions processor/k8sattributesprocessor/options_test.go
@@ -20,12 +20,11 @@ func TestWithAPIConfig(t *testing.T) {
p := &kubernetesprocessor{}
apiConfig := k8sconfig.APIConfig{AuthType: "test-auth-type"}
err := withAPIConfig(apiConfig)(p)
assert.Error(t, err)
assert.Equal(t, "invalid authType for kubernetes: test-auth-type", err.Error())
require.EqualError(t, err, "invalid authType for kubernetes: test-auth-type")

apiConfig = k8sconfig.APIConfig{AuthType: "kubeConfig"}
err = withAPIConfig(apiConfig)(p)
assert.NoError(t, err)
require.NoError(t, err)
assert.Equal(t, apiConfig, p.apiConfig)
}

@@ -200,15 +199,12 @@ func TestWithExtractAnnotations(t *testing.T) {
p := &kubernetesprocessor{}
opt := withExtractAnnotations(tt.args...)
err := opt(p)
if tt.wantError == "" {
assert.NoError(t, err)
if tt.wantError != "" {
require.EqualError(t, err, tt.wantError)
} else {
assert.Error(t, err)
assert.Equal(t, tt.wantError, err.Error())
return
require.NoError(t, err)
assert.Equal(t, tt.want, p.rules.Annotations)
}
got := p.rules.Annotations
assert.Equal(t, tt.want, got)
})
}
}
@@ -342,14 +338,12 @@ func TestWithExtractLabels(t *testing.T) {
p := &kubernetesprocessor{}
opt := withExtractLabels(tt.args...)
err := opt(p)
if tt.wantError == "" {
assert.NoError(t, err)
if tt.wantError != "" {
require.EqualError(t, err, tt.wantError)
} else {
assert.Error(t, err)
assert.Equal(t, tt.wantError, err.Error())
return
require.NoError(t, err)
assert.Equal(t, tt.want, p.rules.Labels)
}
assert.Equal(t, tt.want, p.rules.Labels)
})
}
}
6 changes: 2 additions & 4 deletions processor/k8sattributesprocessor/processor_test.go
@@ -271,8 +271,7 @@ func TestProcessorBadClientProvider(t *testing.T) {
}

newMultiTest(t, NewFactory().CreateDefaultConfig(), func(err error) {
require.Error(t, err)
assert.Equal(t, "bad client error", err.Error())
require.EqualError(t, err, "bad client error")
}, withKubeClientProvider(clientProvider))
}

@@ -1520,8 +1519,7 @@ func TestRealClient(t *testing.T) {
t,
NewFactory().CreateDefaultConfig(),
func(err error) {
require.Error(t, err)
assert.Equal(t, "unable to load k8s config, KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT must be defined", err.Error())
require.EqualError(t, err, "unable to load k8s config, KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT must be defined")
},
withKubeClientProvider(kubeClientProvider),
withAPIConfig(k8sconfig.APIConfig{AuthType: "none"}),
4 changes: 2 additions & 2 deletions processor/metricstransformprocessor/factory_test.go
@@ -165,7 +165,7 @@ func TestFactory_validateConfiguration(t *testing.T) {
},
}
err := validateConfiguration(&v1)
assert.Equal(t, "operation 1: missing required field \"new_label\" while \"action\" is add_label", err.Error())
assert.EqualError(t, err, "operation 1: missing required field \"new_label\" while \"action\" is add_label")

v2 := Config{
Transforms: []transform{
@@ -186,7 +186,7 @@
}

err = validateConfiguration(&v2)
assert.Equal(t, "operation 1: missing required field \"new_value\" while \"action\" is add_label", err.Error())
assert.EqualError(t, err, "operation 1: missing required field \"new_value\" while \"action\" is add_label")
}

func TestCreateProcessorsFilledData(t *testing.T) {
4 changes: 2 additions & 2 deletions processor/probabilisticsamplerprocessor/logsprocessor_test.go
@@ -415,7 +415,7 @@ func TestLogsSamplingState(t *testing.T) {
} else {
require.Len(t, observed.All(), 1, "should have one log: %v", observed.All())
require.Contains(t, observed.All()[0].Message, "logs sampler")
require.Contains(t, observed.All()[0].Context[0].Interface.(error).Error(), tt.log)
require.ErrorContains(t, observed.All()[0].Context[0].Interface.(error), tt.log)
}

sampledData := sink.AllLogs()
@@ -513,7 +513,7 @@ func TestLogsMissingRandomness(t *testing.T) {
// pct==0 bypasses the randomness check
require.Len(t, observed.All(), 1, "should have one log: %v", observed.All())
require.Contains(t, observed.All()[0].Message, "logs sampler")
require.Contains(t, observed.All()[0].Context[0].Interface.(error).Error(), "missing randomness")
require.ErrorContains(t, observed.All()[0].Context[0].Interface.(error), "missing randomness")
} else {
require.Empty(t, observed.All(), "should have no logs: %v", observed.All())
}
34 changes: 13 additions & 21 deletions processor/probabilisticsamplerprocessor/tracesprocessor_test.go
@@ -151,25 +151,20 @@ func Test_tracesamplerprocessor_SamplingPercentageRange(t *testing.T) {
sink := newAssertTraces(t, testSvcName)

tsp, err := newTracesProcessor(context.Background(), processortest.NewNopSettings(), tt.cfg, sink)
if err != nil {
t.Errorf("error when creating traceSamplerProcessor: %v", err)
return
}
require.NoError(t, err, "error when creating traceSamplerProcessor")
for _, td := range genRandomTestData(tt.numBatches, tt.numTracesPerBatch, testSvcName, 1) {
assert.NoError(t, tsp.ConsumeTraces(context.Background(), td))
}
sampled := sink.spanCount
actualPercentageSamplingPercentage := float32(sampled) / float32(tt.numBatches*tt.numTracesPerBatch) * 100.0
delta := math.Abs(float64(actualPercentageSamplingPercentage - tt.cfg.SamplingPercentage))
if delta > tt.acceptableDelta {
t.Errorf(
"got %f percentage sampling rate, want %f (allowed delta is %f but got %f)",
actualPercentageSamplingPercentage,
tt.cfg.SamplingPercentage,
tt.acceptableDelta,
delta,
)
}
assert.LessOrEqualf(t, delta, tt.acceptableDelta,
"got %f percentage sampling rate, want %f (allowed delta is %f but got %f)",
actualPercentageSamplingPercentage,
tt.cfg.SamplingPercentage,
tt.acceptableDelta,
delta,
)
})
}
}
@@ -212,10 +207,7 @@ func Test_tracesamplerprocessor_SamplingPercentageRange_MultipleResourceSpans(t

sink := new(consumertest.TracesSink)
tsp, err := newTracesProcessor(context.Background(), processortest.NewNopSettings(), tt.cfg, sink)
if err != nil {
t.Errorf("error when creating traceSamplerProcessor: %v", err)
return
}
require.NoError(t, err, "error when creating traceSamplerProcessor")

for _, td := range genRandomTestData(tt.numBatches, tt.numTracesPerBatch, testSvcName, tt.resourceSpanPerTrace) {
assert.NoError(t, tsp.ConsumeTraces(context.Background(), td))
@@ -285,7 +277,7 @@ func Test_tracessamplerprocessor_MissingRandomness(t *testing.T) {
// pct==0 bypasses the randomness check
require.Len(t, observed.All(), 1, "should have one log: %v", observed.All())
require.Contains(t, observed.All()[0].Message, "traces sampler")
require.Contains(t, observed.All()[0].Context[0].Interface.(error).Error(), "missing randomness")
require.ErrorContains(t, observed.All()[0].Context[0].Interface.(error), "missing randomness")
} else {
require.Empty(t, observed.All(), "should have no logs: %v", observed.All())
}
@@ -905,7 +897,7 @@ func Test_tracesamplerprocessor_TraceState(t *testing.T) {
} else {
require.Len(t, observed.All(), 1, "should have one log: %v", observed.All())
require.Contains(t, observed.All()[0].Message, "traces sampler")
require.Contains(t, observed.All()[0].Context[0].Interface.(error).Error(), tt.log)
require.ErrorContains(t, observed.All()[0].Context[0].Interface.(error), tt.log)
}
})
}
@@ -1026,10 +1018,10 @@ func Test_tracesamplerprocessor_TraceStateErrors(t *testing.T) {

require.Len(t, observed.All(), 1, "should have one log: %v", observed.All())
if observed.All()[0].Message == "trace sampler" {
require.Contains(t, observed.All()[0].Context[0].Interface.(error).Error(), expectMessage)
require.ErrorContains(t, observed.All()[0].Context[0].Interface.(error), expectMessage)
} else {
require.Contains(t, observed.All()[0].Message, "traces sampler")
require.Contains(t, observed.All()[0].Context[0].Interface.(error).Error(), expectMessage)
require.ErrorContains(t, observed.All()[0].Context[0].Interface.(error), expectMessage)
}
})
}
@@ -50,9 +50,7 @@ func BenchmarkConcurrentEnqueue(b *testing.B) {
ids := generateSequentialIDs(1)
batcher, err := New(10, 100, uint64(4*runtime.NumCPU()))
defer batcher.Stop()
if err != nil {
b.Fatalf("Failed to create Batcher: %v", err)
}
require.NoError(b, err, "Failed to create Batcher")

ticker := time.NewTicker(100 * time.Millisecond)
defer ticker.Stop()
@@ -217,9 +217,7 @@ func TestCompositeEvaluator2SubpolicyThrottling(t *testing.T) {
require.NoError(t, err, "Failed to evaluate composite policy: %v", err)

expected := Sampled
if decision != expected {
t.Fatalf("Incorrect decision by composite policy evaluator: expected %v, actual %v", expected, decision)
}
require.Equal(t, expected, decision, "Incorrect decision by composite policy evaluator: expected %v, actual %v", expected, decision)
}

// Now we hit the rate limit for second subpolicy, so subsequent evaluations should result in NotSampled
@@ -228,9 +226,7 @@ func TestCompositeEvaluator2SubpolicyThrottling(t *testing.T) {
require.NoError(t, err, "Failed to evaluate composite policy: %v", err)

expected := NotSampled
if decision != expected {
t.Fatalf("Incorrect decision by composite policy evaluator: expected %v, actual %v", expected, decision)
}
require.Equal(t, expected, decision, "Incorrect decision by composite policy evaluator: expected %v, actual %v", expected, decision)
}

// Let the time advance by one second.
@@ -514,17 +514,17 @@ func Test_aggregateOnAttributeValues(t *testing.T) {

func Test_createAggregateOnAttributeValueFunction(t *testing.T) {
// invalid input arguments
_, e := createAggregateOnAttributeValueFunction(ottl.FunctionContext{}, nil)
require.Contains(t, e.Error(), "AggregateOnAttributeValueFactory args must be of type *AggregateOnAttributeValueArguments")
_, err := createAggregateOnAttributeValueFunction(ottl.FunctionContext{}, nil)
require.ErrorContains(t, err, "AggregateOnAttributeValueFactory args must be of type *AggregateOnAttributeValueArguments")

// invalid aggregation function
_, e = createAggregateOnAttributeValueFunction(ottl.FunctionContext{}, &aggregateOnAttributeValueArguments{
_, err = createAggregateOnAttributeValueFunction(ottl.FunctionContext{}, &aggregateOnAttributeValueArguments{
AggregationFunction: "invalid",
Attribute: "attr",
Values: []string{"val"},
NewValue: "newVal",
})
require.Contains(t, e.Error(), "invalid aggregation function")
require.ErrorContains(t, err, "invalid aggregation function")
}

func getTestSumMetricMultipleAggregateOnAttributeValue() pmetric.Metric {
@@ -751,8 +751,7 @@ func Test_convertExponentialHistToExplicitHist_validate(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
_, err := convertExponentialHistToExplicitHist("random", tt.sliceExplicitBoundsArgs)
assert.Error(t, err)
assert.Contains(t, err.Error(), "explicit bounds cannot be empty")
assert.ErrorContains(t, err, "explicit bounds cannot be empty")
})
}
}
