Skip to content

Commit

Permalink
[chore]: partially enable the thelper linter
Browse files Browse the repository at this point in the history
Signed-off-by: Matthieu MOREL <[email protected]>
  • Loading branch information
mmorel-35 committed Dec 14, 2024
1 parent 4c33430 commit d0a98ac
Show file tree
Hide file tree
Showing 13 changed files with 120 additions and 110 deletions.
10 changes: 10 additions & 0 deletions .golangci.yml
Original file line number Diff line number Diff line change
Expand Up @@ -134,6 +134,15 @@ linters-settings:
- suite-subtest-run
- encoded-compare # has false positives that cannot be fixed with testifylint-fix
enable-all: true
thelper:
test:
begin: false
benchmark:
begin: false
tb:
begin: false
fuzz:
begin: false

linters:
enable:
Expand All @@ -156,6 +165,7 @@ linters:
- staticcheck
- tenv
- testifylint
- thelper
- unconvert
- unparam
- unused
Expand Down
52 changes: 26 additions & 26 deletions connector/exceptionsconnector/connector_metrics_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -153,8 +153,8 @@ func newTestMetricsConnector(mcon consumer.Metrics, defaultNullValue *string, lo
}

// verifyConsumeMetricsInputCumulative expects one accumulation of metrics, and marked as cumulative
func verifyConsumeMetricsInputCumulative(t testing.TB, input pmetric.Metrics) bool {
return verifyConsumeMetricsInput(t, input, 1)
func verifyConsumeMetricsInputCumulative(tb testing.TB, input pmetric.Metrics) bool {
return verifyConsumeMetricsInput(tb, input, 1)
}

func verifyBadMetricsOkay(_ testing.TB, _ pmetric.Metrics) bool {
Expand All @@ -165,50 +165,50 @@ func verifyBadMetricsOkay(_ testing.TB, _ pmetric.Metrics) bool {
// numCumulativeConsumptions acts as a multiplier for the values, since the cumulative metrics are additive.
func verifyMultipleCumulativeConsumptions() func(t testing.TB, input pmetric.Metrics) bool {
numCumulativeConsumptions := 0
return func(t testing.TB, input pmetric.Metrics) bool {
return func(tb testing.TB, input pmetric.Metrics) bool {
numCumulativeConsumptions++
return verifyConsumeMetricsInput(t, input, numCumulativeConsumptions)
return verifyConsumeMetricsInput(tb, input, numCumulativeConsumptions)
}
}

// verifyConsumeMetricsInput verifies the input of the ConsumeMetrics call from this connector.
// This is the best point to verify the computed metrics from spans are as expected.
func verifyConsumeMetricsInput(t testing.TB, input pmetric.Metrics, numCumulativeConsumptions int) bool {
require.Equal(t, 3, input.DataPointCount(), "Should be 1 for each generated span")
func verifyConsumeMetricsInput(tb testing.TB, input pmetric.Metrics, numCumulativeConsumptions int) bool {
require.Equal(tb, 3, input.DataPointCount(), "Should be 1 for each generated span")

rm := input.ResourceMetrics()
require.Equal(t, 1, rm.Len())
require.Equal(tb, 1, rm.Len())

ilm := rm.At(0).ScopeMetrics()
require.Equal(t, 1, ilm.Len())
assert.Equal(t, "exceptionsconnector", ilm.At(0).Scope().Name())
require.Equal(tb, 1, ilm.Len())
assert.Equal(tb, "exceptionsconnector", ilm.At(0).Scope().Name())

m := ilm.At(0).Metrics()
require.Equal(t, 1, m.Len())
require.Equal(tb, 1, m.Len())

seenMetricIDs := make(map[metricID]bool)
// The first 3 data points are for call counts.
assert.Equal(t, "exceptions", m.At(0).Name())
assert.True(t, m.At(0).Sum().IsMonotonic())
assert.Equal(tb, "exceptions", m.At(0).Name())
assert.True(tb, m.At(0).Sum().IsMonotonic())
callsDps := m.At(0).Sum().DataPoints()
require.Equal(t, 3, callsDps.Len())
require.Equal(tb, 3, callsDps.Len())
for dpi := 0; dpi < 3; dpi++ {
dp := callsDps.At(dpi)
assert.Equal(t, int64(numCumulativeConsumptions), dp.IntValue(), "There should only be one metric per Service/kind combination")
assert.NotZero(t, dp.StartTimestamp(), "StartTimestamp should be set")
assert.NotZero(t, dp.Timestamp(), "Timestamp should be set")
verifyMetricLabels(dp, t, seenMetricIDs)
assert.Equal(tb, int64(numCumulativeConsumptions), dp.IntValue(), "There should only be one metric per Service/kind combination")
assert.NotZero(tb, dp.StartTimestamp(), "StartTimestamp should be set")
assert.NotZero(tb, dp.Timestamp(), "Timestamp should be set")
verifyMetricLabels(tb, dp, seenMetricIDs)

assert.Equal(t, 1, dp.Exemplars().Len())
assert.Equal(tb, 1, dp.Exemplars().Len())
exemplar := dp.Exemplars().At(0)
assert.NotZero(t, exemplar.Timestamp())
assert.NotZero(t, exemplar.TraceID())
assert.NotZero(t, exemplar.SpanID())
assert.NotZero(tb, exemplar.Timestamp())
assert.NotZero(tb, exemplar.TraceID())
assert.NotZero(tb, exemplar.SpanID())
}
return true
}

func verifyMetricLabels(dp metricDataPoint, t testing.TB, seenMetricIDs map[metricID]bool) {
func verifyMetricLabels(tb testing.TB, dp metricDataPoint, seenMetricIDs map[metricID]bool) {
mID := metricID{}
wantDimensions := map[string]pcommon.Value{
stringAttrName: pcommon.NewValueStr("stringAttrValue"),
Expand All @@ -233,17 +233,17 @@ func verifyMetricLabels(dp metricDataPoint, t testing.TB, seenMetricIDs map[metr
case statusCodeKey:
mID.statusCode = v.Str()
case notInSpanAttrName1:
assert.Fail(t, notInSpanAttrName1+" should not be in this metric")
assert.Fail(tb, notInSpanAttrName1+" should not be in this metric")
default:
assert.Equal(t, wantDimensions[k], v)
assert.Equal(tb, wantDimensions[k], v)
delete(wantDimensions, k)
}
return true
})
assert.Empty(t, wantDimensions, "Did not see all expected dimensions in metric. Missing: ", wantDimensions)
assert.Empty(tb, wantDimensions, "Did not see all expected dimensions in metric. Missing: ", wantDimensions)

// Service/kind should be a unique metric.
assert.False(t, seenMetricIDs[mID])
assert.False(tb, seenMetricIDs[mID])
seenMetricIDs[mID] = true
}

Expand Down
94 changes: 47 additions & 47 deletions connector/spanmetricsconnector/connector_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -122,46 +122,46 @@ func verifyExemplarsExist(t testing.TB, input pmetric.Metrics) bool {
}

// verifyConsumeMetricsInputCumulative expects one accumulation of metrics, and marked as cumulative
func verifyConsumeMetricsInputCumulative(t testing.TB, input pmetric.Metrics) bool {
return verifyConsumeMetricsInput(t, input, pmetric.AggregationTemporalityCumulative, 1)
func verifyConsumeMetricsInputCumulative(tb testing.TB, input pmetric.Metrics) bool {
return verifyConsumeMetricsInput(tb, input, pmetric.AggregationTemporalityCumulative, 1)
}

func verifyBadMetricsOkay(_ testing.TB, _ pmetric.Metrics) bool {
return true // Validating no exception
}

// verifyConsumeMetricsInputDelta expects one accumulation of metrics, and marked as delta
func verifyConsumeMetricsInputDelta(t testing.TB, input pmetric.Metrics) bool {
return verifyConsumeMetricsInput(t, input, pmetric.AggregationTemporalityDelta, 1)
func verifyConsumeMetricsInputDelta(tb testing.TB, input pmetric.Metrics) bool {
return verifyConsumeMetricsInput(tb, input, pmetric.AggregationTemporalityDelta, 1)
}

// verifyMultipleCumulativeConsumptions expects the amount of accumulations as kept track of by numCumulativeConsumptions.
// numCumulativeConsumptions acts as a multiplier for the values, since the cumulative metrics are additive.
func verifyMultipleCumulativeConsumptions() func(t testing.TB, input pmetric.Metrics) bool {
func verifyMultipleCumulativeConsumptions() func(tb testing.TB, input pmetric.Metrics) bool {
numCumulativeConsumptions := 0
return func(t testing.TB, input pmetric.Metrics) bool {
return func(tb testing.TB, input pmetric.Metrics) bool {
numCumulativeConsumptions++
return verifyConsumeMetricsInput(t, input, pmetric.AggregationTemporalityCumulative, numCumulativeConsumptions)
return verifyConsumeMetricsInput(tb, input, pmetric.AggregationTemporalityCumulative, numCumulativeConsumptions)
}
}

// verifyConsumeMetricsInput verifies the input of the ConsumeMetrics call from this connector.
// This is the best point to verify the computed metrics from spans are as expected.
func verifyConsumeMetricsInput(t testing.TB, input pmetric.Metrics, expectedTemporality pmetric.AggregationTemporality, numCumulativeConsumptions int) bool {
require.Equal(t, 6, input.DataPointCount(),
func verifyConsumeMetricsInput(tb testing.TB, input pmetric.Metrics, expectedTemporality pmetric.AggregationTemporality, numCumulativeConsumptions int) bool {
require.Equal(tb, 6, input.DataPointCount(),
"Should be 3 for each of call count and latency split into two resource scopes defined by: "+
"service-a: service-a (server kind) -> service-a (client kind) and "+
"service-b: service-b (service kind)",
)

require.Equal(t, 2, input.ResourceMetrics().Len())
require.Equal(tb, 2, input.ResourceMetrics().Len())

for i := 0; i < input.ResourceMetrics().Len(); i++ {
rm := input.ResourceMetrics().At(i)

var numDataPoints int
val, ok := rm.Resource().Attributes().Get(serviceNameKey)
require.True(t, ok)
require.True(tb, ok)
serviceName := val.AsString()
if serviceName == "service-a" {
numDataPoints = 2
Expand All @@ -170,68 +170,68 @@ func verifyConsumeMetricsInput(t testing.TB, input pmetric.Metrics, expectedTemp
}

ilm := rm.ScopeMetrics()
require.Equal(t, 1, ilm.Len())
assert.Equal(t, "spanmetricsconnector", ilm.At(0).Scope().Name())
require.Equal(tb, 1, ilm.Len())
assert.Equal(tb, "spanmetricsconnector", ilm.At(0).Scope().Name())

m := ilm.At(0).Metrics()
require.Equal(t, 2, m.Len(), "only sum and histogram metric types generated")
require.Equal(tb, 2, m.Len(), "only sum and histogram metric types generated")

// validate calls - sum metrics
metric := m.At(0)
assert.Equal(t, metricNameCalls, metric.Name())
assert.Equal(t, expectedTemporality, metric.Sum().AggregationTemporality())
assert.True(t, metric.Sum().IsMonotonic())
assert.Equal(tb, metricNameCalls, metric.Name())
assert.Equal(tb, expectedTemporality, metric.Sum().AggregationTemporality())
assert.True(tb, metric.Sum().IsMonotonic())

seenMetricIDs := make(map[metricID]bool)
callsDps := metric.Sum().DataPoints()
require.Equal(t, numDataPoints, callsDps.Len())
require.Equal(tb, numDataPoints, callsDps.Len())
for dpi := 0; dpi < numDataPoints; dpi++ {
dp := callsDps.At(dpi)
assert.Equal(t,
assert.Equal(tb,
int64(numCumulativeConsumptions),
dp.IntValue(),
"There should only be one metric per Service/name/kind combination",
)
assert.NotZero(t, dp.StartTimestamp(), "StartTimestamp should be set")
assert.NotZero(t, dp.Timestamp(), "Timestamp should be set")
verifyMetricLabels(dp, t, seenMetricIDs)
assert.NotZero(tb, dp.StartTimestamp(), "StartTimestamp should be set")
assert.NotZero(tb, dp.Timestamp(), "Timestamp should be set")
verifyMetricLabels(tb, dp, seenMetricIDs)
}

// validate latency - histogram metrics
metric = m.At(1)
assert.Equal(t, metricNameDuration, metric.Name())
assert.Equal(t, defaultUnit.String(), metric.Unit())
assert.Equal(tb, metricNameDuration, metric.Name())
assert.Equal(tb, defaultUnit.String(), metric.Unit())

if metric.Type() == pmetric.MetricTypeExponentialHistogram {
hist := metric.ExponentialHistogram()
assert.Equal(t, expectedTemporality, hist.AggregationTemporality())
verifyExponentialHistogramDataPoints(t, hist.DataPoints(), numDataPoints, numCumulativeConsumptions)
assert.Equal(tb, expectedTemporality, hist.AggregationTemporality())
verifyExponentialHistogramDataPoints(tb, hist.DataPoints(), numDataPoints, numCumulativeConsumptions)
} else {
hist := metric.Histogram()
assert.Equal(t, expectedTemporality, hist.AggregationTemporality())
verifyExplicitHistogramDataPoints(t, hist.DataPoints(), numDataPoints, numCumulativeConsumptions)
assert.Equal(tb, expectedTemporality, hist.AggregationTemporality())
verifyExplicitHistogramDataPoints(tb, hist.DataPoints(), numDataPoints, numCumulativeConsumptions)
}
}
return true
}

func verifyExplicitHistogramDataPoints(t testing.TB, dps pmetric.HistogramDataPointSlice, numDataPoints, numCumulativeConsumptions int) {
func verifyExplicitHistogramDataPoints(tb testing.TB, dps pmetric.HistogramDataPointSlice, numDataPoints, numCumulativeConsumptions int) {
seenMetricIDs := make(map[metricID]bool)
require.Equal(t, numDataPoints, dps.Len())
require.Equal(tb, numDataPoints, dps.Len())
for dpi := 0; dpi < numDataPoints; dpi++ {
dp := dps.At(dpi)
assert.Equal(
t,
tb,
sampleDuration*float64(numCumulativeConsumptions),
dp.Sum(),
"Should be a 11ms duration measurement, multiplied by the number of stateful accumulations.")
assert.NotZero(t, dp.Timestamp(), "Timestamp should be set")
assert.NotZero(tb, dp.Timestamp(), "Timestamp should be set")

// Verify bucket counts.

// The bucket counts should be 1 greater than the explicit bounds as documented in:
// https://github.com/open-telemetry/opentelemetry-proto/blob/main/opentelemetry/proto/metrics/v1/metrics.proto.
assert.Equal(t, dp.ExplicitBounds().Len()+1, dp.BucketCounts().Len())
assert.Equal(tb, dp.ExplicitBounds().Len()+1, dp.BucketCounts().Len())

// Find the bucket index where the 11ms duration should belong in.
var foundDurationIndex int
Expand All @@ -248,31 +248,31 @@ func verifyExplicitHistogramDataPoints(t testing.TB, dps pmetric.HistogramDataPo
if bi == foundDurationIndex {
wantBucketCount = uint64(numCumulativeConsumptions)
}
assert.Equal(t, wantBucketCount, dp.BucketCounts().At(bi))
assert.Equal(tb, wantBucketCount, dp.BucketCounts().At(bi))
}
verifyMetricLabels(dp, t, seenMetricIDs)
verifyMetricLabels(tb, dp, seenMetricIDs)
}
}

func verifyExponentialHistogramDataPoints(t testing.TB, dps pmetric.ExponentialHistogramDataPointSlice, numDataPoints, numCumulativeConsumptions int) {
func verifyExponentialHistogramDataPoints(tb testing.TB, dps pmetric.ExponentialHistogramDataPointSlice, numDataPoints, numCumulativeConsumptions int) {
seenMetricIDs := make(map[metricID]bool)
require.Equal(t, numDataPoints, dps.Len())
require.Equal(tb, numDataPoints, dps.Len())
for dpi := 0; dpi < numDataPoints; dpi++ {
dp := dps.At(dpi)
assert.Equal(
t,
tb,
sampleDuration*float64(numCumulativeConsumptions),
dp.Sum(),
"Should be a 11ms duration measurement, multiplied by the number of stateful accumulations.")
assert.Equal(t, uint64(numCumulativeConsumptions), dp.Count())
assert.Equal(t, []uint64{uint64(numCumulativeConsumptions)}, dp.Positive().BucketCounts().AsRaw())
assert.NotZero(t, dp.Timestamp(), "Timestamp should be set")
assert.Equal(tb, uint64(numCumulativeConsumptions), dp.Count())
assert.Equal(tb, []uint64{uint64(numCumulativeConsumptions)}, dp.Positive().BucketCounts().AsRaw())
assert.NotZero(tb, dp.Timestamp(), "Timestamp should be set")

verifyMetricLabels(dp, t, seenMetricIDs)
verifyMetricLabels(tb, dp, seenMetricIDs)
}
}

func verifyMetricLabels(dp metricDataPoint, t testing.TB, seenMetricIDs map[metricID]bool) {
func verifyMetricLabels(tb testing.TB, dp metricDataPoint, seenMetricIDs map[metricID]bool) {
mID := metricID{}
wantDimensions := map[string]pcommon.Value{
stringAttrName: pcommon.NewValueStr("stringAttrValue"),
Expand All @@ -296,17 +296,17 @@ func verifyMetricLabels(dp metricDataPoint, t testing.TB, seenMetricIDs map[metr
case statusCodeKey:
mID.statusCode = v.Str()
case notInSpanAttrName1:
assert.Fail(t, notInSpanAttrName1+" should not be in this metric")
assert.Fail(tb, notInSpanAttrName1+" should not be in this metric")
default:
assert.Equal(t, wantDimensions[k], v)
assert.Equal(tb, wantDimensions[k], v)
delete(wantDimensions, k)
}
return true
})
assert.Empty(t, wantDimensions, "Did not see all expected dimensions in metric. Missing: ", wantDimensions)
assert.Empty(tb, wantDimensions, "Did not see all expected dimensions in metric. Missing: ", wantDimensions)

// Service/name/kind should be a unique metric.
assert.False(t, seenMetricIDs[mID])
assert.False(tb, seenMetricIDs[mID])
seenMetricIDs[mID] = true
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -120,7 +120,7 @@ func TestMetricDataToLogService(t *testing.T) {
t.Errorf("Failed load log key value pairs from file, error: %v", err)
return
}
assert.Equal(t, len(wantLogs), len(gotLogs))
assert.Len(t, gotLogs, len(wantLogs))
for j := 0; j < len(gotLogs); j++ {
sort.Sort(logKeyValuePairs(gotLogPairs[j]))
sort.Sort(logKeyValuePairs(wantLogs[j]))
Expand Down
2 changes: 1 addition & 1 deletion exporter/awsemfexporter/grouped_metric_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -118,7 +118,7 @@ func TestAddToGroupedMetric(t *testing.T) {

assert.Len(t, groupedMetrics, 1)
for _, v := range groupedMetrics {
assert.Equal(t, len(tc.expectedMetricInfo), len(v.metrics))
assert.Len(t, v.metrics, len(tc.expectedMetricInfo))
assert.Equal(t, tc.expectedMetricInfo, v.metrics)
assert.Len(t, v.labels, 2)
assert.Equal(t, generateTestMetricMetadata(namespace, timestamp, logGroup, logStreamName, instrumentationLibName, tc.expectedMetricType), v.metadata)
Expand Down
Loading

0 comments on commit d0a98ac

Please sign in to comment.