diff --git a/.golangci.yml b/.golangci.yml index 28bfb9ead30a..8951500c9f83 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -134,6 +134,15 @@ linters-settings: - suite-subtest-run - encoded-compare # has false positives that cannot be fixed with testifylint-fix enable-all: true + thelper: + test: + begin: false + benchmark: + begin: false + tb: + begin: false + fuzz: + begin: false linters: enable: @@ -156,6 +165,7 @@ linters: - staticcheck - tenv - testifylint + - thelper - unconvert - unparam - unused diff --git a/connector/exceptionsconnector/connector_metrics_test.go b/connector/exceptionsconnector/connector_metrics_test.go index 98b12aec7149..4d26db2a6dd2 100644 --- a/connector/exceptionsconnector/connector_metrics_test.go +++ b/connector/exceptionsconnector/connector_metrics_test.go @@ -153,8 +153,8 @@ func newTestMetricsConnector(mcon consumer.Metrics, defaultNullValue *string, lo } // verifyConsumeMetricsInputCumulative expects one accumulation of metrics, and marked as cumulative -func verifyConsumeMetricsInputCumulative(t testing.TB, input pmetric.Metrics) bool { - return verifyConsumeMetricsInput(t, input, 1) +func verifyConsumeMetricsInputCumulative(tb testing.TB, input pmetric.Metrics) bool { + return verifyConsumeMetricsInput(tb, input, 1) } func verifyBadMetricsOkay(_ testing.TB, _ pmetric.Metrics) bool { @@ -165,50 +165,50 @@ func verifyBadMetricsOkay(_ testing.TB, _ pmetric.Metrics) bool { // numCumulativeConsumptions acts as a multiplier for the values, since the cumulative metrics are additive. 
func verifyMultipleCumulativeConsumptions() func(t testing.TB, input pmetric.Metrics) bool { numCumulativeConsumptions := 0 - return func(t testing.TB, input pmetric.Metrics) bool { + return func(tb testing.TB, input pmetric.Metrics) bool { numCumulativeConsumptions++ - return verifyConsumeMetricsInput(t, input, numCumulativeConsumptions) + return verifyConsumeMetricsInput(tb, input, numCumulativeConsumptions) } } // verifyConsumeMetricsInput verifies the input of the ConsumeMetrics call from this connector. // This is the best point to verify the computed metrics from spans are as expected. -func verifyConsumeMetricsInput(t testing.TB, input pmetric.Metrics, numCumulativeConsumptions int) bool { - require.Equal(t, 3, input.DataPointCount(), "Should be 1 for each generated span") +func verifyConsumeMetricsInput(tb testing.TB, input pmetric.Metrics, numCumulativeConsumptions int) bool { + require.Equal(tb, 3, input.DataPointCount(), "Should be 1 for each generated span") rm := input.ResourceMetrics() - require.Equal(t, 1, rm.Len()) + require.Equal(tb, 1, rm.Len()) ilm := rm.At(0).ScopeMetrics() - require.Equal(t, 1, ilm.Len()) - assert.Equal(t, "exceptionsconnector", ilm.At(0).Scope().Name()) + require.Equal(tb, 1, ilm.Len()) + assert.Equal(tb, "exceptionsconnector", ilm.At(0).Scope().Name()) m := ilm.At(0).Metrics() - require.Equal(t, 1, m.Len()) + require.Equal(tb, 1, m.Len()) seenMetricIDs := make(map[metricID]bool) // The first 3 data points are for call counts. 
- assert.Equal(t, "exceptions", m.At(0).Name()) - assert.True(t, m.At(0).Sum().IsMonotonic()) + assert.Equal(tb, "exceptions", m.At(0).Name()) + assert.True(tb, m.At(0).Sum().IsMonotonic()) callsDps := m.At(0).Sum().DataPoints() - require.Equal(t, 3, callsDps.Len()) + require.Equal(tb, 3, callsDps.Len()) for dpi := 0; dpi < 3; dpi++ { dp := callsDps.At(dpi) - assert.Equal(t, int64(numCumulativeConsumptions), dp.IntValue(), "There should only be one metric per Service/kind combination") - assert.NotZero(t, dp.StartTimestamp(), "StartTimestamp should be set") - assert.NotZero(t, dp.Timestamp(), "Timestamp should be set") - verifyMetricLabels(dp, t, seenMetricIDs) + assert.Equal(tb, int64(numCumulativeConsumptions), dp.IntValue(), "There should only be one metric per Service/kind combination") + assert.NotZero(tb, dp.StartTimestamp(), "StartTimestamp should be set") + assert.NotZero(tb, dp.Timestamp(), "Timestamp should be set") + verifyMetricLabels(tb, dp, seenMetricIDs) - assert.Equal(t, 1, dp.Exemplars().Len()) + assert.Equal(tb, 1, dp.Exemplars().Len()) exemplar := dp.Exemplars().At(0) - assert.NotZero(t, exemplar.Timestamp()) - assert.NotZero(t, exemplar.TraceID()) - assert.NotZero(t, exemplar.SpanID()) + assert.NotZero(tb, exemplar.Timestamp()) + assert.NotZero(tb, exemplar.TraceID()) + assert.NotZero(tb, exemplar.SpanID()) } return true } -func verifyMetricLabels(dp metricDataPoint, t testing.TB, seenMetricIDs map[metricID]bool) { +func verifyMetricLabels(tb testing.TB, dp metricDataPoint, seenMetricIDs map[metricID]bool) { mID := metricID{} wantDimensions := map[string]pcommon.Value{ stringAttrName: pcommon.NewValueStr("stringAttrValue"), @@ -233,17 +233,17 @@ func verifyMetricLabels(dp metricDataPoint, t testing.TB, seenMetricIDs map[metr case statusCodeKey: mID.statusCode = v.Str() case notInSpanAttrName1: - assert.Fail(t, notInSpanAttrName1+" should not be in this metric") + assert.Fail(tb, notInSpanAttrName1+" should not be in this metric") default: - 
assert.Equal(t, wantDimensions[k], v) + assert.Equal(tb, wantDimensions[k], v) delete(wantDimensions, k) } return true }) - assert.Empty(t, wantDimensions, "Did not see all expected dimensions in metric. Missing: ", wantDimensions) + assert.Empty(tb, wantDimensions, "Did not see all expected dimensions in metric. Missing: ", wantDimensions) // Service/kind should be a unique metric. - assert.False(t, seenMetricIDs[mID]) + assert.False(tb, seenMetricIDs[mID]) seenMetricIDs[mID] = true } diff --git a/connector/spanmetricsconnector/connector_test.go b/connector/spanmetricsconnector/connector_test.go index 5bd74c85f65a..16e63909a8b7 100644 --- a/connector/spanmetricsconnector/connector_test.go +++ b/connector/spanmetricsconnector/connector_test.go @@ -122,8 +122,8 @@ func verifyExemplarsExist(t testing.TB, input pmetric.Metrics) bool { } // verifyConsumeMetricsInputCumulative expects one accumulation of metrics, and marked as cumulative -func verifyConsumeMetricsInputCumulative(t testing.TB, input pmetric.Metrics) bool { - return verifyConsumeMetricsInput(t, input, pmetric.AggregationTemporalityCumulative, 1) +func verifyConsumeMetricsInputCumulative(tb testing.TB, input pmetric.Metrics) bool { + return verifyConsumeMetricsInput(tb, input, pmetric.AggregationTemporalityCumulative, 1) } func verifyBadMetricsOkay(_ testing.TB, _ pmetric.Metrics) bool { @@ -131,37 +131,37 @@ func verifyBadMetricsOkay(_ testing.TB, _ pmetric.Metrics) bool { } // verifyConsumeMetricsInputDelta expects one accumulation of metrics, and marked as delta -func verifyConsumeMetricsInputDelta(t testing.TB, input pmetric.Metrics) bool { - return verifyConsumeMetricsInput(t, input, pmetric.AggregationTemporalityDelta, 1) +func verifyConsumeMetricsInputDelta(tb testing.TB, input pmetric.Metrics) bool { + return verifyConsumeMetricsInput(tb, input, pmetric.AggregationTemporalityDelta, 1) } // verifyMultipleCumulativeConsumptions expects the amount of accumulations as kept track of by 
numCumulativeConsumptions. // numCumulativeConsumptions acts as a multiplier for the values, since the cumulative metrics are additive. -func verifyMultipleCumulativeConsumptions() func(t testing.TB, input pmetric.Metrics) bool { +func verifyMultipleCumulativeConsumptions() func(tb testing.TB, input pmetric.Metrics) bool { numCumulativeConsumptions := 0 - return func(t testing.TB, input pmetric.Metrics) bool { + return func(tb testing.TB, input pmetric.Metrics) bool { numCumulativeConsumptions++ - return verifyConsumeMetricsInput(t, input, pmetric.AggregationTemporalityCumulative, numCumulativeConsumptions) + return verifyConsumeMetricsInput(tb, input, pmetric.AggregationTemporalityCumulative, numCumulativeConsumptions) } } // verifyConsumeMetricsInput verifies the input of the ConsumeMetrics call from this connector. // This is the best point to verify the computed metrics from spans are as expected. -func verifyConsumeMetricsInput(t testing.TB, input pmetric.Metrics, expectedTemporality pmetric.AggregationTemporality, numCumulativeConsumptions int) bool { - require.Equal(t, 6, input.DataPointCount(), +func verifyConsumeMetricsInput(tb testing.TB, input pmetric.Metrics, expectedTemporality pmetric.AggregationTemporality, numCumulativeConsumptions int) bool { + require.Equal(tb, 6, input.DataPointCount(), "Should be 3 for each of call count and latency split into two resource scopes defined by: "+ "service-a: service-a (server kind) -> service-a (client kind) and "+ "service-b: service-b (service kind)", ) - require.Equal(t, 2, input.ResourceMetrics().Len()) + require.Equal(tb, 2, input.ResourceMetrics().Len()) for i := 0; i < input.ResourceMetrics().Len(); i++ { rm := input.ResourceMetrics().At(i) var numDataPoints int val, ok := rm.Resource().Attributes().Get(serviceNameKey) - require.True(t, ok) + require.True(tb, ok) serviceName := val.AsString() if serviceName == "service-a" { numDataPoints = 2 @@ -170,68 +170,68 @@ func verifyConsumeMetricsInput(t testing.TB, 
input pmetric.Metrics, expectedTemp } ilm := rm.ScopeMetrics() - require.Equal(t, 1, ilm.Len()) - assert.Equal(t, "spanmetricsconnector", ilm.At(0).Scope().Name()) + require.Equal(tb, 1, ilm.Len()) + assert.Equal(tb, "spanmetricsconnector", ilm.At(0).Scope().Name()) m := ilm.At(0).Metrics() - require.Equal(t, 2, m.Len(), "only sum and histogram metric types generated") + require.Equal(tb, 2, m.Len(), "only sum and histogram metric types generated") // validate calls - sum metrics metric := m.At(0) - assert.Equal(t, metricNameCalls, metric.Name()) - assert.Equal(t, expectedTemporality, metric.Sum().AggregationTemporality()) - assert.True(t, metric.Sum().IsMonotonic()) + assert.Equal(tb, metricNameCalls, metric.Name()) + assert.Equal(tb, expectedTemporality, metric.Sum().AggregationTemporality()) + assert.True(tb, metric.Sum().IsMonotonic()) seenMetricIDs := make(map[metricID]bool) callsDps := metric.Sum().DataPoints() - require.Equal(t, numDataPoints, callsDps.Len()) + require.Equal(tb, numDataPoints, callsDps.Len()) for dpi := 0; dpi < numDataPoints; dpi++ { dp := callsDps.At(dpi) - assert.Equal(t, + assert.Equal(tb, int64(numCumulativeConsumptions), dp.IntValue(), "There should only be one metric per Service/name/kind combination", ) - assert.NotZero(t, dp.StartTimestamp(), "StartTimestamp should be set") - assert.NotZero(t, dp.Timestamp(), "Timestamp should be set") - verifyMetricLabels(dp, t, seenMetricIDs) + assert.NotZero(tb, dp.StartTimestamp(), "StartTimestamp should be set") + assert.NotZero(tb, dp.Timestamp(), "Timestamp should be set") + verifyMetricLabels(tb, dp, seenMetricIDs) } // validate latency - histogram metrics metric = m.At(1) - assert.Equal(t, metricNameDuration, metric.Name()) - assert.Equal(t, defaultUnit.String(), metric.Unit()) + assert.Equal(tb, metricNameDuration, metric.Name()) + assert.Equal(tb, defaultUnit.String(), metric.Unit()) if metric.Type() == pmetric.MetricTypeExponentialHistogram { hist := metric.ExponentialHistogram() - 
assert.Equal(t, expectedTemporality, hist.AggregationTemporality()) - verifyExponentialHistogramDataPoints(t, hist.DataPoints(), numDataPoints, numCumulativeConsumptions) + assert.Equal(tb, expectedTemporality, hist.AggregationTemporality()) + verifyExponentialHistogramDataPoints(tb, hist.DataPoints(), numDataPoints, numCumulativeConsumptions) } else { hist := metric.Histogram() - assert.Equal(t, expectedTemporality, hist.AggregationTemporality()) - verifyExplicitHistogramDataPoints(t, hist.DataPoints(), numDataPoints, numCumulativeConsumptions) + assert.Equal(tb, expectedTemporality, hist.AggregationTemporality()) + verifyExplicitHistogramDataPoints(tb, hist.DataPoints(), numDataPoints, numCumulativeConsumptions) } } return true } -func verifyExplicitHistogramDataPoints(t testing.TB, dps pmetric.HistogramDataPointSlice, numDataPoints, numCumulativeConsumptions int) { +func verifyExplicitHistogramDataPoints(tb testing.TB, dps pmetric.HistogramDataPointSlice, numDataPoints, numCumulativeConsumptions int) { seenMetricIDs := make(map[metricID]bool) - require.Equal(t, numDataPoints, dps.Len()) + require.Equal(tb, numDataPoints, dps.Len()) for dpi := 0; dpi < numDataPoints; dpi++ { dp := dps.At(dpi) assert.Equal( - t, + tb, sampleDuration*float64(numCumulativeConsumptions), dp.Sum(), "Should be a 11ms duration measurement, multiplied by the number of stateful accumulations.") - assert.NotZero(t, dp.Timestamp(), "Timestamp should be set") + assert.NotZero(tb, dp.Timestamp(), "Timestamp should be set") // Verify bucket counts. // The bucket counts should be 1 greater than the explicit bounds as documented in: // https://github.com/open-telemetry/opentelemetry-proto/blob/main/opentelemetry/proto/metrics/v1/metrics.proto. - assert.Equal(t, dp.ExplicitBounds().Len()+1, dp.BucketCounts().Len()) + assert.Equal(tb, dp.ExplicitBounds().Len()+1, dp.BucketCounts().Len()) // Find the bucket index where the 11ms duration should belong in. 
var foundDurationIndex int @@ -248,31 +248,31 @@ func verifyExplicitHistogramDataPoints(t testing.TB, dps pmetric.HistogramDataPo if bi == foundDurationIndex { wantBucketCount = uint64(numCumulativeConsumptions) } - assert.Equal(t, wantBucketCount, dp.BucketCounts().At(bi)) + assert.Equal(tb, wantBucketCount, dp.BucketCounts().At(bi)) } - verifyMetricLabels(dp, t, seenMetricIDs) + verifyMetricLabels(tb, dp, seenMetricIDs) } } -func verifyExponentialHistogramDataPoints(t testing.TB, dps pmetric.ExponentialHistogramDataPointSlice, numDataPoints, numCumulativeConsumptions int) { +func verifyExponentialHistogramDataPoints(tb testing.TB, dps pmetric.ExponentialHistogramDataPointSlice, numDataPoints, numCumulativeConsumptions int) { seenMetricIDs := make(map[metricID]bool) - require.Equal(t, numDataPoints, dps.Len()) + require.Equal(tb, numDataPoints, dps.Len()) for dpi := 0; dpi < numDataPoints; dpi++ { dp := dps.At(dpi) assert.Equal( - t, + tb, sampleDuration*float64(numCumulativeConsumptions), dp.Sum(), "Should be a 11ms duration measurement, multiplied by the number of stateful accumulations.") - assert.Equal(t, uint64(numCumulativeConsumptions), dp.Count()) - assert.Equal(t, []uint64{uint64(numCumulativeConsumptions)}, dp.Positive().BucketCounts().AsRaw()) - assert.NotZero(t, dp.Timestamp(), "Timestamp should be set") + assert.Equal(tb, uint64(numCumulativeConsumptions), dp.Count()) + assert.Equal(tb, []uint64{uint64(numCumulativeConsumptions)}, dp.Positive().BucketCounts().AsRaw()) + assert.NotZero(tb, dp.Timestamp(), "Timestamp should be set") - verifyMetricLabels(dp, t, seenMetricIDs) + verifyMetricLabels(tb, dp, seenMetricIDs) } } -func verifyMetricLabels(dp metricDataPoint, t testing.TB, seenMetricIDs map[metricID]bool) { +func verifyMetricLabels(tb testing.TB, dp metricDataPoint, seenMetricIDs map[metricID]bool) { mID := metricID{} wantDimensions := map[string]pcommon.Value{ stringAttrName: pcommon.NewValueStr("stringAttrValue"), @@ -296,17 +296,17 @@ func 
verifyMetricLabels(dp metricDataPoint, t testing.TB, seenMetricIDs map[metr case statusCodeKey: mID.statusCode = v.Str() case notInSpanAttrName1: - assert.Fail(t, notInSpanAttrName1+" should not be in this metric") + assert.Fail(tb, notInSpanAttrName1+" should not be in this metric") default: - assert.Equal(t, wantDimensions[k], v) + assert.Equal(tb, wantDimensions[k], v) delete(wantDimensions, k) } return true }) - assert.Empty(t, wantDimensions, "Did not see all expected dimensions in metric. Missing: ", wantDimensions) + assert.Empty(tb, wantDimensions, "Did not see all expected dimensions in metric. Missing: ", wantDimensions) // Service/name/kind should be a unique metric. - assert.False(t, seenMetricIDs[mID]) + assert.False(tb, seenMetricIDs[mID]) seenMetricIDs[mID] = true } diff --git a/exporter/alibabacloudlogserviceexporter/metricsdata_to_logservice_test.go b/exporter/alibabacloudlogserviceexporter/metricsdata_to_logservice_test.go index 67e211517307..1bec6c28aa8c 100644 --- a/exporter/alibabacloudlogserviceexporter/metricsdata_to_logservice_test.go +++ b/exporter/alibabacloudlogserviceexporter/metricsdata_to_logservice_test.go @@ -120,7 +120,7 @@ func TestMetricDataToLogService(t *testing.T) { t.Errorf("Failed load log key value pairs from file, error: %v", err) return } - assert.Equal(t, len(wantLogs), len(gotLogs)) + assert.Len(t, gotLogs, len(wantLogs)) for j := 0; j < len(gotLogs); j++ { sort.Sort(logKeyValuePairs(gotLogPairs[j])) sort.Sort(logKeyValuePairs(wantLogs[j])) diff --git a/exporter/awsemfexporter/grouped_metric_test.go b/exporter/awsemfexporter/grouped_metric_test.go index 8688cfaaca03..fc50b5ab391a 100644 --- a/exporter/awsemfexporter/grouped_metric_test.go +++ b/exporter/awsemfexporter/grouped_metric_test.go @@ -118,7 +118,7 @@ func TestAddToGroupedMetric(t *testing.T) { assert.Len(t, groupedMetrics, 1) for _, v := range groupedMetrics { - assert.Equal(t, len(tc.expectedMetricInfo), len(v.metrics)) + assert.Len(t, v.metrics, 
len(tc.expectedMetricInfo)) assert.Equal(t, tc.expectedMetricInfo, v.metrics) assert.Len(t, v.labels, 2) assert.Equal(t, generateTestMetricMetadata(namespace, timestamp, logGroup, logStreamName, instrumentationLibName, tc.expectedMetricType), v.metadata) diff --git a/exporter/awsemfexporter/metric_translator_test.go b/exporter/awsemfexporter/metric_translator_test.go index 3a8f9268624c..5434c957048d 100644 --- a/exporter/awsemfexporter/metric_translator_test.go +++ b/exporter/awsemfexporter/metric_translator_test.go @@ -177,7 +177,7 @@ func hashMetricSlice(metricSlice []cWMetricInfo) []string { // assertDimsEqual asserts whether dimension sets are equal // (i.e. has same sets of dimensions), regardless of order. func assertDimsEqual(t *testing.T, expected, actual [][]string) { - assert.Equal(t, len(expected), len(actual)) + assert.Len(t, actual, len(expected)) expectedDimensions := normalizeDimensionality(expected) actualDimensions := normalizeDimensionality(actual) assert.Equal(t, expectedDimensions, actualDimensions) @@ -215,7 +215,7 @@ func assertCWMeasurementEqual(t *testing.T, expected, actual cWMeasurement) { assert.Equal(t, expected.Namespace, actual.Namespace) // Check metrics - assert.Equal(t, len(expected.Metrics), len(actual.Metrics)) + assert.Len(t, actual.Metrics, len(expected.Metrics)) expectedHashSlice := hashMetricSlice(expected.Metrics) actualHashSlice := hashMetricSlice(actual.Metrics) assert.Equal(t, expectedHashSlice, actualHashSlice) @@ -226,7 +226,7 @@ func assertCWMeasurementEqual(t *testing.T, expected, actual cWMeasurement) { // assertCWMeasurementSliceEqual asserts whether CW Measurements are equal, regardless of order. 
func assertCWMeasurementSliceEqual(t *testing.T, expected, actual []cWMeasurement) { - assert.Equal(t, len(expected), len(actual)) + assert.Len(t, actual, len(expected)) seen := make([]bool, len(expected)) for _, actualMeasurement := range actual { hasMatch := false @@ -246,7 +246,7 @@ func assertCWMeasurementSliceEqual(t *testing.T, expected, actual []cWMeasuremen func assertCWMetricsEqual(t *testing.T, expected, actual *cWMetrics) { assert.Equal(t, expected.timestampMs, actual.timestampMs) assert.Equal(t, expected.fields, actual.fields) - assert.Equal(t, len(expected.measurements), len(actual.measurements)) + assert.Len(t, actual.measurements, len(expected.measurements)) assertCWMeasurementSliceEqual(t, expected.measurements, actual.measurements) } @@ -1459,7 +1459,7 @@ func TestGroupedMetricToCWMeasurementsWithFilters(t *testing.T) { cWMeasurements := groupedMetricToCWMeasurementsWithFilters(groupedMetric, config) assert.NotNil(t, cWMeasurements) - assert.Equal(t, len(tc.expectedMeasurements), len(cWMeasurements)) + assert.Len(t, cWMeasurements, len(tc.expectedMeasurements)) assertCWMeasurementSliceEqual(t, tc.expectedMeasurements, cWMeasurements) }) } diff --git a/exporter/awsxrayexporter/awsxray_test.go b/exporter/awsxrayexporter/awsxray_test.go index 90d5c12adff9..63794246febb 100644 --- a/exporter/awsxrayexporter/awsxray_test.go +++ b/exporter/awsxrayexporter/awsxray_test.go @@ -105,8 +105,8 @@ func BenchmarkForTracesExporter(b *testing.B) { } } -func initializeTracesExporter(t testing.TB, exporterConfig *Config, registry telemetry.Registry) exporter.Traces { - t.Helper() +func initializeTracesExporter(tb testing.TB, exporterConfig *Config, registry telemetry.Registry) exporter.Traces { + tb.Helper() mconn := new(awsutil.Conn) traceExporter, err := newTracesExporter(exporterConfig, exportertest.NewNopSettings(), mconn, registry) if err != nil { @@ -115,11 +115,11 @@ func initializeTracesExporter(t testing.TB, exporterConfig *Config, registry tel return 
traceExporter } -func generateConfig(t testing.TB) *Config { - t.Setenv("AWS_ACCESS_KEY_ID", "AKIASSWVJUY4PZXXXXXX") - t.Setenv("AWS_SECRET_ACCESS_KEY", "XYrudg2H87u+ADAAq19Wqx3D41a09RsTXXXXXXXX") - t.Setenv("AWS_DEFAULT_REGION", "us-east-1") - t.Setenv("AWS_REGION", "us-east-1") +func generateConfig(tb testing.TB) *Config { + tb.Setenv("AWS_ACCESS_KEY_ID", "AKIASSWVJUY4PZXXXXXX") + tb.Setenv("AWS_SECRET_ACCESS_KEY", "XYrudg2H87u+ADAAq19Wqx3D41a09RsTXXXXXXXX") + tb.Setenv("AWS_DEFAULT_REGION", "us-east-1") + tb.Setenv("AWS_REGION", "us-east-1") factory := NewFactory() exporterConfig := factory.CreateDefaultConfig().(*Config) exporterConfig.Region = "us-east-1" diff --git a/exporter/azuremonitorexporter/metricexporter_test.go b/exporter/azuremonitorexporter/metricexporter_test.go index 520f3e627aef..b0acb856de3c 100644 --- a/exporter/azuremonitorexporter/metricexporter_test.go +++ b/exporter/azuremonitorexporter/metricexporter_test.go @@ -105,34 +105,34 @@ func TestSummaryEnvelopes(t *testing.T) { assert.Equal(t, contracts.Aggregation, dataPoint.Kind) } -func getDataPoint(t testing.TB, metric pmetric.Metric) *contracts.DataPoint { +func getDataPoint(tb testing.TB, metric pmetric.Metric) *contracts.DataPoint { var envelopes []*contracts.Envelope = getMetricPacker().MetricToEnvelopes(metric, getResource(), getScope()) - require.Len(t, envelopes, 1) + require.Len(tb, envelopes, 1) envelope := envelopes[0] - require.NotNil(t, envelope) + require.NotNil(tb, envelope) - assert.NotNil(t, envelope.Tags) - assert.Contains(t, envelope.Tags[contracts.InternalSdkVersion], "otelc-") - assert.NotNil(t, envelope.Time) + assert.NotNil(tb, envelope.Tags) + assert.Contains(tb, envelope.Tags[contracts.InternalSdkVersion], "otelc-") + assert.NotNil(tb, envelope.Time) - require.NotNil(t, envelope.Data) + require.NotNil(tb, envelope.Data) envelopeData := envelope.Data.(*contracts.Data) - assert.Equal(t, "MetricData", envelopeData.BaseType) + assert.Equal(tb, "MetricData", 
envelopeData.BaseType) - require.NotNil(t, envelopeData.BaseData) + require.NotNil(tb, envelopeData.BaseData) metricData := envelopeData.BaseData.(*contracts.MetricData) - require.Len(t, metricData.Metrics, 1) + require.Len(tb, metricData.Metrics, 1) dataPoint := metricData.Metrics[0] - require.NotNil(t, dataPoint) + require.NotNil(tb, dataPoint) actualProperties := metricData.Properties - require.Equal(t, "10", actualProperties["int_attribute"]) - require.Equal(t, "str_value", actualProperties["str_attribute"]) - require.Equal(t, "true", actualProperties["bool_attribute"]) - require.Equal(t, "1.2", actualProperties["double_attribute"]) + require.Equal(tb, "10", actualProperties["int_attribute"]) + require.Equal(tb, "str_value", actualProperties["str_attribute"]) + require.Equal(tb, "true", actualProperties["bool_attribute"]) + require.Equal(tb, "1.2", actualProperties["double_attribute"]) return dataPoint } diff --git a/exporter/fileexporter/file_exporter_test.go b/exporter/fileexporter/file_exporter_test.go index 9639eec9264c..f4d559693273 100644 --- a/exporter/fileexporter/file_exporter_test.go +++ b/exporter/fileexporter/file_exporter_test.go @@ -612,8 +612,8 @@ func TestExportMessageAsBuffer(t *testing.T) { } // tempFileName provides a temporary file name for testing. 
-func tempFileName(t testing.TB) string { - return filepath.Join(t.TempDir(), "fileexporter_test.tmp") +func tempFileName(tb testing.TB) string { + return filepath.Join(tb.TempDir(), "fileexporter_test.tmp") } // errorWriter is an io.Writer that will return an error all ways diff --git a/exporter/honeycombmarkerexporter/logs_exporter_test.go b/exporter/honeycombmarkerexporter/logs_exporter_test.go index 403e43863a1b..bf79231ea80b 100644 --- a/exporter/honeycombmarkerexporter/logs_exporter_test.go +++ b/exporter/honeycombmarkerexporter/logs_exporter_test.go @@ -125,7 +125,7 @@ func TestExportMarkers(t *testing.T) { assert.NoError(t, err) - assert.Equal(t, len(tt.attributeMap), len(decodedBody)) + assert.Len(t, decodedBody, len(tt.attributeMap)) for attr := range tt.attributeMap { assert.Equal(t, tt.attributeMap[attr], decodedBody[attr]) diff --git a/exporter/lokiexporter/exporter_test.go b/exporter/lokiexporter/exporter_test.go index 37f29f29d3da..65179e609120 100644 --- a/exporter/lokiexporter/exporter_test.go +++ b/exporter/lokiexporter/exporter_test.go @@ -274,7 +274,7 @@ func TestLogsToLokiRequestWithGroupingByTenant(t *testing.T) { // actualPushRequest is populated within the test http server, we check it here as assertions are better done at the // end of the test function - assert.Equal(t, len(actualPushRequestPerTenant), len(tC.expected)) + assert.Len(t, actualPushRequestPerTenant, len(tC.expected)) for tenant, request := range actualPushRequestPerTenant { pr, ok := tC.expected[tenant] assert.True(t, ok) diff --git a/exporter/prometheusexporter/collector_test.go b/exporter/prometheusexporter/collector_test.go index 0552f2ec7135..d37a424c4712 100644 --- a/exporter/prometheusexporter/collector_test.go +++ b/exporter/prometheusexporter/collector_test.go @@ -689,7 +689,7 @@ func TestAccumulateHistograms(t *testing.T) { h := pbMetric.Histogram require.Equal(t, tt.histogramCount, h.GetSampleCount()) require.Equal(t, tt.histogramSum, h.GetSampleSum()) -
require.Equal(t, len(tt.histogramPoints), len(h.Bucket)) + require.Len(t, h.Bucket, len(tt.histogramPoints)) for _, b := range h.Bucket { require.Equal(t, tt.histogramPoints[(*b).GetUpperBound()], b.GetCumulativeCount()) diff --git a/exporter/pulsarexporter/marshaler_test.go b/exporter/pulsarexporter/marshaler_test.go index d84f69c89e02..82f10b97bf10 100644 --- a/exporter/pulsarexporter/marshaler_test.go +++ b/exporter/pulsarexporter/marshaler_test.go @@ -24,7 +24,7 @@ func TestDefaultTracesMarshalers(t *testing.T) { "jaeger_json", } marshalers := tracesMarshalers() - assert.Equal(t, len(expectedEncodings), len(marshalers)) + assert.Len(t, marshalers, len(expectedEncodings)) for _, e := range expectedEncodings { t.Run(e, func(t *testing.T) { m, ok := marshalers[e] @@ -40,7 +40,7 @@ func TestDefaultMetricsMarshalers(t *testing.T) { "otlp_json", } marshalers := metricsMarshalers() - assert.Equal(t, len(expectedEncodings), len(marshalers)) + assert.Len(t, marshalers, len(expectedEncodings)) for _, e := range expectedEncodings { t.Run(e, func(t *testing.T) { m, ok := marshalers[e] @@ -56,7 +56,7 @@ func TestDefaultLogsMarshalers(t *testing.T) { "otlp_json", } marshalers := logsMarshalers() - assert.Equal(t, len(expectedEncodings), len(marshalers)) + assert.Len(t, marshalers, len(expectedEncodings)) for _, e := range expectedEncodings { t.Run(e, func(t *testing.T) { m, ok := marshalers[e]