From 6acb80f5d418390adb412020c8e77ecbda5fe27b Mon Sep 17 00:00:00 2001 From: William Dumont Date: Fri, 27 Oct 2023 16:27:36 +0200 Subject: [PATCH] Integration tests proposal (#5475) * add integration tests * add readme * use otlp mimir endpoint * remove docker cleanup to test cache * cleanup docker images when using docker compose down * try running integration tests only on PRs --- .github/workflows/integration-tests.yml | 17 ++ Makefile | 11 +- integration-tests/README.md | 29 +++ integration-tests/common/common.go | 34 +++ integration-tests/common/log.go | 20 ++ integration-tests/common/metric.go | 49 +++++ integration-tests/configs/mimir/mimir.yaml | 63 ++++++ .../otel-collector-contrib.yaml | 27 +++ .../configs/otel-gen-client/Dockerfile | 11 + .../configs/otel-gen-client/main.go | 205 ++++++++++++++++++ .../configs/otel-gen-server/Dockerfile | 11 + .../configs/otel-gen-server/main.go | 155 +++++++++++++ integration-tests/docker-compose.yaml | 62 ++++++ integration-tests/main.go | 55 +++++ .../tests/otlp-metrics/config.river | 31 +++ .../tests/otlp-metrics/otlp_metrics_test.go | 23 ++ .../tests/read-log-file/config.river | 15 ++ .../tests/read-log-file/logs.txt | 13 ++ .../tests/read-log-file/read_log_file_test.go | 26 +++ .../tests/scrape-prom-metrics/config.river | 23 ++ .../scrape_prom_metrics_test.go | 23 ++ integration-tests/utils.go | 134 ++++++++++++ 22 files changed, 1034 insertions(+), 3 deletions(-) create mode 100644 .github/workflows/integration-tests.yml create mode 100644 integration-tests/README.md create mode 100644 integration-tests/common/common.go create mode 100644 integration-tests/common/log.go create mode 100644 integration-tests/common/metric.go create mode 100644 integration-tests/configs/mimir/mimir.yaml create mode 100644 integration-tests/configs/otel-collector-contrib/otel-collector-contrib.yaml create mode 100644 integration-tests/configs/otel-gen-client/Dockerfile create mode 100644 integration-tests/configs/otel-gen-client/main.go create mode 100644 integration-tests/configs/otel-gen-server/Dockerfile create mode 100644 integration-tests/configs/otel-gen-server/main.go create mode 100644 integration-tests/docker-compose.yaml create mode 100644 integration-tests/main.go create mode 100644 integration-tests/tests/otlp-metrics/config.river create mode 100644 integration-tests/tests/otlp-metrics/otlp_metrics_test.go create mode 100644 integration-tests/tests/read-log-file/config.river create mode 100644 integration-tests/tests/read-log-file/logs.txt create mode 100644 integration-tests/tests/read-log-file/read_log_file_test.go create mode 100644 integration-tests/tests/scrape-prom-metrics/config.river create mode 100644 integration-tests/tests/scrape-prom-metrics/scrape_prom_metrics_test.go create mode 100644 integration-tests/utils.go diff --git a/.github/workflows/integration-tests.yml b/.github/workflows/integration-tests.yml new file mode 100644 index 000000000000..d2d7f2cd7aaf --- /dev/null +++ b/.github/workflows/integration-tests.yml @@ -0,0 +1,17 @@ +name: Integration Tests +on: + pull_request: +jobs: + run_tests: + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@v4 + - name: Setup Go + uses: actions/setup-go@v4 + with: + go-version: "1.21.0" + - name: Set OTEL Exporter Endpoint + run: echo "OTEL_EXPORTER_ENDPOINT=http://172.17.0.1:8080" >> $GITHUB_ENV + - name: Run tests + run: make integration-test \ No newline at end of file diff --git a/Makefile b/Makefile index 937a95ab993a..d26f2cae5843 100644 --- a/Makefile 
+++ b/Makefile
@@ -15,8 +15,9 @@
 ##
 ## Targets for running tests:
 ##
-## test              Run tests
-## lint              Lint code
+## test               Run tests
+## lint               Lint code
+## integration-test   Run integration tests
 ##
 ## Targets for building binaries:
 ##
@@ -172,13 +173,17 @@ lint: agentlint
 # We have to run test twice: once for all packages with -race and then once
 # more without -race for packages that have known race detection issues.
 test:
-	$(GO_ENV) go test $(GO_FLAGS) -race ./...
+	$(GO_ENV) go test $(GO_FLAGS) -race $(shell go list ./... | grep -v /integration-tests/)
 	$(GO_ENV) go test $(GO_FLAGS) ./pkg/integrations/node_exporter ./pkg/logs ./pkg/operator ./pkg/util/k8s ./component/otelcol/processor/tail_sampling ./component/loki/source/file
 
 test-packages:
 	docker pull $(BUILD_IMAGE)
 	go test -tags=packaging ./packaging
 
+.PHONY: integration-test
+integration-test:
+	cd integration-tests && $(GO_ENV) go run .
+
 #
 # Targets for building binaries
 #
diff --git a/integration-tests/README.md b/integration-tests/README.md
new file mode 100644
index 000000000000..2d9d8fd9c269
--- /dev/null
+++ b/integration-tests/README.md
@@ -0,0 +1,29 @@
+# Integration tests
+
+This document provides an outline of how to run and add new integration tests to the project.
+
+The purpose of these tests is to verify simple, happy-path pipelines and catch issues between the agent and its external dependencies.
+
+The external dependencies are launched as Docker containers.
+
+## Running tests
+
+Execute the integration tests from the `integration-tests` directory using the following command:
+
+`go run .`
+
+### Flags
+
+* `--skip-build`: Run the integration tests without building the agent (default: `false`)
+* `--test`: Run only the specified directory within the `tests` directory (default: runs all tests)
+
+## Adding new tests
+
+Follow these steps to add a new integration test to the project:
+
+1. If the test requires external resources, define them as services in the `docker-compose.yaml` file.
+2. Create a new directory under the `tests` directory to house the files for the new test.
+3. Within the new test directory, create a file named `config.river` to hold the pipeline configuration you want to test.
+4. Create a `_test.go` file within the new test directory. This file should contain the Go code necessary to run the test and verify the data processing through the pipeline (a minimal sketch follows below).
+
+   _NOTE_: The tests run concurrently. Each agent must therefore tag the data it produces with a label that identifies its configuration; this is what allows the Go tests to verify the correct data.
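As a rough illustration of step 4 above, here is a minimal sketch of what such a `_test.go` file could look like. The test directory name (`my-test`), the metric (`my_metric`), the label value (`my_test`), and the function name are hypothetical; the `common.FetchDataFromURL` helper, the `common.MetricResponse` type, and the Mimir query endpoint on `localhost:9009` are the ones introduced elsewhere in this change:

```go
package main

import (
	"testing"

	"github.com/grafana/agent/integration-tests/common"
	"github.com/stretchr/testify/assert"
)

// Hypothetical query: config.river for this test is assumed to produce a
// metric named my_metric labelled with test_name="my_test".
const query = "http://localhost:9009/prometheus/api/v1/query?query=my_metric{test_name='my_test'}"

func TestMyPipeline(t *testing.T) {
	var metricResponse common.MetricResponse
	assert.EventuallyWithT(t, func(c *assert.CollectT) {
		// Poll Mimir until the expected, correctly labelled sample shows up.
		err := common.FetchDataFromURL(query, &metricResponse)
		assert.NoError(c, err)
		if assert.NotEmpty(c, metricResponse.Data.Result) {
			assert.Equal(c, "my_metric", metricResponse.Data.Result[0].Metric.Name)
			assert.Equal(c, "my_test", metricResponse.Data.Result[0].Metric.TestName)
			assert.NotEmpty(c, metricResponse.Data.Result[0].Value.Value)
		}
	}, common.DefaultTimeout, common.DefaultRetryInterval, "data did not arrive within the time limit")
}
```

With the flags described above, such a test can then be run on its own with `go run . --test my-test`.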
diff --git a/integration-tests/common/common.go b/integration-tests/common/common.go
new file mode 100644
index 000000000000..9049fa47018a
--- /dev/null
+++ b/integration-tests/common/common.go
@@ -0,0 +1,34 @@
+package common
+
+import (
+	"errors"
+	"io"
+	"net/http"
+	"time"
+)
+
+type Unmarshaler interface {
+	Unmarshal([]byte) error
+}
+
+const DefaultRetryInterval = 100 * time.Millisecond
+const DefaultTimeout = time.Minute
+
+func FetchDataFromURL(url string, target Unmarshaler) error {
+	resp, err := http.Get(url)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+
+	if resp.StatusCode != http.StatusOK {
+		return errors.New("non-OK HTTP status: " + resp.Status)
+	}
+
+	bodyBytes, err := io.ReadAll(resp.Body)
+	if err != nil {
+		return err
+	}
+
+	return target.Unmarshal(bodyBytes)
+}
diff --git a/integration-tests/common/log.go b/integration-tests/common/log.go
new file mode 100644
index 000000000000..d1f1655cb926
--- /dev/null
+++ b/integration-tests/common/log.go
@@ -0,0 +1,20 @@
+package common
+
+import "encoding/json"
+
+type LogResponse struct {
+	Status string `json:"status"`
+	Data   struct {
+		ResultType string    `json:"resultType"`
+		Result     []LogData `json:"result"`
+	} `json:"data"`
+}
+
+type LogData struct {
+	Stream map[string]string `json:"stream"`
+	Values [][2]string       `json:"values"`
+}
+
+func (m *LogResponse) Unmarshal(data []byte) error {
+	return json.Unmarshal(data, m)
+}
diff --git a/integration-tests/common/metric.go b/integration-tests/common/metric.go
new file mode 100644
index 000000000000..ea5622fe7fad
--- /dev/null
+++ b/integration-tests/common/metric.go
@@ -0,0 +1,58 @@
+package common
+
+import (
+	"encoding/json"
+	"fmt"
+)
+
+type MetricResponse struct {
+	Status string     `json:"status"`
+	Data   MetricData `json:"data"`
+}
+
+type MetricData struct {
+	ResultType string         `json:"resultType"`
+	Result     []MetricResult `json:"result"`
+}
+
+type MetricResult struct {
+	Metric Metric `json:"metric"`
+	Value  Value  `json:"value"`
+}
+
+type Value struct {
+	Timestamp int64
+	Value     string
+}
+
+// UnmarshalJSON decodes the Prometheus instant-query value pair [ <unix_time>, "<value>" ].
+func (v *Value) UnmarshalJSON(b []byte) error {
+	var arr []interface{}
+	if err := json.Unmarshal(b, &arr); err != nil {
+		return err
+	}
+	if len(arr) != 2 {
+		return fmt.Errorf("expected 2 values, got %d", len(arr))
+	}
+	// JSON numbers are decoded as float64, so the timestamp needs an explicit conversion.
+	timestamp, ok := arr[0].(float64)
+	if !ok {
+		return fmt.Errorf("expected a numeric timestamp, got %T", arr[0])
+	}
+	v.Timestamp = int64(timestamp)
+	value, ok := arr[1].(string)
+	if !ok {
+		return fmt.Errorf("expected a string value, got %T", arr[1])
+	}
+	v.Value = value
+	return nil
+}
+
+type Metric struct {
+	TestName string `json:"test_name"`
+	Name     string `json:"__name__"`
+}
+
+func (m *MetricResponse) Unmarshal(data []byte) error {
+	return json.Unmarshal(data, m)
+}
diff --git a/integration-tests/configs/mimir/mimir.yaml b/integration-tests/configs/mimir/mimir.yaml
new file mode 100644
index 000000000000..e48447f24c3a
--- /dev/null
+++ b/integration-tests/configs/mimir/mimir.yaml
@@ -0,0 +1,63 @@
+# Do not use this configuration in production.
+# It is for demonstration purposes only.
+multitenancy_enabled: false
+
+activity_tracker: {}
+
+alertmanager: {}
+
+alertmanager_storage:
+  backend: local
+
+server:
+  http_listen_port: 9009
+
+  # Configure the server to allow messages up to 100MB.
+ grpc_server_max_recv_msg_size: 104857600 + grpc_server_max_send_msg_size: 104857600 + grpc_server_max_concurrent_streams: 1000 + +distributor: + pool: + health_check_ingesters: true + +ingester_client: + grpc_client_config: + grpc_compression: gzip + max_recv_msg_size: 104857600 + max_send_msg_size: 104857600 + +ingester: + ring: + final_sleep: 0s + kvstore: + store: inmemory + min_ready_duration: 0s + num_tokens: 512 + replication_factor: 1 + +blocks_storage: + backend: filesystem + bucket_store: + sync_dir: /tmp/mimir/tsdb-sync + filesystem: + dir: /tmp/mimir/blocks + tsdb: + dir: /tmp/mimir/tsdb + +compactor: + sharding_ring: + kvstore: + store: inmemory + +ruler: + enable_api: true + +ruler_storage: + backend: filesystem + local: + directory: /tmp/mimir/rules + +limits: + ingestion_burst_size: 500000 + ingestion_rate: 250000 diff --git a/integration-tests/configs/otel-collector-contrib/otel-collector-contrib.yaml b/integration-tests/configs/otel-collector-contrib/otel-collector-contrib.yaml new file mode 100644 index 000000000000..7359cfb35215 --- /dev/null +++ b/integration-tests/configs/otel-collector-contrib/otel-collector-contrib.yaml @@ -0,0 +1,27 @@ +receivers: + otlp: + protocols: + grpc: + +exporters: + logging: + + otlphttp: + endpoint: ${OTEL_EXPORTER_ENDPOINT} + + +connectors: + spanmetrics: + namespace: span.metrics + exemplars: + enabled: true + metrics_flush_interval: 1s + +service: + pipelines: + traces: + receivers: [otlp] + exporters: [spanmetrics] + metrics: + receivers: [spanmetrics] + exporters: [otlphttp] diff --git a/integration-tests/configs/otel-gen-client/Dockerfile b/integration-tests/configs/otel-gen-client/Dockerfile new file mode 100644 index 000000000000..aae9f646067b --- /dev/null +++ b/integration-tests/configs/otel-gen-client/Dockerfile @@ -0,0 +1,11 @@ +# Copyright The OpenTelemetry Authors +# SPDX-License-Identifier: Apache-2.0 +FROM golang:1.21 as build +WORKDIR /app/ +COPY go.mod go.sum ./ +RUN go mod download +COPY ./integration-tests/configs/otel-gen-client/ ./ +RUN CGO_ENABLED=0 go build -o main main.go +FROM alpine:3.18 +COPY --from=build /app/main /app/main +CMD ["/app/main"] diff --git a/integration-tests/configs/otel-gen-client/main.go b/integration-tests/configs/otel-gen-client/main.go new file mode 100644 index 000000000000..7066daccf26d --- /dev/null +++ b/integration-tests/configs/otel-gen-client/main.go @@ -0,0 +1,205 @@ +// This file was copied with minor modifications from https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/b0be5c98325ec71f35b82e278a3fc3e6f3fe4954/examples/demo/client/main.go + +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Sample contains a simple client that periodically makes a simple http request +// to a server and exports to the OpenTelemetry service. 
+package main + +import ( + "context" + "log" + "math/rand" + "net/http" + "os" + "time" + + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/baggage" + "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/propagation" + sdkmetric "go.opentelemetry.io/otel/sdk/metric" + "go.opentelemetry.io/otel/sdk/resource" + sdktrace "go.opentelemetry.io/otel/sdk/trace" + semconv "go.opentelemetry.io/otel/semconv/v1.4.0" + "google.golang.org/grpc" +) + +const ( + otelExporterOtlpEndpoint = "OTEL_EXPORTER_OTLP_ENDPOINT" + demoServerEndpoint = "DEMO_SERVER_ENDPOINT" +) + +// Initializes an OTLP exporter, and configures the corresponding trace and +// metric providers. +func initProvider() func() { + ctx := context.Background() + + res, err := resource.New(ctx, + resource.WithFromEnv(), + resource.WithProcess(), + resource.WithTelemetrySDK(), + resource.WithHost(), + resource.WithAttributes( + // the service name used to display traces in backends + semconv.ServiceNameKey.String("demo-client"), + ), + ) + handleErr(err, "failed to create resource") + + otelAgentAddr, ok := os.LookupEnv(otelExporterOtlpEndpoint) + if !ok { + otelAgentAddr = "0.0.0.0:4317" + } + + metricExp, err := otlpmetricgrpc.New( + ctx, + otlpmetricgrpc.WithInsecure(), + otlpmetricgrpc.WithEndpoint(otelAgentAddr), + ) + handleErr(err, "Failed to create the collector metric exporter") + + meterProvider := sdkmetric.NewMeterProvider( + sdkmetric.WithResource(res), + sdkmetric.WithReader( + sdkmetric.NewPeriodicReader( + metricExp, + sdkmetric.WithInterval(2*time.Second), + ), + ), + ) + otel.SetMeterProvider(meterProvider) + + traceClient := otlptracegrpc.NewClient( + otlptracegrpc.WithInsecure(), + otlptracegrpc.WithEndpoint(otelAgentAddr), + otlptracegrpc.WithDialOption(grpc.WithBlock())) + sctx, cancel := context.WithTimeout(ctx, time.Second) + defer cancel() + traceExp, err := otlptrace.New(sctx, traceClient) + handleErr(err, "Failed to create the collector trace exporter") + + bsp := sdktrace.NewBatchSpanProcessor(traceExp) + tracerProvider := sdktrace.NewTracerProvider( + sdktrace.WithSampler(sdktrace.AlwaysSample()), + sdktrace.WithResource(res), + sdktrace.WithSpanProcessor(bsp), + ) + + // set global propagator to tracecontext (the default is no-op). + otel.SetTextMapPropagator(propagation.NewCompositeTextMapPropagator(propagation.TraceContext{}, propagation.Baggage{})) + otel.SetTracerProvider(tracerProvider) + + return func() { + cxt, cancel := context.WithTimeout(ctx, time.Second) + defer cancel() + if err := traceExp.Shutdown(cxt); err != nil { + otel.Handle(err) + } + // pushes any last exports to the receiver + if err := meterProvider.Shutdown(cxt); err != nil { + otel.Handle(err) + } + } +} + +func handleErr(err error, message string) { + if err != nil { + log.Fatalf("%s: %v", message, err) + } +} + +func main() { + shutdown := initProvider() + defer shutdown() + + tracer := otel.Tracer("demo-client-tracer") + meter := otel.Meter("demo-client-meter") + + method, _ := baggage.NewMember("method", "repl") + client, _ := baggage.NewMember("client", "cli") + bag, _ := baggage.New(method, client) + + // labels represent additional key-value descriptors that can be bound to a + // metric observer or recorder. 
+ // TODO: Use baggage when supported to extract labels from baggage. + commonLabels := []attribute.KeyValue{ + attribute.String("method", "repl"), + attribute.String("client", "cli"), + } + + // Recorder metric example + requestLatency, _ := meter.Float64Histogram( + "demo_client/request_latency", + metric.WithDescription("The latency of requests processed"), + ) + + // TODO: Use a view to just count number of measurements for requestLatency when available. + requestCount, _ := meter.Int64Counter( + "demo_client/request_counts", + metric.WithDescription("The number of requests processed"), + ) + + lineLengths, _ := meter.Int64Histogram( + "demo_client/line_lengths", + metric.WithDescription("The lengths of the various lines in"), + ) + + // TODO: Use a view to just count number of measurements for lineLengths when available. + lineCounts, _ := meter.Int64Counter( + "demo_client/line_counts", + metric.WithDescription("The counts of the lines in"), + ) + + defaultCtx := baggage.ContextWithBaggage(context.Background(), bag) + rng := rand.New(rand.NewSource(time.Now().UnixNano())) + for { + startTime := time.Now() + ctx, span := tracer.Start(defaultCtx, "ExecuteRequest") + makeRequest(ctx) + span.End() + latencyMs := float64(time.Since(startTime)) / 1e6 + nr := int(rng.Int31n(7)) + for i := 0; i < nr; i++ { + randLineLength := rng.Int63n(999) + lineCounts.Add(ctx, 1, metric.WithAttributes(commonLabels...)) + lineLengths.Record(ctx, randLineLength, metric.WithAttributes(commonLabels...)) + } + + requestLatency.Record(ctx, latencyMs, metric.WithAttributes(commonLabels...)) + requestCount.Add(ctx, 1, metric.WithAttributes(commonLabels...)) + + time.Sleep(time.Duration(1) * time.Second) + } +} + +func makeRequest(ctx context.Context) { + demoServerAddr, ok := os.LookupEnv(demoServerEndpoint) + if !ok { + demoServerAddr = "http://0.0.0.0:7080/hello" + } + + // Trace an HTTP client by wrapping the transport + client := http.Client{ + Transport: otelhttp.NewTransport(http.DefaultTransport), + } + + // Make sure we pass the context to the request to avoid broken traces. + req, err := http.NewRequestWithContext(ctx, "GET", demoServerAddr, nil) + if err != nil { + handleErr(err, "failed to http request") + } + + // All requests made with this client will create spans. 
+ res, err := client.Do(req) + if err != nil { + panic(err) + } + res.Body.Close() +} diff --git a/integration-tests/configs/otel-gen-server/Dockerfile b/integration-tests/configs/otel-gen-server/Dockerfile new file mode 100644 index 000000000000..bc6bc1c6d136 --- /dev/null +++ b/integration-tests/configs/otel-gen-server/Dockerfile @@ -0,0 +1,11 @@ +# Copyright The OpenTelemetry Authors +# SPDX-License-Identifier: Apache-2.0 +FROM golang:1.21 as build +WORKDIR /app/ +COPY go.mod go.sum ./ +RUN go mod download +COPY ./integration-tests/configs/otel-gen-server/ ./ +RUN CGO_ENABLED=0 go build -o main main.go +FROM alpine:3.18 +COPY --from=build /app/main /app/main +CMD ["/app/main"] diff --git a/integration-tests/configs/otel-gen-server/main.go b/integration-tests/configs/otel-gen-server/main.go new file mode 100644 index 000000000000..97bfb1ca2ef7 --- /dev/null +++ b/integration-tests/configs/otel-gen-server/main.go @@ -0,0 +1,155 @@ +// This file was copied with minor modifications from https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/b0be5c98325ec71f35b82e278a3fc3e6f3fe4954/examples/demo/server/main.go + +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Sample contains a simple http server that exports to the OpenTelemetry agent. + +package main + +import ( + "context" + "log" + "net/http" + "os" + "time" + + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/baggage" + "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/propagation" + sdkmetric "go.opentelemetry.io/otel/sdk/metric" + "go.opentelemetry.io/otel/sdk/resource" + sdktrace "go.opentelemetry.io/otel/sdk/trace" + semconv "go.opentelemetry.io/otel/semconv/v1.4.0" + "go.opentelemetry.io/otel/trace" + "google.golang.org/grpc" +) + +const otelExporterOtlpEndpoint = "OTEL_EXPORTER_OTLP_ENDPOINT" + +// Initializes an OTLP exporter, and configures the corresponding trace and +// metric providers. 
+func initProvider() func() { + ctx := context.Background() + + res, err := resource.New(ctx, + resource.WithFromEnv(), + resource.WithProcess(), + resource.WithTelemetrySDK(), + resource.WithHost(), + resource.WithAttributes( + // the service name used to display traces in backends + semconv.ServiceNameKey.String("demo-server"), + ), + ) + handleErr(err, "failed to create resource") + + otelAgentAddr, ok := os.LookupEnv(otelExporterOtlpEndpoint) + if !ok { + otelAgentAddr = "0.0.0.0:4317" + } + + metricExp, err := otlpmetricgrpc.New( + ctx, + otlpmetricgrpc.WithInsecure(), + otlpmetricgrpc.WithEndpoint(otelAgentAddr)) + handleErr(err, "Failed to create the collector metric exporter") + + meterProvider := sdkmetric.NewMeterProvider( + sdkmetric.WithResource(res), + sdkmetric.WithReader( + sdkmetric.NewPeriodicReader( + metricExp, + sdkmetric.WithInterval(2*time.Second), + ), + ), + ) + otel.SetMeterProvider(meterProvider) + + traceClient := otlptracegrpc.NewClient( + otlptracegrpc.WithInsecure(), + otlptracegrpc.WithEndpoint(otelAgentAddr), + otlptracegrpc.WithDialOption(grpc.WithBlock())) + traceExp, err := otlptrace.New(ctx, traceClient) + handleErr(err, "Failed to create the collector trace exporter") + + bsp := sdktrace.NewBatchSpanProcessor(traceExp) + tracerProvider := sdktrace.NewTracerProvider( + sdktrace.WithSampler(sdktrace.AlwaysSample()), + sdktrace.WithResource(res), + sdktrace.WithSpanProcessor(bsp), + ) + + // set global propagator to tracecontext (the default is no-op). + otel.SetTextMapPropagator(propagation.NewCompositeTextMapPropagator(propagation.TraceContext{}, propagation.Baggage{})) + otel.SetTracerProvider(tracerProvider) + + return func() { + cxt, cancel := context.WithTimeout(ctx, time.Second) + defer cancel() + if err := traceExp.Shutdown(cxt); err != nil { + otel.Handle(err) + } + // pushes any last exports to the receiver + if err := meterProvider.Shutdown(cxt); err != nil { + otel.Handle(err) + } + } +} + +func handleErr(err error, message string) { + if err != nil { + log.Fatalf("%s: %v", message, err) + } +} + +func main() { + shutdown := initProvider() + defer shutdown() + + meter := otel.Meter("demo-server-meter") + serverAttribute := attribute.String("server-attribute", "foo") + commonLabels := []attribute.KeyValue{serverAttribute} + requestCount, _ := meter.Int64Counter( + "demo_server/request_counts", + metric.WithDescription("The number of requests received"), + ) + + // create a handler wrapped in OpenTelemetry instrumentation + handler := http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + time.Sleep(time.Duration(100) * time.Millisecond) + ctx := req.Context() + requestCount.Add(ctx, 1, metric.WithAttributes(commonLabels...)) + span := trace.SpanFromContext(ctx) + bag := baggage.FromContext(ctx) + + var baggageAttributes []attribute.KeyValue + baggageAttributes = append(baggageAttributes, serverAttribute) + for _, member := range bag.Members() { + baggageAttributes = append(baggageAttributes, attribute.String("baggage key:"+member.Key(), member.Value())) + } + span.SetAttributes(baggageAttributes...) 
+ + if _, err := w.Write([]byte("Hello World")); err != nil { + http.Error(w, "write operation failed.", http.StatusInternalServerError) + return + } + }) + + mux := http.NewServeMux() + mux.Handle("/hello", otelhttp.NewHandler(handler, "/hello")) + server := &http.Server{ + Addr: ":7080", + Handler: mux, + ReadHeaderTimeout: 20 * time.Second, + } + if err := server.ListenAndServe(); err != http.ErrServerClosed { + handleErr(err, "server failed to serve") + } +} diff --git a/integration-tests/docker-compose.yaml b/integration-tests/docker-compose.yaml new file mode 100644 index 000000000000..2aa98f9b1500 --- /dev/null +++ b/integration-tests/docker-compose.yaml @@ -0,0 +1,62 @@ +version: "3" +services: + + mimir: + image: grafana/mimir:2.9.0 + volumes: + - ./configs/mimir:/etc/mimir-config + entrypoint: + - /bin/mimir + - -config.file=/etc/mimir-config/mimir.yaml + ports: + - "9009:9009" + + loki: + image: grafana/loki:2.8.3 + command: -config.file=/etc/loki/local-config.yaml + ports: + - "3100:3100" + + otel-collector: + image: otel/opentelemetry-collector-contrib:0.85.0 + restart: always + command: ["--config=/etc/otel-collector-contrib.yaml", ""] + volumes: + - ./configs/otel-collector-contrib/otel-collector-contrib.yaml:/etc/otel-collector-contrib.yaml + ports: + - "4317:4317" # OTLP gRPC receiver + - "4318:4318" # OTLP HTTP exporter + environment: + - OTEL_EXPORTER_ENDPOINT=${OTEL_EXPORTER_ENDPOINT:-http://host.docker.internal:8080} + + demo-client: + build: + dockerfile: ./integration-tests/configs/otel-gen-client/Dockerfile + context: .. + restart: always + environment: + - OTEL_EXPORTER_OTLP_ENDPOINT=otel-collector:4317 + - DEMO_SERVER_ENDPOINT=http://demo-server:7080/hello + depends_on: + - demo-server + + demo-server: + build: + dockerfile: ./integration-tests/configs/otel-gen-server/Dockerfile + context: .. 
+ restart: always + environment: + - OTEL_EXPORTER_OTLP_ENDPOINT=otel-collector:4317 + ports: + - "7080" + depends_on: + - otel-collector + + avalanche: + image: quay.io/freshtracks.io/avalanche:latest + command: + - --metric-count=50 + - --series-interval=7200 + - --metric-interval=7200 + ports: + - "9001:9001" \ No newline at end of file diff --git a/integration-tests/main.go b/integration-tests/main.go new file mode 100644 index 000000000000..29009742efae --- /dev/null +++ b/integration-tests/main.go @@ -0,0 +1,55 @@ +package main + +import ( + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/spf13/cobra" +) + +var specificTest string +var skipBuild bool + +func main() { + rootCmd := &cobra.Command{ + Use: "integration-tests", + Short: "Run integration tests", + Run: runIntegrationTests, + } + + rootCmd.PersistentFlags().StringVar(&specificTest, "test", "", "Specific test directory to run") + rootCmd.PersistentFlags().BoolVar(&skipBuild, "skip-build", false, "Skip building the agent") + + if err := rootCmd.Execute(); err != nil { + fmt.Println(err) + os.Exit(1) + } +} + +func runIntegrationTests(cmd *cobra.Command, args []string) { + defer reportResults() + defer cleanUpEnvironment() + + if !skipBuild { + buildAgent() + } + setupEnvironment() + + if specificTest != "" { + fmt.Println("Running", specificTest) + if !filepath.IsAbs(specificTest) && !strings.HasPrefix(specificTest, "./tests/") { + specificTest = "./tests/" + specificTest + } + logChan = make(chan TestLog, 1) + runSingleTest(specificTest) + } else { + testDirs, err := filepath.Glob("./tests/*") + if err != nil { + panic(err) + } + logChan = make(chan TestLog, len(testDirs)) + runAllTests() + } +} diff --git a/integration-tests/tests/otlp-metrics/config.river b/integration-tests/tests/otlp-metrics/config.river new file mode 100644 index 000000000000..8c4f0ebc96e6 --- /dev/null +++ b/integration-tests/tests/otlp-metrics/config.river @@ -0,0 +1,31 @@ +otelcol.receiver.otlp "otlp_metrics" { + http { + endpoint="0.0.0.0:8080" + } + + output { + metrics = [otelcol.processor.attributes.otlp_metrics.input] + } +} + +otelcol.processor.attributes "otlp_metrics" { + action { + key = "test_name" + value = "otlp_metrics" + action = "insert" + } + + output { + metrics = [otelcol.exporter.otlphttp.otlp_metrics.input] + } +} + +otelcol.exporter.otlphttp "otlp_metrics" { + client { + endpoint = "http://localhost:9009/otlp" + tls { + insecure = true + insecure_skip_verify = true + } + } +} diff --git a/integration-tests/tests/otlp-metrics/otlp_metrics_test.go b/integration-tests/tests/otlp-metrics/otlp_metrics_test.go new file mode 100644 index 000000000000..a4bdd887c50c --- /dev/null +++ b/integration-tests/tests/otlp-metrics/otlp_metrics_test.go @@ -0,0 +1,23 @@ +package main + +import ( + "testing" + + "github.com/grafana/agent/integration-tests/common" + "github.com/stretchr/testify/assert" +) + +const query = "http://localhost:9009/prometheus/api/v1/query?query=span_metrics_duration_bucket{test_name='otlp_metrics'}" + +func TestOtlpMetrics(t *testing.T) { + var metricResponse common.MetricResponse + assert.EventuallyWithT(t, func(c *assert.CollectT) { + err := common.FetchDataFromURL(query, &metricResponse) + assert.NoError(c, err) + if assert.NotEmpty(c, metricResponse.Data.Result) { + assert.Equal(c, metricResponse.Data.Result[0].Metric.Name, "span_metrics_duration_bucket") + assert.Equal(c, metricResponse.Data.Result[0].Metric.TestName, "otlp_metrics") + assert.NotEmpty(c, metricResponse.Data.Result[0].Value.Value) + } + }, 
common.DefaultTimeout, common.DefaultRetryInterval, "Data did not satisfy the conditions within the time limit") +} diff --git a/integration-tests/tests/read-log-file/config.river b/integration-tests/tests/read-log-file/config.river new file mode 100644 index 000000000000..55be3919717e --- /dev/null +++ b/integration-tests/tests/read-log-file/config.river @@ -0,0 +1,15 @@ +loki.source.file "test" { + targets = [ + {__path__ = "logs.txt"}, + ] + forward_to = [loki.write.test.receiver] +} + +loki.write "test" { + endpoint { + url = "http://localhost:3100/loki/api/v1/push" + } + external_labels = { + test_name = "read_log_file", + } +} diff --git a/integration-tests/tests/read-log-file/logs.txt b/integration-tests/tests/read-log-file/logs.txt new file mode 100644 index 000000000000..ed6c24c81170 --- /dev/null +++ b/integration-tests/tests/read-log-file/logs.txt @@ -0,0 +1,13 @@ +[2023-10-02 14:25:43] INFO: Starting the web application... +[2023-10-02 14:25:45] DEBUG: Database connection established. +[2023-10-02 14:26:01] INFO: User 'john_doe' logged in. +[2023-10-02 14:26:05] WARNING: User 'john_doe' attempted to access restricted area. +[2023-10-02 14:26:10] ERROR: Failed to retrieve data for item ID: 1234. +[2023-10-02 14:26:15] INFO: User 'john_doe' logged out. +[2023-10-02 14:27:00] INFO: User 'admin' logged in. +[2023-10-02 14:27:05] INFO: Data backup started. +[2023-10-02 14:30:00] INFO: Data backup completed successfully. +[2023-10-02 14:31:23] ERROR: Database connection lost. Retrying in 5 seconds... +[2023-10-02 14:31:28] INFO: Database reconnected. +[2023-10-02 14:32:00] INFO: User 'admin' logged out. +[2023-10-02 14:32:05] INFO: Shutting down the web application... diff --git a/integration-tests/tests/read-log-file/read_log_file_test.go b/integration-tests/tests/read-log-file/read_log_file_test.go new file mode 100644 index 000000000000..9f3730506d97 --- /dev/null +++ b/integration-tests/tests/read-log-file/read_log_file_test.go @@ -0,0 +1,26 @@ +package main + +import ( + "testing" + + "github.com/grafana/agent/integration-tests/common" + "github.com/stretchr/testify/assert" +) + +const query = "http://localhost:3100/loki/api/v1/query?query={test_name=%22read_log_file%22}" + +func TestReadLogFile(t *testing.T) { + var logResponse common.LogResponse + assert.EventuallyWithT(t, func(c *assert.CollectT) { + err := common.FetchDataFromURL(query, &logResponse) + assert.NoError(c, err) + if assert.NotEmpty(c, logResponse.Data.Result) { + assert.Equal(c, logResponse.Data.Result[0].Stream["filename"], "logs.txt") + logs := make([]string, len(logResponse.Data.Result[0].Values)) + for i, valuePair := range logResponse.Data.Result[0].Values { + logs[i] = valuePair[1] + } + assert.Contains(c, logs, "[2023-10-02 14:25:43] INFO: Starting the web application...") + } + }, common.DefaultTimeout, common.DefaultRetryInterval, "Data did not satisfy the conditions within the time limit") +} diff --git a/integration-tests/tests/scrape-prom-metrics/config.river b/integration-tests/tests/scrape-prom-metrics/config.river new file mode 100644 index 000000000000..77fa88430977 --- /dev/null +++ b/integration-tests/tests/scrape-prom-metrics/config.river @@ -0,0 +1,23 @@ +prometheus.scrape "scrape_prom_metrics" { + targets = [ + {"__address__" = "localhost:9001"}, + ] + forward_to = [prometheus.remote_write.scrape_prom_metrics.receiver] + scrape_interval = "1s" + scrape_timeout = "500ms" +} + +prometheus.remote_write "scrape_prom_metrics" { + endpoint { + url = "http://localhost:9009/api/v1/push" + 
metadata_config { + send_interval = "1s" + } + queue_config { + max_samples_per_send = 100 + } + } + external_labels = { + test_name = "scrape_prom_metrics", + } +} diff --git a/integration-tests/tests/scrape-prom-metrics/scrape_prom_metrics_test.go b/integration-tests/tests/scrape-prom-metrics/scrape_prom_metrics_test.go new file mode 100644 index 000000000000..f6c99a08c132 --- /dev/null +++ b/integration-tests/tests/scrape-prom-metrics/scrape_prom_metrics_test.go @@ -0,0 +1,23 @@ +package main + +import ( + "testing" + + "github.com/grafana/agent/integration-tests/common" + "github.com/stretchr/testify/assert" +) + +const query = "http://localhost:9009/prometheus/api/v1/query?query=avalanche_metric_mmmmm_0_0{test_name='scrape_prom_metrics'}" + +func TestScrapePromMetrics(t *testing.T) { + var metricResponse common.MetricResponse + assert.EventuallyWithT(t, func(c *assert.CollectT) { + err := common.FetchDataFromURL(query, &metricResponse) + assert.NoError(c, err) + if assert.NotEmpty(c, metricResponse.Data.Result) { + assert.Equal(c, metricResponse.Data.Result[0].Metric.Name, "avalanche_metric_mmmmm_0_0") + assert.Equal(c, metricResponse.Data.Result[0].Metric.TestName, "scrape_prom_metrics") + assert.NotEmpty(c, metricResponse.Data.Result[0].Value.Value) + } + }, common.DefaultTimeout, common.DefaultRetryInterval, "Data did not satisfy the conditions within the time limit") +} diff --git a/integration-tests/utils.go b/integration-tests/utils.go new file mode 100644 index 000000000000..06aba877203c --- /dev/null +++ b/integration-tests/utils.go @@ -0,0 +1,134 @@ +package main + +import ( + "bytes" + "fmt" + "os" + "os/exec" + "path/filepath" + "sync" +) + +const ( + agentBinaryPath = "../../../build/grafana-agent-flow" + dockerComposeCmd = "docker-compose" + makeCmd = "make" +) + +type TestLog struct { + TestDir string + AgentLog string + TestOutput string +} + +var logChan chan TestLog + +func buildAgent() { + fmt.Println("Building agent...") + cmd := exec.Command(makeCmd, "-C", "..", "agent-flow") + if err := cmd.Run(); err != nil { + panic(err) + } +} + +func setupEnvironment() { + fmt.Println("Setting up environment with Docker Compose...") + cmd := exec.Command(dockerComposeCmd, "up", "-d") + if err := cmd.Run(); err != nil { + panic(err) + } +} + +func runSingleTest(testDir string) { + info, err := os.Stat(testDir) + if err != nil { + panic(err) + } + if !info.IsDir() { + return + } + + dirName := filepath.Base(testDir) + + var agentLogBuffer bytes.Buffer + cmd := exec.Command(agentBinaryPath, "run", "config.river") + cmd.Dir = testDir + cmd.Stdout = &agentLogBuffer + cmd.Stderr = &agentLogBuffer + + if err := cmd.Start(); err != nil { + logChan <- TestLog{ + TestDir: dirName, + AgentLog: fmt.Sprintf("Failed to start agent: %v", err), + } + return + } + + testCmd := exec.Command("go", "test") + testCmd.Dir = testDir + testOutput, errTest := testCmd.CombinedOutput() + + err = cmd.Process.Kill() + if err != nil { + panic(err) + } + + agentLog := agentLogBuffer.String() + + if errTest != nil { + logChan <- TestLog{ + TestDir: dirName, + AgentLog: agentLog, + TestOutput: string(testOutput), + } + } + + err = os.RemoveAll(filepath.Join(testDir, "data-agent")) + if err != nil { + panic(err) + } +} + +func runAllTests() { + testDirs, err := filepath.Glob("./tests/*") + if err != nil { + panic(err) + } + var wg sync.WaitGroup + + for _, testDir := range testDirs { + fmt.Println("Running", testDir) + wg.Add(1) + go func(td string) { + defer wg.Done() + runSingleTest(td) + }(testDir) + } + 
wg.Wait() +} + +func cleanUpEnvironment() { + fmt.Println("Cleaning up Docker environment...") + err := exec.Command(dockerComposeCmd, "down", "--volumes", "--rmi", "all").Run() + if err != nil { + panic(err) + } +} + +func reportResults() { + close(logChan) + testsFailed := 0 + for log := range logChan { + fmt.Printf("Failure detected in %s:\n", log.TestDir) + fmt.Println("Test output:", log.TestOutput) + fmt.Println("Agent logs:", log.AgentLog) + testsFailed++ + } + + if testsFailed > 0 { + fmt.Printf("%d tests failed!\n", testsFailed) + os.Exit(1) + } else { + fmt.Println("All integration tests passed!") + } +}
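As a closing note on the `common` helpers: the sketch below shows how `common.MetricResponse` decodes a Prometheus instant-query response of the kind Mimir returns, which can be handy when writing assertions for a new test. The JSON payload, metric name, and values are made up for illustration; only the types and the query-API shape come from this change and the Prometheus HTTP API.

```go
package main

import (
	"fmt"

	"github.com/grafana/agent/integration-tests/common"
)

func main() {
	// Illustrative payload in the shape returned by Mimir's
	// /prometheus/api/v1/query endpoint; the values are fabricated.
	payload := []byte(`{
		"status": "success",
		"data": {
			"resultType": "vector",
			"result": [
				{"metric": {"__name__": "my_metric", "test_name": "my_test"}, "value": [1698417600, "42"]}
			]
		}
	}`)

	var resp common.MetricResponse
	if err := resp.Unmarshal(payload); err != nil {
		panic(err)
	}
	r := resp.Data.Result[0]
	// Prints: my_metric my_test 1698417600 42
	fmt.Println(r.Metric.Name, r.Metric.TestName, r.Value.Timestamp, r.Value.Value)
}
```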