diff --git a/.github/workflows/build-image.yml b/.github/workflows/build-image.yml index 3eb6312a18..cd68ee75b8 100644 --- a/.github/workflows/build-image.yml +++ b/.github/workflows/build-image.yml @@ -12,26 +12,29 @@ on: - 'build-image/**' - '.github/workflows/build-image.yml' +permissions: + contents: read + jobs: build: runs-on: ubuntu-24.04 steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 name: Checkout with: fetch-depth: 0 - name: Set up QEMU - uses: docker/setup-qemu-action@v3 + uses: docker/setup-qemu-action@49b3bc8e6bdd4a60e6116a5414239cba5943d3cf # v3.2.0 - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 + uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v3.6.1 - name: Save image run: make save-multiarch-build-image - name: Upload Docker Images Artifacts - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@834a144ee995460fba8ed112a2fc961b36a5ec5a # v4.3.6 with: name: build-image path: | @@ -44,13 +47,13 @@ jobs: if: (github.ref == 'refs/heads/master' || startsWith(github.ref, 'refs/tags/')) && github.repository == 'cortexproject/cortex' runs-on: ubuntu-24.04 steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 name: Checkout with: fetch-depth: 0 - name: Download Docker Images Artifacts - uses: actions/download-artifact@v4 + uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 with: name: build-image @@ -58,7 +61,7 @@ jobs: run: make load-multiarch-build-image - name: Login to Quay.io - uses: docker/login-action@v3 + uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0 with: registry: quay.io username: ${{secrets.QUAY_REGISTRY_USER}} diff --git a/.github/workflows/test-build-deploy.yml b/.github/workflows/test-build-deploy.yml index 1820dca087..cd8a472ebd 100644 --- a/.github/workflows/test-build-deploy.yml +++ b/.github/workflows/test-build-deploy.yml @@ -20,7 +20,7 @@ jobs: image: quay.io/cortexproject/build-image:master-779dcf4ba steps: - name: Checkout Repo - uses: actions/checkout@v2 + uses: actions/checkout@ee0669bd1cc54295c223e0bb666b733df41de1c5 # v2.7.0 - name: Setup Git safe.directory run: | echo "this step is needed because when running in container, actions/checkout does not set safe.directory effectively." @@ -49,7 +49,7 @@ jobs: image: quay.io/cortexproject/build-image:master-779dcf4ba steps: - name: Checkout Repo - uses: actions/checkout@v2 + uses: actions/checkout@ee0669bd1cc54295c223e0bb666b733df41de1c5 # v2.7.0 - name: Setup Git safe.directory run: | echo "this step is needed because when running in container, actions/checkout does not set safe.directory effectively." @@ -71,19 +71,19 @@ jobs: security-events: write steps: - name: Checkout repository - uses: actions/checkout@v4 + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 # Initializes the CodeQL tools for scanning. 
- name: Initialize CodeQL - uses: github/codeql-action/init@v3 + uses: github/codeql-action/init@429e1977040da7a23b6822b13c129cd1ba93dbb2 # v3.26.2 with: languages: go - name: Autobuild - uses: github/codeql-action/autobuild@v3 + uses: github/codeql-action/autobuild@429e1977040da7a23b6822b13c129cd1ba93dbb2 # v3.26.2 - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@v3 + uses: github/codeql-action/analyze@429e1977040da7a23b6822b13c129cd1ba93dbb2 # v3.26.2 build: @@ -92,7 +92,7 @@ jobs: image: quay.io/cortexproject/build-image:master-779dcf4ba steps: - name: Checkout Repo - uses: actions/checkout@v2 + uses: actions/checkout@ee0669bd1cc54295c223e0bb666b733df41de1c5 # v2.7.0 - name: Setup Git safe.directory run: | echo "this step is needed because when running in container, actions/checkout does not set safe.directory effectively." @@ -113,7 +113,7 @@ jobs: touch build-image/.uptodate make BUILD_IN_CONTAINER=false web-build - name: Upload Website Artifact - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@834a144ee995460fba8ed112a2fc961b36a5ec5a # v4.3.6 with: name: website public path: website/public/ @@ -125,7 +125,7 @@ jobs: - name: Create Docker Images Archive run: tar -cvf images.tar /tmp/images - name: Upload Docker Images Artifact - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@834a144ee995460fba8ed112a2fc961b36a5ec5a # v4.3.6 with: name: Docker Images path: ./images.tar @@ -146,11 +146,11 @@ jobs: - integration_query_fuzz steps: - name: Upgrade golang - uses: actions/setup-go@v2 + uses: actions/setup-go@bfdd3570ce990073878bf10f6b2d79082de49492 # v2.2.0 with: go-version: 1.22.5 - name: Checkout Repo - uses: actions/checkout@v2 + uses: actions/checkout@ee0669bd1cc54295c223e0bb666b733df41de1c5 # v2.7.0 - name: Install Docker Client run: sudo ./.github/workflows/scripts/install-docker.sh - name: Sym Link Expected Path to Workspace @@ -158,7 +158,7 @@ jobs: sudo mkdir -p /go/src/github.com/cortexproject/cortex sudo ln -s $GITHUB_WORKSPACE/* /go/src/github.com/cortexproject/cortex - name: Download Docker Images Artifacts - uses: actions/download-artifact@v4 + uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 with: name: Docker Images - name: Extract Docker Images Archive @@ -188,6 +188,7 @@ jobs: docker pull quay.io/cortexproject/cortex:v1.16.1 docker pull quay.io/cortexproject/cortex:v1.17.0 docker pull quay.io/cortexproject/cortex:v1.17.1 + docker pull quay.io/cortexproject/cortex:v1.18.0 fi docker pull memcached:1.6.1 docker pull redis:7.0.4-alpine @@ -209,11 +210,11 @@ jobs: runs-on: ubuntu-20.04 steps: - name: Checkout Repo - uses: actions/checkout@v2 + uses: actions/checkout@ee0669bd1cc54295c223e0bb666b733df41de1c5 # v2.7.0 - name: Install Docker Client run: sudo ./.github/workflows/scripts/install-docker.sh - name: Download Docker Images Artifact - uses: actions/download-artifact@v4 + uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 with: name: Docker Images - name: Extract Docker Images Archive @@ -233,7 +234,7 @@ jobs: image: quay.io/cortexproject/build-image:master-779dcf4ba steps: - name: Checkout Repo - uses: actions/checkout@v2 + uses: actions/checkout@ee0669bd1cc54295c223e0bb666b733df41de1c5 # v2.7.0 with: # web-deploy script expects repo to be cloned with ssh for some commands to work ssh-key: ${{ secrets.WEBSITE_DEPLOY_SSH_PRIVATE_KEY }} @@ -247,7 +248,7 @@ jobs: mkdir -p /go/src/github.com/cortexproject/cortex ln -s $GITHUB_WORKSPACE/* 
/go/src/github.com/cortexproject/cortex
      - name: Download Website Artifact
-        uses: actions/download-artifact@v4
+        uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
        with:
          name: website public
          path: website/public
@@ -275,7 +276,7 @@ jobs:
      image: quay.io/cortexproject/build-image:master-779dcf4ba
    steps:
      - name: Checkout Repo
-        uses: actions/checkout@v2
+        uses: actions/checkout@ee0669bd1cc54295c223e0bb666b733df41de1c5 # v2.7.0
      - name: Setup Git safe.directory
        run: |
          echo "this step is needed because when running in container, actions/checkout does not set safe.directory effectively."
@@ -288,7 +289,7 @@ jobs:
          mkdir -p /go/src/github.com/cortexproject/cortex
          ln -s $GITHUB_WORKSPACE/* /go/src/github.com/cortexproject/cortex
      - name: Download Docker Images Artifact
-        uses: actions/download-artifact@v4
+        uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
        with:
          name: Docker Images
      - name: Extract Docker Images Archive
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 1b97cd3b3f..f72d5b4e0b 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,11 +1,27 @@
# Changelog

## master / unreleased
-* [CHANGE] Upgrade Dockerfile Node version from 14x to 18x. #5906
+
+* [FEATURE] Ruler: Minimize chances of missed rule group evaluations that can occur due to OOM kills, bad underlying nodes, or an unhealthy ruler that appears healthy in the ring. This feature is enabled via the `-ruler.enable-ha-evaluation` flag. #6129
+* [ENHANCEMENT] Ruler: Add new ruler metric `cortex_ruler_rule_groups_in_store`, which reports the total number of rule groups per tenant in the store and can be compared with `cortex_prometheus_rule_group_rules` to count the number of rule groups that are not loaded by a ruler. #5869
+* [ENHANCEMENT] Ingester/Ring: New `READONLY` status on ring to be used by Ingester. New ingester API to change the mode of an ingester. #6163
+* [ENHANCEMENT] Ruler: Add query statistics metrics when `--ruler.query-stats-enabled=true`. #6173
+* [ENHANCEMENT] Ingester: Add new API `/ingester/all_user_stats` which shows loaded blocks, active timeseries and ingestion rate for a specific ingester. #6178
+* [ENHANCEMENT] Distributor: Add new `cortex_reduced_resolution_histogram_samples_total` metric to track the number of histogram samples whose resolution was reduced. #6182
+
+## 1.18.0 2024-09-03
+
* [CHANGE] Ingester: Remove `-querier.query-store-for-labels-enabled` flag. Querying long-term store for labels is always enabled. #5984
* [CHANGE] Server: Instrument `cortex_request_duration_seconds` metric with native histogram. If `native-histograms` feature is enabled in monitoring Prometheus then the metric name needs to be updated in your dashboards. #6056
* [CHANGE] Distributor/Ingester: Change `cortex_distributor_ingester_appends_total`, `cortex_distributor_ingester_append_failures_total`, `cortex_distributor_ingester_queries_total`, and `cortex_distributor_ingester_query_failures_total` metrics to use the ingester ID instead of its IP as the label value. #6078
* [CHANGE] OTLP: Set `AddMetricSuffixes` to true to always enable metric name normalization. #6136
+* [CHANGE] Querier: Deprecate and enable by default `querier.ingester-metadata-streaming` flag. #6147
+* [CHANGE] QueryFrontend/QueryScheduler: Deprecate `-querier.max-outstanding-requests-per-tenant` and `-query-scheduler.max-outstanding-requests-per-tenant` flags. Use `frontend.max-outstanding-requests-per-tenant` instead. #6146
+* [CHANGE] Ingesters: Enable 'snappy-block' compression on ingester clients by default. #6148
+* [CHANGE] Ruler: Deprecate `ruler.evaluation-delay-duration`. Ruler will use the highest value between `ruler.evaluation-delay-duration` and `ruler.query-offset`. #6149
+* [CHANGE] Querier: Remove `-querier.at-modifier-enabled` flag. #6157
+* [CHANGE] Tracing: Remove deprecated `oltp_endpoint` config entirely. #6158
+* [CHANGE] Store Gateway: Enable store gateway zone stable shuffle sharding by default. #6161
* [FEATURE] Ingester/Distributor: Experimental: Enable native histogram ingestion via `-blocks-storage.tsdb.enable-native-histograms` flag. #5986 #6010 #6020
* [FEATURE] Querier: Enable querying native histogram chunks. #5944 #6031
* [FEATURE] Query Frontend: Support native histogram in query frontend response. #5996 #6043
@@ -15,20 +31,18 @@
* [FEATURE] OTLP: Support ingesting OTLP exponential metrics as native histograms. #6071 #6135
* [FEATURE] Ingester: Add `ingester.instance-limits.max-inflight-query-requests` to allow limiting ingester concurrent queries. #6081
* [FEATURE] Distributor: Add `validation.max-native-histogram-buckets` to limit max number of bucket count. Distributor will try to automatically reduce histogram resolution until it is within the bucket limit or resolution cannot be reduced anymore. #6104
-* [FEATURE] Store Gateway: Token bucket limiter. #6016
+* [FEATURE] Store Gateway: Introduce token bucket limiter to enhance store gateway throttling. #6016
* [FEATURE] Ruler: Add support for `query_offset` field on RuleGroup and new `ruler_query_offset` per-tenant limit. #6085
-* [ENHANCEMENT] rulers: Add support to persist tokens in rulers. #5987
+* [ENHANCEMENT] Ruler: Add support to persist tokens in rulers. #5987
* [ENHANCEMENT] Query Frontend/Querier: Added store gateway postings touched count and touched size in Querier stats and log in Query Frontend. #5892
* [ENHANCEMENT] Query Frontend/Querier: Returns `warnings` on prometheus query responses. #5916
* [ENHANCEMENT] Ingester: Allowing to configure `-blocks-storage.tsdb.head-compaction-interval` flag up to 30 min and add a jitter on the first head compaction. #5919 #5928
* [ENHANCEMENT] Distributor: Added `max_inflight_push_requests` config to ingester client to protect distributor from OOMKilled. #5917
* [ENHANCEMENT] Distributor/Querier: Clean stale per-ingester metrics after ingester restarts. #5930
* [ENHANCEMENT] Distributor/Ring: Allow disabling detailed ring metrics by ring member. #5931
-* [ENHANCEMENT] KV: Etcd Added etcd.ping-without-stream-allowed parameter to disable/enable PermitWithoutStream #5933
+* [ENHANCEMENT] KV: Etcd Added etcd.ping-without-stream-allowed parameter to disable/enable PermitWithoutStream #5933
* [ENHANCEMENT] Ingester: Add a new `limits_per_label_set` limit. This limit functions similarly to `max_series_per_metric`, but allowing users to define the maximum number of series per LabelSet. #5950 #5993
* [ENHANCEMENT] Store Gateway: Log gRPC requests together with headers configured in `http_request_headers_to_log`. #5958
-* [ENHANCEMENT] Upgrade Alpine to 3.19. #6014
-* [ENHANCEMENT] Upgrade go to 1.22.5 #6014 #6072
* [ENHANCEMENT] Ingester: Add a new experimental `-ingester.labels-string-interning-enabled` flag to enable string interning for metrics labels. #6057
* [ENHANCEMENT] Ingester: Add link to renew 10% of the ingesters tokens in the admin page. #6063
* [ENHANCEMENT] Ruler: Add support for filtering by `state` and `health` field on Rules API. #6040
@@ -40,13 +54,16 @@
* [ENHANCEMENT] Ruler: Add support for filtering by `state` and `health` field on Rules API. #6040
* [ENHANCEMENT] Compactor: Split cleaner cycle for active and deleted tenants. #6112
* [ENHANCEMENT] Compactor: Introduce cleaner visit marker. #6113
+* [ENHANCEMENT] Query Frontend: Add `cortex_query_samples_total` metric. #6142
+* [ENHANCEMENT] Ingester: Implement metadata API limit. #6128
* [BUGFIX] Configsdb: Fix endline issue in db password. #5920
* [BUGFIX] Ingester: Fix `user` and `type` labels for the `cortex_ingester_tsdb_head_samples_appended_total` TSDB metric. #5952
* [BUGFIX] Querier: Enforce max query length check for `/api/v1/series` API even though `ignoreMaxQueryLength` is set to true. #6018
-* [BUGFIX] Ingester: Fix issue with the minimize token generator where it was not taking in consideration the current ownerhip of an instance when generating extra tokens. #6062
-* [BUGFIX] Scheduler: Fix user queue in scheduler that was not thread-safe. #6077
+* [BUGFIX] Ingester: Fix issue with the minimize token generator where it was not taking into consideration the current ownership of an instance when generating extra tokens. #6062
+* [BUGFIX] Scheduler: Fix user queue in scheduler that was not thread-safe. #6077 #6160
* [BUGFIX] Ingester: Include out-of-order head compaction when compacting TSDB head. #6108
* [BUGFIX] Ingester: Fix `cortex_ingester_tsdb_mmap_chunks_total` metric. #6134
+* [BUGFIX] Query Frontend: Fix query rejection bug for metadata queries. #6143

## 1.17.1 2024-05-20

diff --git a/RELEASE.md b/RELEASE.md
index c5b9438db8..078bfa688c 100644
--- a/RELEASE.md
+++ b/RELEASE.md
@@ -33,6 +33,7 @@ Our goal is to provide a new minor release every 6 weeks. This is a new process
| v1.15.0 | 2023-03-27 | Ben Ye (@yeya24) |
| v1.16.0 | 2023-11-05 | Ben Ye (@yeya24) |
| v1.17.0 | 2024-04-25 | Ben Ye (@yeya24) |
+| v1.18.0 | 2024-08-16 | Daniel Blando (@danielblando) |

## Release shepherd responsibilities

diff --git a/VERSION b/VERSION
index 511a76e6fa..84cc529467 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-1.17.1
+1.18.0
diff --git a/docs/api/_index.md b/docs/api/_index.md
index 824a67736b..1ab231ea83 100644
--- a/docs/api/_index.md
+++ b/docs/api/_index.md
@@ -32,6 +32,8 @@ For the sake of clarity, in this document we have grouped API endpoints by servi
| [Flush blocks](#flush-blocks) | Ingester || `GET,POST /ingester/flush` |
| [Shutdown](#shutdown) | Ingester || `GET,POST /ingester/shutdown` |
| [Ingesters ring status](#ingesters-ring-status) | Ingester || `GET /ingester/ring` |
+| [Ingester tenants stats](#ingester-tenants-stats) | Ingester || `GET /ingester/all_user_stats` |
+| [Ingester mode](#ingester-mode) | Ingester || `GET,POST /ingester/mode` |
| [Instant query](#instant-query) | Querier, Query-frontend || `GET,POST /api/v1/query` |
| [Range query](#range-query) | Querier, Query-frontend || `GET,POST /api/v1/query_range` |
| [Exemplar query](#exemplar-query) | Querier, Query-frontend || `GET,POST /api/v1/query_exemplars` |
@@ -241,7 +243,7 @@ GET /distributor/all_user_stats
GET /all_user_stats
```

-Displays a web page with per-tenant statistics updated in realtime, including the total number of active series across all ingesters and the current ingestion rate (samples / sec).
+Displays a web page with per-tenant statistics updated in realtime, including the total number of loaded blocks and active series across all ingesters as well as the current ingestion rate (samples / sec).
### HA tracker status

@@ -296,6 +298,24 @@ GET /ring

Displays a web page with the ingesters hash ring status, including the state, healthy and last heartbeat time of each ingester.

+### Ingester tenants stats
+
+```
+GET /ingester/all_user_stats
+
+```
+
+Displays a web page with per-tenant statistics updated in realtime, including the total number of loaded blocks and active series from a specific ingester as well as the current ingestion rate (samples / sec).
+
+### Ingester mode
+
+```
+GET,POST /ingester/mode
+```
+Changes the ingester mode between ACTIVE and READONLY. A READONLY ingester does not receive push requests and will only be called for query operations.
+
+The endpoint accepts the query param `mode` or a POST body as `application/x-www-form-urlencoded` with the mode type.
+

## Querier / Query-frontend

@@ -355,9 +375,7 @@ GET,POST /api/v1/series
GET,POST /api/v1/series
```

-Find series by label matchers. Differently than Prometheus and due to scalability and performances reasons, if `-querier.query-store-for-labels-enabled` is not set or if `start` param is not specified, Cortex currently always fetches series from data stored in the ingesters.
-
-If `-querier.query-store-for-labels-enabled` is configured, Cortex also queries the long-term store with the *blocks* storage engine.
+Find series by label matchers. Starting from release v1.18.0, Cortex by default honors the `start` and `end` request parameters and fetches series from either the ingesters, the store gateway, or both. The special case is that if the `start` param is not specified, Cortex currently fetches series from data stored in the ingesters.

_For more information, please check out the Prometheus [series endpoint](https://prometheus.io/docs/prometheus/latest/querying/api/#finding-series-by-label-matchers) documentation._

@@ -372,7 +390,7 @@ GET,POST /api/v1/labels
GET,POST /api/v1/labels
```

-Get label names of ingested series. Differently than Prometheus and due to scalability and performances reasons, Cortex currently ignores the `start` and `end` request parameters and always fetches the label names from in-memory data stored in the ingesters. There is experimental support to query the long-term store with the *blocks* storage engine when `-querier.query-store-for-labels-enabled` is set.
+Get label names of ingested series. Starting from release v1.18.0, Cortex by default honors the `start` and `end` request parameters and fetches label names from either the ingesters, the store gateway, or both.

_For more information, please check out the Prometheus [get label names](https://prometheus.io/docs/prometheus/latest/querying/api/#getting-label-names) documentation._

@@ -387,7 +405,7 @@ GET /api/v1/label/{name}/values
GET /api/v1/label/{name}/values
```

-Get label values for a given label name. Differently than Prometheus and due to scalability and performances reasons, Cortex currently ignores the `start` and `end` request parameters and always fetches the label values from in-memory data stored in the ingesters. There is experimental support to query the long-term store with the *blocks* storage engine when `-querier.query-store-for-labels-enabled` is set.
+Get label values for a given label name. Starting from release v1.18.0, Cortex by default honors the `start` and `end` request parameters and fetches label values from either the ingesters, the store gateway, or both.
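The same time-range behavior applies to the series, label names, and label values endpoints described above. As a quick illustration, here is a minimal Go sketch that queries the label values endpoint with explicit `start` and `end` parameters; the address, tenant ID, and label name are hypothetical placeholders for your own deployment:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
)

func main() {
	// Hypothetical query-frontend address; the Prometheus-compatible API is
	// served under the /prometheus prefix by default.
	base := "http://localhost:9009/prometheus/api/v1/label/job/values"

	// Starting with Cortex v1.18.0 these parameters are honored by default,
	// so the response may be served by ingesters, store gateways, or both.
	params := url.Values{}
	params.Set("start", "2024-09-01T00:00:00Z")
	params.Set("end", "2024-09-02T00:00:00Z")

	req, err := http.NewRequest(http.MethodGet, base+"?"+params.Encode(), nil)
	if err != nil {
		panic(err)
	}
	// Tenant header; required only when multi-tenancy (auth) is enabled.
	req.Header.Set("X-Scope-OrgID", "tenant-1")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	fmt.Println(string(body)) // e.g. {"status":"success","data":[...]}
}
```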
_For more information, please check out the Prometheus [get label values](https://prometheus.io/docs/prometheus/latest/querying/api/#querying-label-values) documentation._

diff --git a/docs/blocks-storage/compactor.md b/docs/blocks-storage/compactor.md
index 04a08cfa64..030035f2ac 100644
--- a/docs/blocks-storage/compactor.md
+++ b/docs/blocks-storage/compactor.md
@@ -285,6 +285,10 @@ compactor:
  # CLI flag: -compactor.ring.wait-active-instance-timeout
  [wait_active_instance_timeout: <duration> | default = 10m]

+  # The compaction strategy to use. Supported values are: default, partitioning.
+  # CLI flag: -compactor.compaction-mode
+  [compaction_mode: <string> | default = "default"]
+
  # How long block visit marker file should be considered as expired and able to
  # be picked up by compactor again.
  # CLI flag: -compactor.block-visit-marker-timeout
diff --git a/docs/blocks-storage/querier.md b/docs/blocks-storage/querier.md
index a9206c82ba..8cd0b9f008 100644
--- a/docs/blocks-storage/querier.md
+++ b/docs/blocks-storage/querier.md
@@ -104,9 +104,10 @@ querier:
  # CLI flag: -querier.timeout
  [timeout: <duration> | default = 2m]

-  # Use streaming RPCs for metadata APIs from ingester.
+  # Deprecated (This feature will always be on after v1.18): Use streaming RPCs
+  # for metadata APIs from ingester.
  # CLI flag: -querier.ingester-metadata-streaming
-  [ingester_metadata_streaming: <boolean> | default = false]
+  [ingester_metadata_streaming: <boolean> | default = true]

  # Maximum number of samples a single query can load into memory.
  # CLI flag: -querier.max-samples
diff --git a/docs/configuration/config-file-reference.md b/docs/configuration/config-file-reference.md
index 745b612fe7..7db8a27273 100644
--- a/docs/configuration/config-file-reference.md
+++ b/docs/configuration/config-file-reference.md
@@ -176,13 +176,6 @@ tenant_federation:

[memberlist: <memberlist_config>]

query_scheduler:
-  # Deprecated (use frontend.max-outstanding-requests-per-tenant instead) and
-  # will be removed in v1.17.0: Maximum number of outstanding requests per
-  # tenant per query-scheduler. In-flight requests above this limit will fail
-  # with HTTP response status code 429.
-  # CLI flag: -query-scheduler.max-outstanding-requests-per-tenant
-  [max_outstanding_requests_per_tenant: <int> | default = 0]
-
  # If a querier disconnects without sending notification about graceful
  # shutdown, the query-scheduler will keep the querier in the tenant's shard
  # until the forget delay has passed. This feature is useful to reduce the
@@ -2223,6 +2216,10 @@ sharding_ring:
  # CLI flag: -compactor.ring.wait-active-instance-timeout
  [wait_active_instance_timeout: <duration> | default = 10m]

+# The compaction strategy to use. Supported values are: default, partitioning.
+# CLI flag: -compactor.compaction-mode
+[compaction_mode: <string> | default = "default"]
+
# How long block visit marker file should be considered as expired and able to
# be picked up by compactor again.
# CLI flag: -compactor.block-visit-marker-timeout
@@ -3027,7 +3024,7 @@ grpc_client_config:
  # Use compression when sending messages. Supported values are: 'gzip',
  # 'snappy', 'snappy-block' ,'zstd' and '' (disable compression)
  # CLI flag: -ingester.client.grpc-compression
-  [grpc_compression: <string> | default = ""]
+  [grpc_compression: <string> | default = "snappy-block"]

  # Rate limit for gRPC client; 0 means disabled.
  # CLI flag: -ingester.client.grpc-client-rate-limit
@@ -3340,6 +3337,7 @@ query_rejection:
# them.
[query_attributes: <list of QueryAttribute> | default = []]

+# Deprecated (use ruler.query-offset instead) and will be removed in v1.19.0:
# Duration to delay the evaluation of rules to ensure the underlying metrics
# have been pushed to Cortex.
# CLI flag: -ruler.evaluation-delay-duration
@@ -3698,9 +3696,10 @@ The `querier_config` configures the Cortex querier.
# CLI flag: -querier.timeout
[timeout: <duration> | default = 2m]

-# Use streaming RPCs for metadata APIs from ingester.
+# Deprecated (This feature will always be on after v1.18): Use streaming RPCs
+# for metadata APIs from ingester.
# CLI flag: -querier.ingester-metadata-streaming
-[ingester_metadata_streaming: <boolean> | default = false]
+[ingester_metadata_streaming: <boolean> | default = true]

# Maximum number of samples a single query can load into memory.
# CLI flag: -querier.max-samples
@@ -3834,12 +3833,6 @@ The `query_frontend_config` configures the Cortex query-frontend.
# CLI flag: -frontend.query-stats-enabled
[query_stats_enabled: <boolean> | default = false]

-# Deprecated (use frontend.max-outstanding-requests-per-tenant instead) and will
-# be removed in v1.17.0: Maximum number of outstanding requests per tenant per
-# frontend; requests beyond this error with HTTP 429.
-# CLI flag: -querier.max-outstanding-requests-per-tenant
-[max_outstanding_per_tenant: <int> | default = 0]
-
# If a querier disconnects without sending notification about graceful shutdown,
# the query-frontend will keep the querier in the tenant's shard until the
# forget delay has passed. This feature is useful to reduce the blast radius
@@ -4149,6 +4142,10 @@ ruler_client:
  # CLI flag: -ruler.client.tls-insecure-skip-verify
  [tls_insecure_skip_verify: <boolean> | default = false]

+  # Timeout for downstream rulers.
+  # CLI flag: -ruler.client.remote-timeout
+  [remote_timeout: <duration> | default = 2m]
+
# How frequently to evaluate rules
# CLI flag: -ruler.evaluation-interval
[evaluation_interval: <duration> | default = 1m]
@@ -4347,6 +4344,10 @@ ring:
  # CLI flag: -ruler.ring.final-sleep
  [final_sleep: <duration> | default = 0s]

+  # Keep instance in the ring on shut down.
+  # CLI flag: -ruler.ring.keep-instance-in-the-ring-on-shutdown
+  [keep_instance_in_the_ring_on_shutdown: <boolean> | default = false]
+
# Period with which to attempt to flush rule groups.
# CLI flag: -ruler.flush-period
[flush_period: <duration> | default = 1m]
@@ -4373,14 +4374,18 @@ ring:
# CLI flag: -ruler.disabled-tenants
[disabled_tenants: <string> | default = ""]

-# Report the wall time for ruler queries to complete as a per user metric and as
-# an info level log message.
+# Report query statistics for ruler queries as a per-user metric and as an info
+# level log message.
# CLI flag: -ruler.query-stats-enabled
[query_stats_enabled: <boolean> | default = false]

# Disable the rule_group label on exported metrics
# CLI flag: -ruler.disable-rule-group-label
[disable_rule_group_label: <boolean> | default = false]
+
+# Enable high availability
+# CLI flag: -ruler.enable-ha-evaluation
+[enable_ha_evaluation: <boolean> | default = false]
```

### `ruler_storage_config`

diff --git a/docs/configuration/v1-guarantees.md b/docs/configuration/v1-guarantees.md
index 7c3691d99d..238b04e300 100644
--- a/docs/configuration/v1-guarantees.md
+++ b/docs/configuration/v1-guarantees.md
@@ -23,7 +23,6 @@ The Cortex maintainers commit to ensuring future version of Cortex can read data

Cortex strives to be 100% API compatible with Prometheus (under `/prometheus/*` and `/api/prom/*`); any deviation from this is considered a bug, except:

-- For queries to the `/api/v1/series`, `/api/v1/labels` and `/api/v1/label/{name}/values` endpoints, query's time range is ignored and the data is always fetched from ingesters. There is experimental support to query the long-term store with the *blocks* storage engine when `-querier.query-store-for-labels-enabled` is set.
- Additional API endpoints for creating, removing and modifying alerts and recording rules.
- Additional API around pushing metrics (under `/api/push`).
- Additional API endpoints for management of Cortex itself, such as the ring. These APIs are not part of the any compatibility guarantees.
@@ -53,7 +52,6 @@ Currently experimental features are:
- OpenStack Swift storage support.
- Metric relabeling in the distributor.
- Scalable query-frontend (when using query-scheduler)
-- Querying store for series, labels APIs (`-querier.query-store-for-labels-enabled`)
- Ingester: do not unregister from ring on shutdown (`-ingester.unregister-on-shutdown=false`)
- Distributor: do not extend writes on unhealthy ingesters (`-distributor.extend-writes=false`)
- Tenant Deletion in Purger, for blocks storage.
diff --git a/docs/guides/limitations.md b/docs/guides/limitations.md
index 7376866161..5951371f74 100644
--- a/docs/guides/limitations.md
+++ b/docs/guides/limitations.md
@@ -35,7 +35,3 @@ All other characters are not safe to use. In particular, slashes `/` and whitesp
### Length

The tenant ID length should not exceed 150 bytes/characters.
-
-## Query series and labels
-
-When running queries to the `/api/v1/series`, `/api/v1/labels` and `/api/v1/label/{name}/values` endpoints, query's time range is ignored and the data is always fetched from ingesters. There is experimental support to query the long-term store with the *blocks* storage engine when `-querier.query-store-for-labels-enabled` is set.
diff --git a/docs/proposals/ruler-ha-new.md b/docs/proposals/ruler-ha-new.md
new file mode 100644
index 0000000000..bb8d0a4b87
--- /dev/null
+++ b/docs/proposals/ruler-ha-new.md
@@ -0,0 +1,53 @@
+---
+title: "Ruler High Availability"
+linkTitle: "Ruler High Availability"
+weight: 1
+slug: ruler-high-availability
+---
+
+- Author: [Anand Rajagopal](https://github.com/rajagopalanand)
+- Date: Aug 2024
+- Status: Proposed
+---
+
+## Problem
+
+Rulers in Cortex currently run with a replication factor of 1, wherein each RuleGroup is assigned to exactly 1 ruler. This lack of redundancy creates the following risks:
+
+- Rule group evaluation
+  - Missed evaluations due to a ruler outage, possibly caused by a deployment, noisy neighbour, hardware failure, etc.
+  - Missed evaluations due to a ruler brownout caused by other tenants' rule groups sharing the same ruler (noisy neighbour)
+- API
+  - Inconsistent API results during resharding (e.g. due to a deployment) when rulers are in a transition state loading rule groups
+
+This proposal attempts to mitigate the above risks by enabling a ruler replication factor greater than 1, effectively allowing multiple rulers to evaluate the same rule group.
+
+## Proposal
+
+### Make ReplicationFactor configurable
+
+ReplicationFactor in Ruler is currently hardcoded to 1. Making this a configurable parameter is the first step to enabling HA in ruler. The parameter value will be 1 by default. To enable Ruler HA for rule group evaluation, a new flag will be created.
+
+A replication factor greater than 1 will result in the following:
+
+ - The ring will pick R rulers for a rule group, where R = RF
+ - The primary ruler (R1), when active, will take ownership of the rule group
+ - Non-primary ruler R2 will check if R1 is active. If R1 is not active, R2 will take ownership of the rule group
+ - Non-primary ruler R3 (if RF=3) will check if R1 and R2 are active. If they are both inactive/unhealthy, then R3 will take ownership of the rule group
+ - Non-primary rulers will drop their ownership when R1 becomes active after an outage
+
+With this redundancy, the maximum duration of missed evaluations will be limited to the sync interval of the rule groups, reducing the impact of primary ruler unavailability. A sketch of this ownership check appears after these proposal diffs.
+
+### Prometheus change
+
+No Prometheus change is required for this proposal.
+
+### API HA
+
+An interim solution is addressed in [#5773](https://github.com/cortexproject/cortex/issues/5773). This will be modified such that the replicas will return both active and passive rule groups, and the API handler will continue to de-duplicate the results.
+The difference is that after Ruler HA, the replicas could potentially return proper rule group state if those replicas evaluated the rule group.
+
+PRs:
+
+* For rule evaluation: [#6129](https://github.com/cortexproject/cortex/pull/6129)
+* For API HA: [#5773](https://github.com/cortexproject/cortex/issues/5773)
diff --git a/docs/proposals/ruler-ha.md b/docs/proposals/ruler-ha.md
index 5e874e1ce0..87d9d1b582 100644
--- a/docs/proposals/ruler-ha.md
+++ b/docs/proposals/ruler-ha.md
@@ -7,11 +7,13 @@ slug: ruler-ha

- Author: [Soon-Ping Phang](https://github.com/soonping-amzn)
- Date: June 2022
-- Status: Proposed
+- Status: Deprecated
---

## Introduction

+_This proposal is deprecated in favor of the new [proposal](./ruler-ha-new.md)._
+
This proposal consolidates multiple existing PRs from the AWS team working on this feature, as well as future work needed to complete support. The hope is that a more holistic view will make for more productive discussion and review of the individual changes, as well as provide better tracking of overall progress.

The original issue is [#4435](https://github.com/cortexproject/cortex/issues/4435).
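To make the ownership rule from the proposal above concrete, here is a minimal, self-contained Go sketch of the decision each replica makes for a rule group during sync. The names (`RulerState`, `ownsRuleGroup`) are hypothetical illustrations, not the actual implementation from PR #6129:

```go
package main

import "fmt"

// RulerState is a simplified stand-in for the instance state a ruler reads
// from the ring; the real code relies on the ring's health checks.
type RulerState struct {
	Addr    string
	Healthy bool
}

// ownsRuleGroup reports whether the ruler at position `me` in the ordered
// replica set should evaluate the rule group: the primary (index 0) owns it
// whenever it is healthy, and a non-primary owns it only while every ruler
// ahead of it is unhealthy.
func ownsRuleGroup(replicas []RulerState, me int) bool {
	if !replicas[me].Healthy {
		return false
	}
	for i := 0; i < me; i++ {
		if replicas[i].Healthy {
			// A higher-priority ruler is active, so this replica stays passive.
			return false
		}
	}
	return true
}

func main() {
	// RF=3 replica set picked by the ring for one rule group; R1 is down.
	replicas := []RulerState{
		{Addr: "ruler-1", Healthy: false},
		{Addr: "ruler-2", Healthy: true},
		{Addr: "ruler-3", Healthy: true},
	}
	for i, r := range replicas {
		fmt.Printf("%s owns group: %v\n", r.Addr, ownsRuleGroup(replicas, i))
	}
	// ruler-2 takes over while ruler-3 stays passive; once ruler-1 is healthy
	// again, both non-primaries drop ownership at the next sync.
}
```

Because ownership is re-evaluated on each rule group sync, the window of missed evaluations after a primary failure is bounded by the sync interval, which is the property the proposal relies on.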
diff --git a/go.mod b/go.mod index 3667f14bc7..23811e70b1 100644 --- a/go.mod +++ b/go.mod @@ -6,7 +6,7 @@ toolchain go1.22.5 require ( github.com/Masterminds/squirrel v1.5.4 - github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 + github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30 github.com/alicebob/miniredis/v2 v2.33.0 github.com/armon/go-metrics v0.4.1 github.com/aws/aws-sdk-go v1.55.5 @@ -16,7 +16,7 @@ require ( github.com/dustin/go-humanize v1.0.1 github.com/efficientgo/core v1.0.0-rc.2 github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb - github.com/felixge/fgprof v0.9.4 + github.com/felixge/fgprof v0.9.5 github.com/go-kit/log v0.2.1 github.com/go-openapi/strfmt v0.23.0 github.com/go-openapi/swag v0.23.0 @@ -29,14 +29,14 @@ require ( github.com/gorilla/mux v1.8.1 github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 - github.com/hashicorp/consul/api v1.29.2 + github.com/hashicorp/consul/api v1.29.4 github.com/hashicorp/go-cleanhttp v0.5.2 github.com/hashicorp/go-sockaddr v1.0.6 github.com/hashicorp/memberlist v0.5.1 github.com/json-iterator/go v1.1.12 github.com/klauspost/compress v1.17.9 github.com/lib/pq v1.10.9 - github.com/minio/minio-go/v7 v7.0.74 + github.com/minio/minio-go/v7 v7.0.75 github.com/mitchellh/go-wordwrap v1.0.1 github.com/oklog/ulid v1.3.1 github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e @@ -44,34 +44,34 @@ require ( github.com/opentracing/opentracing-go v1.2.0 github.com/pkg/errors v0.9.1 github.com/prometheus/alertmanager v0.27.0 - github.com/prometheus/client_golang v1.19.1 + github.com/prometheus/client_golang v1.20.2 github.com/prometheus/client_model v0.6.1 - github.com/prometheus/common v0.55.0 + github.com/prometheus/common v0.58.0 // Prometheus maps version 2.x.y to tags v0.x.y. 
- github.com/prometheus/prometheus v0.53.2-0.20240718123124-e9dec5fc537b + github.com/prometheus/prometheus v0.54.0-rc.0 github.com/segmentio/fasthash v1.0.3 github.com/sony/gobreaker v1.0.0 github.com/spf13/afero v1.11.0 github.com/stretchr/testify v1.9.0 github.com/thanos-io/objstore v0.0.0-20240622095743-1afe5d4bc3cd github.com/thanos-io/promql-engine v0.0.0-20240718195911-cdbd6dfed36b - github.com/thanos-io/thanos v0.35.2-0.20240722172812-990a60b72647 + github.com/thanos-io/thanos v0.35.2-0.20240904173945-09db52562de0 github.com/uber/jaeger-client-go v2.30.0+incompatible github.com/weaveworks/common v0.0.0-20230728070032-dd9e68f319d5 go.etcd.io/etcd/api/v3 v3.5.15 go.etcd.io/etcd/client/pkg/v3 v3.5.15 go.etcd.io/etcd/client/v3 v3.5.15 - go.opentelemetry.io/contrib/propagators/aws v1.28.0 - go.opentelemetry.io/otel v1.28.0 - go.opentelemetry.io/otel/bridge/opentracing v1.28.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 - go.opentelemetry.io/otel/sdk v1.28.0 - go.opentelemetry.io/otel/trace v1.28.0 + go.opentelemetry.io/contrib/propagators/aws v1.29.0 + go.opentelemetry.io/otel v1.29.0 + go.opentelemetry.io/otel/bridge/opentracing v1.29.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.29.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.29.0 + go.opentelemetry.io/otel/sdk v1.29.0 + go.opentelemetry.io/otel/trace v1.29.0 go.uber.org/atomic v1.11.0 - golang.org/x/net v0.27.0 + golang.org/x/net v0.28.0 golang.org/x/sync v0.8.0 - golang.org/x/time v0.5.0 + golang.org/x/time v0.6.0 google.golang.org/grpc v1.65.0 gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 @@ -83,21 +83,21 @@ require ( github.com/cespare/xxhash/v2 v2.3.0 github.com/google/go-cmp v0.6.0 github.com/sercand/kuberesolver/v4 v4.0.0 - go.opentelemetry.io/collector/pdata v1.12.0 + go.opentelemetry.io/collector/pdata v1.13.0 golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 google.golang.org/protobuf v1.34.2 ) require ( - cloud.google.com/go v0.114.0 // indirect - cloud.google.com/go/auth v0.5.1 // indirect + cloud.google.com/go v0.115.0 // indirect + cloud.google.com/go/auth v0.7.0 // indirect cloud.google.com/go/auth/oauth2adapt v0.2.2 // indirect - cloud.google.com/go/compute/metadata v0.3.0 // indirect - cloud.google.com/go/iam v1.1.8 // indirect - cloud.google.com/go/storage v1.40.0 // indirect - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.12.0 // indirect + cloud.google.com/go/compute/metadata v0.4.0 // indirect + cloud.google.com/go/iam v1.1.10 // indirect + cloud.google.com/go/storage v1.41.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.13.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 // indirect - github.com/Azure/azure-sdk-for-go/sdk/internal v1.9.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.0 // indirect github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 // indirect @@ -148,13 +148,13 @@ require ( github.com/golang-jwt/jwt/v5 v5.2.1 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/google/btree v1.1.2 // indirect - github.com/google/pprof v0.0.0-20240528025155-186aa0362fba // indirect + github.com/google/pprof v0.0.0-20240827171923-fa2c70bbbfe5 // indirect github.com/google/s2a-go v0.1.7 // indirect github.com/google/uuid 
v1.6.0 // indirect github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect - github.com/googleapis/gax-go/v2 v2.12.4 // indirect + github.com/googleapis/gax-go/v2 v2.12.5 // indirect github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-hclog v1.6.3 // indirect github.com/hashicorp/go-immutable-radix v1.3.1 // indirect @@ -178,7 +178,7 @@ require ( github.com/mattn/go-isatty v0.0.20 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/metalmatze/signal v0.0.0-20210307161603-1c9aa721a97a // indirect - github.com/miekg/dns v1.1.59 // indirect + github.com/miekg/dns v1.1.62 // indirect github.com/minio/md5-simd v1.1.2 // indirect github.com/minio/sha256-simd v1.0.1 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect @@ -211,31 +211,31 @@ require ( github.com/zhangyunhao116/umap v0.0.0-20221211160557-cb7705fafa39 // indirect go.mongodb.org/mongo-driver v1.14.0 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/collector/semconv v0.104.0 // indirect + go.opentelemetry.io/collector/semconv v0.105.0 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 // indirect - go.opentelemetry.io/contrib/propagators/autoprop v0.53.0 // indirect - go.opentelemetry.io/contrib/propagators/b3 v1.28.0 // indirect - go.opentelemetry.io/contrib/propagators/jaeger v1.28.0 // indirect - go.opentelemetry.io/contrib/propagators/ot v1.28.0 // indirect - go.opentelemetry.io/otel/metric v1.28.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect + go.opentelemetry.io/contrib/propagators/autoprop v0.54.0 // indirect + go.opentelemetry.io/contrib/propagators/b3 v1.29.0 // indirect + go.opentelemetry.io/contrib/propagators/jaeger v1.29.0 // indirect + go.opentelemetry.io/contrib/propagators/ot v1.29.0 // indirect + go.opentelemetry.io/otel/metric v1.29.0 // indirect go.opentelemetry.io/proto/otlp v1.3.1 // indirect go.uber.org/goleak v1.3.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.21.0 // indirect go4.org/intern v0.0.0-20230525184215-6c62f75575cb // indirect go4.org/unsafe/assume-no-moving-gc v0.0.0-20230525183740-e7c30c78aeb2 // indirect - golang.org/x/crypto v0.25.0 // indirect - golang.org/x/mod v0.18.0 // indirect - golang.org/x/oauth2 v0.21.0 // indirect - golang.org/x/sys v0.22.0 // indirect - golang.org/x/text v0.16.0 // indirect - golang.org/x/tools v0.22.0 // indirect + golang.org/x/crypto v0.26.0 // indirect + golang.org/x/mod v0.20.0 // indirect + golang.org/x/oauth2 v0.22.0 // indirect + golang.org/x/sys v0.24.0 // indirect + golang.org/x/text v0.17.0 // indirect + golang.org/x/tools v0.24.0 // indirect gonum.org/v1/gonum v0.15.0 // indirect - google.golang.org/api v0.183.0 // indirect - google.golang.org/genproto v0.0.0-20240528184218-531527333157 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 // indirect + google.golang.org/api v0.188.0 // indirect + google.golang.org/genproto v0.0.0-20240708141625-4ad9e859172b // indirect + google.golang.org/genproto/googleapis/api 
v0.0.0-20240822170219-fc7c04adadcd // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd // indirect gopkg.in/alecthomas/kingpin.v2 v2.2.6 // indirect gopkg.in/telebot.v3 v3.2.1 // indirect k8s.io/apimachinery v0.30.2 // indirect diff --git a/go.sum b/go.sum index 080ad777e9..7a723d5fc2 100644 --- a/go.sum +++ b/go.sum @@ -36,8 +36,8 @@ cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRY cloud.google.com/go v0.105.0/go.mod h1:PrLgOJNe5nfE9UMxKxgXj4mD3voiP+YQ6gdt6KMFOKM= cloud.google.com/go v0.107.0/go.mod h1:wpc2eNrD7hXUTy8EKS10jkxpZBjASrORK7goS+3YX2I= cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY= -cloud.google.com/go v0.114.0 h1:OIPFAdfrFDFO2ve2U7r/H5SwSbBzEdrBdE7xkgwc+kY= -cloud.google.com/go v0.114.0/go.mod h1:ZV9La5YYxctro1HTPug5lXH/GefROyW8PPD4T8n9J8E= +cloud.google.com/go v0.115.0 h1:CnFSK6Xo3lDYRoBKEcAtia6VSC837/ZkJuRduSFnr14= +cloud.google.com/go v0.115.0/go.mod h1:8jIM5vVgoAEoiVxQ/O4BFTfHqulPZgs/ufEzMcFMdWU= cloud.google.com/go/accessapproval v1.4.0/go.mod h1:zybIuC3KpDOvotz59lFe5qxRZx6C75OtwbisN56xYB4= cloud.google.com/go/accessapproval v1.5.0/go.mod h1:HFy3tuiGvMdcd/u+Cu5b9NkO1pEICJ46IR82PoUdplw= cloud.google.com/go/accessapproval v1.6.0/go.mod h1:R0EiYnwV5fsRFiKZkPHr6mwyk2wxUJ30nL4j2pcFY2E= @@ -84,8 +84,8 @@ cloud.google.com/go/assuredworkloads v1.7.0/go.mod h1:z/736/oNmtGAyU47reJgGN+KVo cloud.google.com/go/assuredworkloads v1.8.0/go.mod h1:AsX2cqyNCOvEQC8RMPnoc0yEarXQk6WEKkxYfL6kGIo= cloud.google.com/go/assuredworkloads v1.9.0/go.mod h1:kFuI1P78bplYtT77Tb1hi0FMxM0vVpRC7VVoJC3ZoT0= cloud.google.com/go/assuredworkloads v1.10.0/go.mod h1:kwdUQuXcedVdsIaKgKTp9t0UJkE5+PAVNhdQm4ZVq2E= -cloud.google.com/go/auth v0.5.1 h1:0QNO7VThG54LUzKiQxv8C6x1YX7lUrzlAa1nVLF8CIw= -cloud.google.com/go/auth v0.5.1/go.mod h1:vbZT8GjzDf3AVqCcQmqeeM32U9HBFc32vVVAbwDsa6s= +cloud.google.com/go/auth v0.7.0 h1:kf/x9B3WTbBUHkC+1VS8wwwli9TzhSt0vSTVBmMR8Ts= +cloud.google.com/go/auth v0.7.0/go.mod h1:D+WqdrpcjmiCgWrXmLLxOVq1GACoE36chW6KXoEvuIw= cloud.google.com/go/auth/oauth2adapt v0.2.2 h1:+TTV8aXpjeChS9M+aTtN/TjdQnzJvmzKFt//oWu7HX4= cloud.google.com/go/auth/oauth2adapt v0.2.2/go.mod h1:wcYjgpZI9+Yu7LyYBg4pqSiaRkfEK3GQcpb7C/uyF1Q= cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0= @@ -156,8 +156,8 @@ cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZ cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= -cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc= -cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= +cloud.google.com/go/compute/metadata v0.4.0 h1:vHzJCWaM4g8XIcm8kopr3XmDA4Gy/lblD3EhhSux05c= +cloud.google.com/go/compute/metadata v0.4.0/go.mod h1:SIQh1Kkb4ZJ8zJ874fqVkslA29PRXuleyj6vOzlbK7M= cloud.google.com/go/contactcenterinsights v1.3.0/go.mod h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY= cloud.google.com/go/contactcenterinsights v1.4.0/go.mod h1:L2YzkGbPsv+vMQMCADxJoT9YiTTnSEd6fEvCeHTYVck= cloud.google.com/go/contactcenterinsights v1.6.0/go.mod h1:IIDlT6CLcDoyv79kDv8iWxMSTZhLxSCofVV5W6YFM/w= @@ -274,8 +274,8 @@ cloud.google.com/go/iam v0.7.0/go.mod h1:H5Br8wRaDGNc8XP3keLc4unfUUZeyH3Sfl9XpQE 
cloud.google.com/go/iam v0.8.0/go.mod h1:lga0/y3iH6CX7sYqypWJ33hf7kkfXJag67naqGESjkE= cloud.google.com/go/iam v0.11.0/go.mod h1:9PiLDanza5D+oWFZiH1uG+RnRCfEGKoyl6yo4cgWZGY= cloud.google.com/go/iam v0.12.0/go.mod h1:knyHGviacl11zrtZUoDuYpDgLjvr28sLQaG0YB2GYAY= -cloud.google.com/go/iam v1.1.8 h1:r7umDwhj+BQyz0ScZMp4QrGXjSTI3ZINnpgU2nlB/K0= -cloud.google.com/go/iam v1.1.8/go.mod h1:GvE6lyMmfxXauzNq8NbgJbeVQNspG+tcdL/W8QO1+zE= +cloud.google.com/go/iam v1.1.10 h1:ZSAr64oEhQSClwBL670MsJAW5/RLiC6kfw3Bqmd5ZDI= +cloud.google.com/go/iam v1.1.10/go.mod h1:iEgMq62sg8zx446GCaijmA2Miwg5o3UbO+nI47WHJps= cloud.google.com/go/iap v1.4.0/go.mod h1:RGFwRJdihTINIe4wZ2iCP0zF/qu18ZwyKxrhMhygBEc= cloud.google.com/go/iap v1.5.0/go.mod h1:UH/CGgKd4KyohZL5Pt0jSKE4m3FR51qg6FKQ/z/Ix9A= cloud.google.com/go/iap v1.6.0/go.mod h1:NSuvI9C/j7UdjGjIde7t7HBz+QTwBcapPE07+sSRcLk= @@ -464,8 +464,8 @@ cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc= cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s= cloud.google.com/go/storage v1.28.1/go.mod h1:Qnisd4CqDdo6BGs2AD5LLnEsmSQ80wQ5ogcBBKhU86Y= -cloud.google.com/go/storage v1.40.0 h1:VEpDQV5CJxFmJ6ueWNsKxcr1QAYOXEgxDa+sBbJahPw= -cloud.google.com/go/storage v1.40.0/go.mod h1:Rrj7/hKlG87BLqDJYtwR0fbPld8uJPbQ2ucUMY7Ir0g= +cloud.google.com/go/storage v1.41.0 h1:RusiwatSu6lHeEXe3kglxakAmAbfV+rhtPqA6i8RBx0= +cloud.google.com/go/storage v1.41.0/go.mod h1:J1WCa/Z2FcgdEDuPUY8DxT5I+d9mFKsCepp5vR6Sq80= cloud.google.com/go/storagetransfer v1.5.0/go.mod h1:dxNzUopWy7RQevYFHewchb29POFv3/AaBgnhqzqiK0w= cloud.google.com/go/storagetransfer v1.6.0/go.mod h1:y77xm4CQV/ZhFZH75PLEXY0ROiS7Gh6pSKrM8dJyg6I= cloud.google.com/go/storagetransfer v1.7.0/go.mod h1:8Giuj1QNb1kfLAiWM1bN6dHzfdlDAVC9rv9abHot2W4= @@ -524,12 +524,12 @@ cloud.google.com/go/workflows v1.10.0/go.mod h1:fZ8LmRmZQWacon9UCX1r/g/DfAXx5VcP dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zumjgTw83q2ge/PI+yyw8= git.sr.ht/~sbinet/gg v0.3.1/go.mod h1:KGYtlADtqsqANL9ueOFkWymvzUvLMQllU5Ixo+8v3pc= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.12.0 h1:1nGuui+4POelzDwI7RG56yfQJHCnKvwfMoU7VsEp+Zg= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.12.0/go.mod h1:99EvauvlcJ1U06amZiksfYz/3aFGyIhWGHVyiZXtBAI= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.13.0 h1:GJHeeA2N7xrG3q30L2UXDyuWRzDM900/65j70wcM4Ww= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.13.0/go.mod h1:l38EPgmsp71HHLq9j7De57JcKOWPyhrsW1Awm1JS6K0= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 h1:tfLQ34V6F7tVSwoTf/4lH5sE0o6eCJuNDTmH09nDpbc= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0/go.mod h1:9kIvujWAA58nmPmWB1m23fyWic1kYZMxD9CxaWn4Qpg= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.9.0 h1:H+U3Gk9zY56G3u872L82bk4thcsy2Gghb9ExT4Zvm1o= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.9.0/go.mod h1:mgrmMSgaLp9hmax62XQTd0N4aAqSE5E0DulSpVYK7vc= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0 h1:LkHbJbgF3YyvC53aqYGR+wWQDn2Rdp9AQdGndf9QvY4= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 
v5.7.0/go.mod h1:QyiQdW4f4/BIfB8ZutZ2s+28RAgfa/pT+zS++ZHyM1I= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0 h1:bXwSugBiSbgtz7rOtbfGf+woewp4f06orW9OP5BjHLA= @@ -575,8 +575,8 @@ github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRF github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/alecthomas/units v0.0.0-20210208195552-ff826a37aa15/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= -github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 h1:ez/4by2iGztzR4L0zgAOR8lTQK9VlyBVVd7G4omaOQs= -github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= +github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30 h1:t3eaIm0rUkzbrIewtiFmMK5RXHej2XnoXNhxVsAYUfg= +github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30/go.mod h1:fvzegU4vN3H1qMT+8wDmzjAcDONcgo2/SZ/TyfdUOFs= github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a h1:HbKu58rmZpUGpz5+4FfNmIU+FmZg2P3Xaj2v2bfNWmk= github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc= github.com/alicebob/miniredis/v2 v2.33.0 h1:uvTF0EDeu9RLnUEG27Db5I68ESoIxTiXbNUiji6lZrA= @@ -717,12 +717,12 @@ github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/r github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= github.com/dhui/dktest v0.4.1 h1:/w+IWuDXVymg3IrRJCHHOkMK10m9aNVMOyD0X12YVTg= github.com/dhui/dktest v0.4.1/go.mod h1:DdOqcUpL7vgyP4GlF3X3w7HbSlz8cEQzwewPveYEQbA= -github.com/digitalocean/godo v1.117.0 h1:WVlTe09melDYTd7VCVyvHcNWbgB+uI1O115+5LOtdSw= -github.com/digitalocean/godo v1.117.0/go.mod h1:Vk0vpCot2HOAJwc5WE8wljZGtJ3ZtWIc8MQ8rF38sdo= +github.com/digitalocean/godo v1.118.0 h1:lkzGFQmACrVCp7UqH1sAi4JK/PWwlc5aaxubgorKmC4= +github.com/digitalocean/godo v1.118.0/go.mod h1:Vk0vpCot2HOAJwc5WE8wljZGtJ3ZtWIc8MQ8rF38sdo= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= -github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v27.0.3+incompatible h1:aBGI9TeQ4MPlhquTQKq9XbK79rKFVwXNUAYz9aXyEBE= +github.com/docker/docker v27.0.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -768,8 +768,8 @@ github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGE github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= -github.com/felixge/fgprof v0.9.4 h1:ocDNwMFlnA0NU0zSB3I52xkO4sFXk80VK9lXjLClu88= -github.com/felixge/fgprof v0.9.4/go.mod h1:yKl+ERSa++RYOs32d8K6WEXCB4uXdLls4ZaZPpayhMM= 
+github.com/felixge/fgprof v0.9.5 h1:8+vR6yu2vvSKn08urWyEuxx75NWPEvybbkBirEpsbVY= +github.com/felixge/fgprof v0.9.5/go.mod h1:yKl+ERSa++RYOs32d8K6WEXCB4uXdLls4ZaZPpayhMM= github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= @@ -972,8 +972,8 @@ github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= -github.com/google/pprof v0.0.0-20240528025155-186aa0362fba h1:ql1qNgCyOB7iAEk8JTNM+zJrgIbnyCKX/wdlyPufP5g= -github.com/google/pprof v0.0.0-20240528025155-186aa0362fba/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo= +github.com/google/pprof v0.0.0-20240827171923-fa2c70bbbfe5 h1:5iH8iuqE5apketRbSFBy+X1V0o+l+8NF1avt4HWl7cA= +github.com/google/pprof v0.0.0-20240827171923-fa2c70bbbfe5/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= @@ -998,12 +998,12 @@ github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo= github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY= github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8= -github.com/googleapis/gax-go/v2 v2.12.4 h1:9gWcmF85Wvq4ryPFvGFaOgPIs1AQX0d0bcbGw4Z96qg= -github.com/googleapis/gax-go/v2 v2.12.4/go.mod h1:KYEYLorsnIGDi/rPC8b5TdlB9kbKoFubselGIoBMCwI= +github.com/googleapis/gax-go/v2 v2.12.5 h1:8gw9KZK8TiVKB6q3zHY3SBzLnrGp6HQjyfYBYGmXdxA= +github.com/googleapis/gax-go/v2 v2.12.5/go.mod h1:BUDKcWo+RaKq5SC9vVYL0wLADa3VcfswbOMMRmB9H3E= github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= -github.com/gophercloud/gophercloud v1.12.0 h1:Jrz16vPAL93l80q16fp8NplrTCp93y7rZh2P3Q4Yq7g= -github.com/gophercloud/gophercloud v1.12.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM= +github.com/gophercloud/gophercloud v1.13.0 h1:8iY9d1DAbzMW6Vok1AxbbK5ZaUjzMp0tdyt4fX9IeJ0= +github.com/gophercloud/gophercloud v1.13.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM= github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= @@ -1021,12 +1021,12 @@ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgf github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= 
github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 h1:asbCHRVmodnJTuQ3qamDwqVOIjwqUPTYmYuemVOx+Ys= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0/go.mod h1:ggCgvZ2r7uOoQjOyu2Y1NhHmEPPzzuhWgcza5M1Ji1I= github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw= github.com/hashicorp/consul/api v1.12.0/go.mod h1:6pVBMo0ebnYdt2S3H87XhekM/HHrUoTD2XXb/VrZVy0= -github.com/hashicorp/consul/api v1.29.2 h1:aYyRn8EdE2mSfG14S1+L9Qkjtz8RzmaWh6AcNGRNwPw= -github.com/hashicorp/consul/api v1.29.2/go.mod h1:0YObcaLNDSbtlgzIRtmRXI1ZkeuK0trCBxwZQ4MYnIk= +github.com/hashicorp/consul/api v1.29.4 h1:P6slzxDLBOxUSj3fWo2o65VuKtbtOXFi7TSSgtXutuE= +github.com/hashicorp/consul/api v1.29.4/go.mod h1:HUlfw+l2Zy68ceJavv2zAyArl2fqhGWnMycyt56sBgg= github.com/hashicorp/consul/proto-public v0.6.2 h1:+DA/3g/IiKlJZb88NBn0ZgXrxJp2NlvCZdEyl+qxvL0= github.com/hashicorp/consul/proto-public v0.6.2/go.mod h1:cXXbOg74KBNGajC+o8RlA502Esf0R9prcoJgiOX/2Tg= github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms= @@ -1080,14 +1080,14 @@ github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyf github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc= -github.com/hashicorp/nomad/api v0.0.0-20240604134157-e73d8bb1140d h1:KHq+mAzWSkumj4PDoXc5VZbycPGcmYu8tohgVLQ6SIc= -github.com/hashicorp/nomad/api v0.0.0-20240604134157-e73d8bb1140d/go.mod h1:svtxn6QnrQ69P23VvIWMR34tg3vmwLz4UdUzm1dSCgE= +github.com/hashicorp/nomad/api v0.0.0-20240717122358-3d93bd3778f3 h1:fgVfQ4AC1avVOnu2cfms8VAiD8lUq3vWI8mTocOXN/w= +github.com/hashicorp/nomad/api v0.0.0-20240717122358-3d93bd3778f3/go.mod h1:svtxn6QnrQ69P23VvIWMR34tg3vmwLz4UdUzm1dSCgE= github.com/hashicorp/serf v0.9.6/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= github.com/hashicorp/serf v0.9.7/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY= github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4= -github.com/hetznercloud/hcloud-go/v2 v2.9.0 h1:s0N6R7Zoi2DPfMtUF5o9VeUBzTtHVY6MIkHOQnfu/AY= -github.com/hetznercloud/hcloud-go/v2 v2.9.0/go.mod h1:qtW/TuU7Bs16ibXl/ktJarWqU2LwHr7eGlwoilHxtgg= +github.com/hetznercloud/hcloud-go/v2 v2.10.2 h1:9gyTUPhfNbfbS40Spgij5mV5k37bOZgt8iHKCbfGs5I= +github.com/hetznercloud/hcloud-go/v2 v2.10.2/go.mod h1:xQ+8KhIS62W0D78Dpi57jsufWh844gUw1az5OUvaeq8= github.com/huaweicloud/huaweicloud-sdk-go-obs v3.23.3+incompatible h1:tKTaPHNVwikS3I1rdyf1INNvgJXWSf/+TzqsiGbrgnQ= github.com/huaweicloud/huaweicloud-sdk-go-obs v3.23.3+incompatible/go.mod h1:l7VUhRbTKCzdOacdT4oWCwATKyvZqUOlOqr0Ous3k4s= github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= @@ -1162,8 +1162,8 @@ github.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80/go.mod h1:imJHygn/1 github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= github.com/lib/pq v1.10.9 
h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= -github.com/linode/linodego v1.35.0 h1:rIhUeCHBLEDlkoRnOTwzSGzljQ3ksXwLxacmXnrV+Do= -github.com/linode/linodego v1.35.0/go.mod h1:JxuhOEAMfSxun6RU5/MgTKH2GGTmFrhKRj3wL1NFin0= +github.com/linode/linodego v1.37.0 h1:B/2Spzv9jYXzKA+p+GD8fVCNJ7Wuw6P91ZDD9eCkkso= +github.com/linode/linodego v1.37.0/go.mod h1:L7GXKFD3PoN2xSEtFc04wIXP5WK65O10jYQx0PQISWQ= github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= github.com/lyft/protoc-gen-star v0.6.1/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= github.com/lyft/protoc-gen-star/v2 v2.0.1/go.mod h1:RcCdONR2ScXaYnQC5tUzxzlpA3WVYF7/opLeUgcQs/o= @@ -1200,14 +1200,14 @@ github.com/metalmatze/signal v0.0.0-20210307161603-1c9aa721a97a/go.mod h1:3OETvr github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= -github.com/miekg/dns v1.1.59 h1:C9EXc/UToRwKLhK5wKU/I4QVsBUc8kE6MkHBkeypWZs= -github.com/miekg/dns v1.1.59/go.mod h1:nZpewl5p6IvctfgrckopVx2OlSEHPRO/U4SYkRklrEk= +github.com/miekg/dns v1.1.62 h1:cN8OuEF1/x5Rq6Np+h1epln8OiyPWV+lROx9LxcGgIQ= +github.com/miekg/dns v1.1.62/go.mod h1:mvDlcItzm+br7MToIKqkglaGhlFMHJ9DTNNWONWXbNQ= github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY= github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3/go.mod h1:RagcQ7I8IeTMnF8JTXieKnO4Z6JCsikNEzj0DwauVzE= github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM= -github.com/minio/minio-go/v7 v7.0.74 h1:fTo/XlPBTSpo3BAMshlwKL5RspXRv9us5UeHEGYCFe0= -github.com/minio/minio-go/v7 v7.0.74/go.mod h1:qydcVzV8Hqtj1VtEocfxbmVFa2siu6HGa+LDEPogjD8= +github.com/minio/minio-go/v7 v7.0.75 h1:0uLrB6u6teY2Jt+cJUVi9cTvDRuBKWSRzSAcznRkwlE= +github.com/minio/minio-go/v7 v7.0.75/go.mod h1:qydcVzV8Hqtj1VtEocfxbmVFa2siu6HGa+LDEPogjD8= github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM= github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= @@ -1252,8 +1252,8 @@ github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= -github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk= -github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0= +github.com/onsi/gomega v1.34.2 h1:pNCwDkzrsv7MS9kpaQvVb1aVLahQXyJ/Tv5oAZMI3i8= +github.com/onsi/gomega v1.34.2/go.mod h1:v1xfxRgk0KIsG+QOdm7p8UosrOzPYRo60fd3B/1Dukc= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM= @@ -1270,8 +1270,8 @@ github.com/opentracing/opentracing-go v1.2.0/go.mod 
h1:GxEUsuufX4nBwe+T+Wl9TAgYr github.com/oracle/oci-go-sdk/v65 v65.41.1 h1:+lbosOyNiib3TGJDvLq1HwEAuFqkOjPJDIkyxM15WdQ= github.com/oracle/oci-go-sdk/v65 v65.41.1/go.mod h1:MXMLMzHnnd9wlpgadPkdlkZ9YrwQmCOmbX5kjVEJodw= github.com/orisano/pixelmatch v0.0.0-20220722002657-fb0b55479cde/go.mod h1:nZgzbfBr3hhjoZnS66nKrHmduYNpc34ny7RK4z5/HM0= -github.com/ovh/go-ovh v1.5.1 h1:P8O+7H+NQuFK9P/j4sFW5C0fvSS2DnHYGPwdVCp45wI= -github.com/ovh/go-ovh v1.5.1/go.mod h1:cTVDnl94z4tl8pP1uZ/8jlVxntjSIf09bNcQ5TJSC7c= +github.com/ovh/go-ovh v1.6.0 h1:ixLOwxQdzYDx296sXcgS35TOPEahJkpjMGtzPadCjQI= +github.com/ovh/go-ovh v1.6.0/go.mod h1:cTVDnl94z4tl8pP1uZ/8jlVxntjSIf09bNcQ5TJSC7c= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= @@ -1310,8 +1310,8 @@ github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrb github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ= github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y= github.com/prometheus/client_golang v1.15.1/go.mod h1:e9yaBhRPU2pPNsZwE+JdQl0KEt1N9XgF6zxWmaC0xOk= -github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= -github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= +github.com/prometheus/client_golang v1.20.2 h1:5ctymQzZlyOON1666svgwn3s6IKWgfbjsejTMiXIyjg= +github.com/prometheus/client_golang v1.20.2/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -1327,8 +1327,8 @@ github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+ github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc= -github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= -github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= +github.com/prometheus/common v0.58.0 h1:N+N8vY4/23r6iYfD3UQZUoJPnUYAo7v6LG5XZxjZTXo= +github.com/prometheus/common v0.58.0/go.mod h1:GpWM7dewqmVYcd7SmRaiWVe9SSqjf0UrwnYnpEZNuT0= github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI= github.com/prometheus/exporter-toolkit v0.8.2/go.mod h1:00shzmJL7KxcsabLWcONwpyNEuWhREOnFqZW7vadFS0= @@ -1344,8 +1344,8 @@ github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0ua github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= -github.com/prometheus/prometheus 
v0.53.2-0.20240718123124-e9dec5fc537b h1:3XXVgSiLpgc9xXIsmBmgmd9I6bPaAaP/CBwwGV+y92U= -github.com/prometheus/prometheus v0.53.2-0.20240718123124-e9dec5fc537b/go.mod h1:TzWm3Q1bk8bzJ6t7IwnBfzcQvf4FZGUm/M5ynmaqfVI= +github.com/prometheus/prometheus v0.54.0-rc.0 h1:OWyFAuGkQTJOcWOgMHw6HnVjjT3Nv3ZeVo6reb+amy4= +github.com/prometheus/prometheus v0.54.0-rc.0/go.mod h1:xlLByHhk2g3ycakQGrMaU8K7OySZx98BzeCR99991NY= github.com/redis/rueidis v1.0.14-go1.18 h1:dGir5z8w8X1ex7JWO/Zx2FMBrZgQ8Yjm+lw9fPLSNGw= github.com/redis/rueidis v1.0.14-go1.18/go.mod h1:HGekzV3HbmzFmRK6j0xic8Z9119+ECoGMjeN1TV1NYU= github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= @@ -1363,8 +1363,8 @@ github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfF github.com/ruudk/golang-pdf417 v0.0.0-20201230142125-a7e3863a1245/go.mod h1:pQAZKsJ8yyVxGRWYNEm9oFB8ieLgKFnamEyDmSA0BRk= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/sagikazarmark/crypt v0.6.0/go.mod h1:U8+INwJo3nBv1m6A/8OBXAq7Jnpspk5AxSgDyEQcea8= -github.com/scaleway/scaleway-sdk-go v1.0.0-beta.27 h1:yGAraK1uUjlhSXgNMIy8o/J4LFNcy7yeipBqt9N9mVg= -github.com/scaleway/scaleway-sdk-go v1.0.0-beta.27/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.29 h1:BkTk4gynLjguayxrYxZoMZjBnAOh7ntQvUkOFmkMqPU= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.29/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/segmentio/fasthash v1.0.3 h1:EI9+KE1EwvMLBWwjpRDc+fEM+prwxDYbslddQGtrmhM= @@ -1415,6 +1415,7 @@ github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1F github.com/stretchr/testify v1.7.5/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/subosito/gotenv v1.4.1/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= @@ -1426,8 +1427,8 @@ github.com/thanos-io/objstore v0.0.0-20240622095743-1afe5d4bc3cd h1:YBDmfk3k/eOY github.com/thanos-io/objstore v0.0.0-20240622095743-1afe5d4bc3cd/go.mod h1:3ukSkG4rIRUGkKM4oIz+BSuUx2e3RlQVVv3Cc3W+Tv4= github.com/thanos-io/promql-engine v0.0.0-20240718195911-cdbd6dfed36b h1:V06gjM1OFiJydoClwiGOMCpBWLSpxa5FZBvBc3coQg4= github.com/thanos-io/promql-engine v0.0.0-20240718195911-cdbd6dfed36b/go.mod h1:Gtv7CJIxGyiGsT+bNDg4nOAsL/bVKLlpfOZUSLSyYfY= -github.com/thanos-io/thanos v0.35.2-0.20240722172812-990a60b72647 h1:0qjB7yYBB4LeGw+BWVrEsPMHabYgXjfh2pD2vkuRa9s= -github.com/thanos-io/thanos v0.35.2-0.20240722172812-990a60b72647/go.mod h1:4QL7wA5z+Uh4tE6fm4Ar+nqQKgAxWzdOWdcBBjABUvo= +github.com/thanos-io/thanos v0.35.2-0.20240904173945-09db52562de0 h1:tUAKZQYn34cqQqo9PJqmvxIbcPpfps2Biul+w1sAsOg= +github.com/thanos-io/thanos v0.35.2-0.20240904173945-09db52562de0/go.mod h1:h7Nq2a5HXu76HcYg3Ht3JeUUIDDhU7hA9tqyBZKuGuA= 
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/uber/jaeger-client-go v2.28.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-client-go v2.30.0+incompatible h1:D6wyKGCecFaSRUpo8lCVbaOOb6ThwMmTEbhRwtKR97o= @@ -1476,38 +1477,38 @@ go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/collector/pdata v1.12.0 h1:Xx5VK1p4VO0md8MWm2icwC1MnJ7f8EimKItMWw46BmA= -go.opentelemetry.io/collector/pdata v1.12.0/go.mod h1:MYeB0MmMAxeM0hstCFrCqWLzdyeYySim2dG6pDT6nYI= -go.opentelemetry.io/collector/semconv v0.104.0 h1:dUvajnh+AYJLEW/XOPk0T0BlwltSdi3vrjO7nSOos3k= -go.opentelemetry.io/collector/semconv v0.104.0/go.mod h1:yMVUCNoQPZVq/IPfrHrnntZTWsLf5YGZ7qwKulIl5hw= +go.opentelemetry.io/collector/pdata v1.13.0 h1:eV3NQt2f1UcaibkziMvGTQI34LlpiYBUGp1yP0G/Cxw= +go.opentelemetry.io/collector/pdata v1.13.0/go.mod h1:MYeB0MmMAxeM0hstCFrCqWLzdyeYySim2dG6pDT6nYI= +go.opentelemetry.io/collector/semconv v0.105.0 h1:8p6dZ3JfxFTjbY38d8xlQGB1TQ3nPUvs+D0RERniZ1g= +go.opentelemetry.io/collector/semconv v0.105.0/go.mod h1:yMVUCNoQPZVq/IPfrHrnntZTWsLf5YGZ7qwKulIl5hw= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 h1:4Pp6oUg3+e/6M4C0A/3kJ2VYa++dsWVTtGgLVj5xtHg= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0/go.mod h1:Mjt1i1INqiaoZOMGR1RIUJN+i3ChKoFRqzrRQhlkbs0= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 h1:9l89oX4ba9kHbBol3Xin3leYJ+252h0zszDtBwyKe2A= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0/go.mod h1:XLZfZboOJWHNKUv7eH0inh0E9VV6eWDFB/9yJyTLPp0= -go.opentelemetry.io/contrib/propagators/autoprop v0.53.0 h1:4zaVLcJ5mvYw0vlk63TX62qS4qty/4jAY1BKZ1usu18= -go.opentelemetry.io/contrib/propagators/autoprop v0.53.0/go.mod h1:RPlvYtxp5D8PKnRzyPM+rwMQrvzdlfA49Sgworkg7aQ= -go.opentelemetry.io/contrib/propagators/aws v1.28.0 h1:acyTl4oyin/iLr5Nz3u7p/PKHUbLh42w/fqg9LblExk= -go.opentelemetry.io/contrib/propagators/aws v1.28.0/go.mod h1:5WgIv6yG9DvLlSY2uIHrYSeVVwCDCqp4jhwinNNyeT4= -go.opentelemetry.io/contrib/propagators/b3 v1.28.0 h1:XR6CFQrQ/ttAYmTBX2loUEFGdk1h17pxYI8828dk/1Y= -go.opentelemetry.io/contrib/propagators/b3 v1.28.0/go.mod h1:DWRkzJONLquRz7OJPh2rRbZ7MugQj62rk7g6HRnEqh0= -go.opentelemetry.io/contrib/propagators/jaeger v1.28.0 h1:xQ3ktSVS128JWIaN1DiPGIjcH+GsvkibIAVRWFjS9eM= -go.opentelemetry.io/contrib/propagators/jaeger v1.28.0/go.mod h1:O9HIyI2kVBrFoEwQZ0IN6PHXykGoit4mZV2aEjkTRH4= -go.opentelemetry.io/contrib/propagators/ot v1.28.0 h1:rmlG+2pc5k5M7Y7izDrxAHZUIwDERdGMTD9oMV7llMk= -go.opentelemetry.io/contrib/propagators/ot v1.28.0/go.mod h1:MNgXIn+UrMbNGpd7xyckyo2LCHIgCdmdjEE7YNZGG+w= -go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo= -go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4= -go.opentelemetry.io/otel/bridge/opentracing v1.28.0 h1:erHvOxIUFnSXj/HuS5SqaKe2CbWSBskONXm2bEBxYgc= -go.opentelemetry.io/otel/bridge/opentracing v1.28.0/go.mod h1:ZMOFThPtIKYiVqzKrU53s41j25Cj27KySyu5Az5jRPU= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY= 
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 h1:R3X6ZXmNPRR8ul6i3WgFURCHzaXjHdm0karRG/+dj3s= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0/go.mod h1:QWFXnDavXWwMx2EEcZsf3yxgEKAqsxQ+Syjp+seyInw= -go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q= -go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s= -go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE= -go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg= -go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g= -go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg= +go.opentelemetry.io/contrib/propagators/autoprop v0.54.0 h1:h/O1OcNbqrFilsMKfG6MJWWpx8gzCDfn9D+1W7lU3lE= +go.opentelemetry.io/contrib/propagators/autoprop v0.54.0/go.mod h1:VIaPlErTgbng1UhrMA4N6Yy+f94PLA/qRPOCMATdoCs= +go.opentelemetry.io/contrib/propagators/aws v1.29.0 h1:mqadbdNBhn/MVOcNx0dEZAaOaomKKdnsM0QNBmFegiI= +go.opentelemetry.io/contrib/propagators/aws v1.29.0/go.mod h1:3RCUqtGbLbVr6REZv3pQbtqql9GNEpvyB7GiTJhP/nk= +go.opentelemetry.io/contrib/propagators/b3 v1.29.0 h1:hNjyoRsAACnhoOLWupItUjABzeYmX3GTTZLzwJluJlk= +go.opentelemetry.io/contrib/propagators/b3 v1.29.0/go.mod h1:E76MTitU1Niwo5NSN+mVxkyLu4h4h7Dp/yh38F2WuIU= +go.opentelemetry.io/contrib/propagators/jaeger v1.29.0 h1:+YPiqF5rR6PqHBlmEFLPumbSP0gY0WmCGFayXRcCLvs= +go.opentelemetry.io/contrib/propagators/jaeger v1.29.0/go.mod h1:6PD7q7qquWSp3Z4HeM3e/2ipRubaY1rXZO8NIHVDZjs= +go.opentelemetry.io/contrib/propagators/ot v1.29.0 h1:CaJU78FvXrA6ajjp1dOdcABBEjh529+hl396RTqc2LQ= +go.opentelemetry.io/contrib/propagators/ot v1.29.0/go.mod h1:Sc0omwLb4eptUhwOAfYXfmPmErHPu2HV6vkeDge/3sY= +go.opentelemetry.io/otel v1.29.0 h1:PdomN/Al4q/lN6iBJEN3AwPvUiHPMlt93c8bqTG5Llw= +go.opentelemetry.io/otel v1.29.0/go.mod h1:N/WtXPs1CNCUEx+Agz5uouwCba+i+bJGFicT8SR4NP8= +go.opentelemetry.io/otel/bridge/opentracing v1.29.0 h1:qrife6xWXoBwBeif0/9nVr+18Gq3+st7kT2iTTKbu5o= +go.opentelemetry.io/otel/bridge/opentracing v1.29.0/go.mod h1:9MckCOAmd8dHQS92890ShcIwkVz/0tF/WvnMUMd9mGQ= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.29.0 h1:dIIDULZJpgdiHz5tXrTgKIMLkus6jEFa7x5SOKcyR7E= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.29.0/go.mod h1:jlRVBe7+Z1wyxFSUs48L6OBQZ5JwH2Hg/Vbl+t9rAgI= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.29.0 h1:nSiV3s7wiCam610XcLbYOmMfJxB9gO4uK3Xgv5gmTgg= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.29.0/go.mod h1:hKn/e/Nmd19/x1gvIHwtOwVWM+VhuITSWip3JUDghj0= +go.opentelemetry.io/otel/metric v1.29.0 h1:vPf/HFWTNkPu1aYeIsc98l4ktOQaL6LeSoeV2g+8YLc= +go.opentelemetry.io/otel/metric v1.29.0/go.mod h1:auu/QWieFVWx+DmQOUMgj0F8LHWdgalxXqvp7BII/W8= +go.opentelemetry.io/otel/sdk v1.29.0 h1:vkqKjk7gwhS8VaWb0POZKmIEDimRCMsopNYnriHyryo= +go.opentelemetry.io/otel/sdk v1.29.0/go.mod h1:pM8Dx5WKnvxLCb+8lG1PRNIDxu9g9b9g59Qr7hfAAok= +go.opentelemetry.io/otel/trace v1.29.0 h1:J/8ZNK4XgR7a21DZUAsbF8pZ5Jcw1VhACmnYt39JTi4= +go.opentelemetry.io/otel/trace 
v1.29.0/go.mod h1:eHl3w0sp3paPkYstJOmAimxhiFXPg+MMTlEh3nsQgWQ= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= @@ -1545,8 +1546,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20221012134737-56aed061732a/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30= -golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M= +golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw= +golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -1606,8 +1607,8 @@ golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91 golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0= -golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0= +golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1673,8 +1674,8 @@ golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= -golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys= -golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE= +golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= +golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1704,8 +1705,8 @@ golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec= 
golang.org/x/oauth2 v0.5.0/go.mod h1:9/XBHVqLaWO3/BRHs5jbpYCnOZVjj5V0ndyaAM7KB4I= golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw= -golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= -golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.22.0 h1:BzDx2FehcG7jJwgWLELCdmLuxk2i+x9UDpSiss2u0ZA= +golang.org/x/oauth2 v0.22.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1826,8 +1827,8 @@ golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= -golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= +golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= @@ -1835,8 +1836,8 @@ golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= -golang.org/x/term v0.22.0 h1:BbsgPEJULsl2fV/AT3v15Mjva5yXKQDyKf+TbDz7QJk= -golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4= +golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU= +golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1852,15 +1853,15 @@ golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= -golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= +golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= +golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod 
h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= -golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U= +golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -1928,8 +1929,8 @@ golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= -golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA= -golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c= +golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24= +golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -2007,8 +2008,8 @@ google.golang.org/api v0.106.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/ google.golang.org/api v0.107.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= google.golang.org/api v0.108.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= google.golang.org/api v0.110.0/go.mod h1:7FC4Vvx1Mooxh8C5HWjzZHcavuS2f6pmJpZx60ca7iI= -google.golang.org/api v0.183.0 h1:PNMeRDwo1pJdgNcFQ9GstuLe/noWKIc89pRWRLMvLwE= -google.golang.org/api v0.183.0/go.mod h1:q43adC5/pHoSZTx5h2mSmdF7NcyfW9JuDyIOJAgS9ZQ= +google.golang.org/api v0.188.0 h1:51y8fJ/b1AaaBRJr4yWm96fPcuxSo0JcegXE3DaHQHw= +google.golang.org/api v0.188.0/go.mod h1:VR0d+2SIiWOYG3r/jdm7adPW9hI2aRv9ETOSCQ9Beag= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -2146,12 +2147,12 @@ google.golang.org/genproto v0.0.0-20230209215440-0dfe4f8abfcc/go.mod h1:RGgjbofJ google.golang.org/genproto v0.0.0-20230216225411-c8e22ba71e44/go.mod h1:8B0gmkoRebU8ukX6HP+4wrVQUY1+6PkQ44BSyIlflHA= google.golang.org/genproto v0.0.0-20230222225845-10f96fb3dbec/go.mod h1:3Dl5ZL0q0isWJt+FVcfpQyirqemEuLAK/iFvg1UP1Hw= google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= -google.golang.org/genproto v0.0.0-20240528184218-531527333157 h1:u7WMYrIrVvs0TF5yaKwKNbcJyySYf+HAIFXxWltJOXE= -google.golang.org/genproto 
v0.0.0-20240528184218-531527333157/go.mod h1:ubQlAQnzejB8uZzszhrTCU2Fyp6Vi7ZE5nn0c3W8+qQ= -google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 h1:0+ozOGcrp+Y8Aq8TLNN2Aliibms5LEzsq99ZZmAGYm0= -google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094/go.mod h1:fJ/e3If/Q67Mj99hin0hMhiNyCRmt6BQ2aWIJshUSJw= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 h1:BwIjyKYGsK9dMCBOorzRri8MQwmi7mT9rGHsCEinZkA= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= +google.golang.org/genproto v0.0.0-20240708141625-4ad9e859172b h1:dSTjko30weBaMj3eERKc0ZVXW4GudCswM3m+P++ukU0= +google.golang.org/genproto v0.0.0-20240708141625-4ad9e859172b/go.mod h1:FfBgJBJg9GcpPvKIuHSZ/aE1g2ecGL74upMzGZjiGEY= +google.golang.org/genproto/googleapis/api v0.0.0-20240822170219-fc7c04adadcd h1:BBOTEWLuuEGQy9n1y9MhVJ9Qt0BDu21X8qZs71/uPZo= +google.golang.org/genproto/googleapis/api v0.0.0-20240822170219-fc7c04adadcd/go.mod h1:fO8wJzT2zbQbAjbIoos1285VfEIYKDDY+Dt+WpTkh6g= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd h1:6TEm2ZxXoQmFWFlt1vNxvVOa1Q0dXFQD1m/rYjXmS0E= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= diff --git a/integration/backward_compatibility_test.go b/integration/backward_compatibility_test.go index 410d936e07..e209346528 100644 --- a/integration/backward_compatibility_test.go +++ b/integration/backward_compatibility_test.go @@ -18,20 +18,53 @@ import ( "github.com/cortexproject/cortex/integration/e2ecortex" ) +type versionsImagesFlags struct { + flagsForOldImage func(map[string]string) map[string]string + flagsForNewImage func(map[string]string) map[string]string +} + var ( // If you change the image tag, remember to update it in the preloading done // by GitHub Actions too (see .github/workflows/test-build-deploy.yml). 
- previousVersionImages = map[string]func(map[string]string) map[string]string{ - "quay.io/cortexproject/cortex:v1.13.1": func(m map[string]string) map[string]string { - m["-ingester.stream-chunks-when-using-blocks"] = "true" - return m + previousVersionImages = map[string]*versionsImagesFlags{ + "quay.io/cortexproject/cortex:v1.13.1": { + flagsForOldImage: func(m map[string]string) map[string]string { + m["-ingester.stream-chunks-when-using-blocks"] = "true" + return m + }, + flagsForNewImage: func(m map[string]string) map[string]string { + m["-ingester.client.grpc-compression"] = "snappy" + return m + }, + }, + "quay.io/cortexproject/cortex:v1.13.2": { + flagsForOldImage: func(m map[string]string) map[string]string { + m["-ingester.stream-chunks-when-using-blocks"] = "true" + return m + }, + flagsForNewImage: func(m map[string]string) map[string]string { + m["-ingester.client.grpc-compression"] = "snappy" + return m + }, + }, + "quay.io/cortexproject/cortex:v1.14.0": { + flagsForOldImage: func(m map[string]string) map[string]string { + return m + }, + flagsForNewImage: func(m map[string]string) map[string]string { + m["-ingester.client.grpc-compression"] = "snappy" + return m + }, }, - "quay.io/cortexproject/cortex:v1.13.2": func(m map[string]string) map[string]string { - m["-ingester.stream-chunks-when-using-blocks"] = "true" - return m + "quay.io/cortexproject/cortex:v1.14.1": { + flagsForOldImage: func(m map[string]string) map[string]string { + return m + }, + flagsForNewImage: func(m map[string]string) map[string]string { + m["-ingester.client.grpc-compression"] = "snappy" + return m + }, }, - "quay.io/cortexproject/cortex:v1.14.0": nil, - "quay.io/cortexproject/cortex:v1.14.1": nil, "quay.io/cortexproject/cortex:v1.15.0": nil, "quay.io/cortexproject/cortex:v1.15.1": nil, "quay.io/cortexproject/cortex:v1.15.2": nil, @@ -40,31 +73,46 @@ var ( "quay.io/cortexproject/cortex:v1.16.1": nil, "quay.io/cortexproject/cortex:v1.17.0": nil, "quay.io/cortexproject/cortex:v1.17.1": nil, + "quay.io/cortexproject/cortex:v1.18.0": nil, } ) func TestBackwardCompatibilityWithBlocksStorage(t *testing.T) { - for previousImage, flagsFn := range previousVersionImages { + for previousImage, imagesFlags := range previousVersionImages { t.Run(fmt.Sprintf("Backward compatibility upgrading from %s", previousImage), func(t *testing.T) { flags := blocksStorageFlagsWithFlushOnShutdown() - if flagsFn != nil { - flags = flagsFn(flags) + var flagsForNewImage func(map[string]string) map[string]string + if imagesFlags != nil { + if imagesFlags.flagsForOldImage != nil { + flags = imagesFlags.flagsForOldImage(flags) + } + + if imagesFlags.flagsForNewImage != nil { + flagsForNewImage = imagesFlags.flagsForNewImage + } } - runBackwardCompatibilityTestWithBlocksStorage(t, previousImage, flags) + runBackwardCompatibilityTestWithBlocksStorage(t, previousImage, flags, flagsForNewImage) }) } } func TestNewDistributorsCanPushToOldIngestersWithReplication(t *testing.T) { - for previousImage, flagsFn := range previousVersionImages { + for previousImage, imagesFlags := range previousVersionImages { t.Run(fmt.Sprintf("Backward compatibility upgrading from %s", previousImage), func(t *testing.T) { flags := blocksStorageFlagsWithFlushOnShutdown() - if flagsFn != nil { - flags = flagsFn(flags) + var flagsForNewImage func(map[string]string) map[string]string + if imagesFlags != nil { + if imagesFlags.flagsForOldImage != nil { + flags = imagesFlags.flagsForOldImage(flags) + } + + if imagesFlags.flagsForNewImage != nil { + 
flagsForNewImage = imagesFlags.flagsForNewImage + } } - runNewDistributorsCanPushToOldIngestersWithReplication(t, previousImage, flags) + runNewDistributorsCanPushToOldIngestersWithReplication(t, previousImage, flags, flagsForNewImage) }) } } @@ -75,7 +123,7 @@ func blocksStorageFlagsWithFlushOnShutdown() map[string]string { }) } -func runBackwardCompatibilityTestWithBlocksStorage(t *testing.T, previousImage string, flagsForOldImage map[string]string) { +func runBackwardCompatibilityTestWithBlocksStorage(t *testing.T, previousImage string, flagsForOldImage map[string]string, flagsForNewImageFn func(map[string]string) map[string]string) { s, err := e2e.NewScenario(networkName) require.NoError(t, err) defer s.Close() @@ -87,6 +135,10 @@ func runBackwardCompatibilityTestWithBlocksStorage(t *testing.T, previousImage s flagsForNewImage := blocksStorageFlagsWithFlushOnShutdown() + if flagsForNewImageFn != nil { + flagsForNewImage = flagsForNewImageFn(flagsForNewImage) + } + // Start other Cortex components (ingester running on previous version). ingester1 := e2ecortex.NewIngester("ingester-1", e2ecortex.RingStoreConsul, consul.NetworkHTTPEndpoint(), flagsForOldImage, previousImage) distributor := e2ecortex.NewDistributor("distributor", "consul", consul.NetworkHTTPEndpoint(), flagsForNewImage, "") @@ -127,7 +179,7 @@ func runBackwardCompatibilityTestWithBlocksStorage(t *testing.T, previousImage s } // Check for issues like https://github.com/cortexproject/cortex/issues/2356 -func runNewDistributorsCanPushToOldIngestersWithReplication(t *testing.T, previousImage string, flagsForPreviousImage map[string]string) { +func runNewDistributorsCanPushToOldIngestersWithReplication(t *testing.T, previousImage string, flagsForPreviousImage map[string]string, flagsForNewImageFn func(map[string]string) map[string]string) { s, err := e2e.NewScenario(networkName) require.NoError(t, err) defer s.Close() @@ -141,6 +193,10 @@ func runNewDistributorsCanPushToOldIngestersWithReplication(t *testing.T, previo "-distributor.replication-factor": "3", }) + if flagsForNewImageFn != nil { + flagsForNewImage = flagsForNewImageFn(flagsForNewImage) + } + // Start other Cortex components (ingester running on previous version). 
ingester1 := e2ecortex.NewIngester("ingester-1", e2ecortex.RingStoreConsul, consul.NetworkHTTPEndpoint(), flagsForPreviousImage, previousImage) ingester2 := e2ecortex.NewIngester("ingester-2", e2ecortex.RingStoreConsul, consul.NetworkHTTPEndpoint(), flagsForPreviousImage, previousImage) diff --git a/integration/configs.go b/integration/configs.go index 07d9e78b15..c95a6bdc76 100644 --- a/integration/configs.go +++ b/integration/configs.go @@ -63,14 +63,6 @@ receivers: labels: {} annotations: {} ` - - cortexRulerEvalStaleNanConfigYaml = `groups: -- name: rule - interval: 1s - rules: - - record: stale_nan_eval - expr: a_sometimes_stale_nan_series * 2 -` ) var ( diff --git a/integration/query_frontend_test.go b/integration/query_frontend_test.go index 0af72024c4..a2c81ed95f 100644 --- a/integration/query_frontend_test.go +++ b/integration/query_frontend_test.go @@ -798,4 +798,44 @@ func TestQueryFrontendQueryRejection(t *testing.T) { require.Equal(t, http.StatusUnprocessableEntity, resp.StatusCode) require.Contains(t, string(body), tripperware.QueryRejectErrorMessage) + newRuntimeConfig = []byte(`overrides: + user-1: + query_rejection: + enabled: true + query_attributes: + - query_step_limit: + min: 12s + - api_type: "labels" + - dashboard_uid: "dash123" + panel_id: "panel321" +`) + + require.NoError(t, client.Upload(context.Background(), configFileName, bytes.NewReader(newRuntimeConfig))) + time.Sleep(2 * time.Second) + + // We expect any request for a specific API to be rejected if api_type is configured for that API and no other properties are provided + resp, body, err = c.LabelNamesRaw([]string{}, time.Time{}, time.Time{}, map[string]string{}) + require.NoError(t, err) + require.Equal(t, http.StatusUnprocessableEntity, resp.StatusCode) + require.Contains(t, string(body), tripperware.QueryRejectErrorMessage) + + // We expect the request not to be rejected if none of the provided parameters are applicable to the current API type. + // There is no dashboardUID or panelId in metadata queries, so if only those are provided, the metadata query shouldn't be rejected.
+ resp, body, err = c.LabelValuesRaw("cluster", []string{}, time.Time{}, time.Time{}, map[string]string{"User-Agent": "grafana-agent/v0.19.0"}) + require.NoError(t, err) + require.Equal(t, http.StatusOK, resp.StatusCode) + require.NotContains(t, string(body), tripperware.QueryRejectErrorMessage) + + // We expect an instant query request not to be rejected if only query_step_limit is provided (it's not applicable to instant queries) + resp, body, err = c.QueryRaw("{instance=~\"hello.*\"}", time.Time{}, map[string]string{}) + require.NoError(t, err) + require.Equal(t, http.StatusOK, resp.StatusCode) + require.NotContains(t, string(body), tripperware.QueryRejectErrorMessage) + + // We expect a range query request to be rejected even if only query_step_limit is provided + resp, body, err = c.QueryRangeRaw(`rate(test[1m])`, now.Add(-11*time.Hour), now.Add(-8*time.Hour), 20*time.Minute, map[string]string{"X-Dashboard-Uid": "dash123", "User-Agent": "grafana"}) + require.NoError(t, err) + require.Equal(t, http.StatusUnprocessableEntity, resp.StatusCode) + require.Contains(t, string(body), tripperware.QueryRejectErrorMessage) + } diff --git a/integration/ruler_test.go b/integration/ruler_test.go index 2da8db2e01..aec4478af8 100644 --- a/integration/ruler_test.go +++ b/integration/ruler_test.go @@ -10,7 +10,6 @@ import ( "crypto/x509/pkix" "encoding/json" "fmt" - "math" "math/rand" "net/http" "os" @@ -24,7 +23,6 @@ import ( "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/rulefmt" - "github.com/prometheus/prometheus/model/value" "github.com/prometheus/prometheus/prompb" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -200,119 +198,6 @@ func TestRulerAPISingleBinary(t *testing.T) { require.NoError(t, cortexRestarted.WaitSumMetrics(e2e.Equals(1), "cortex_ruler_managers_total")) } -func TestRulerEvaluationDelay(t *testing.T) { - s, err := e2e.NewScenario(networkName) - require.NoError(t, err) - defer s.Close() - - namespace := "ns" - user := "fake" - - evaluationDelay := time.Minute * 5 - - configOverrides := map[string]string{ - "-ruler-storage.local.directory": filepath.Join(e2e.ContainerSharedDir, "ruler_configs"), - "-ruler.poll-interval": "2s", - "-ruler.rule-path": filepath.Join(e2e.ContainerSharedDir, "rule_tmp/"), - "-ruler.evaluation-delay-duration": evaluationDelay.String(), - } - - // Start Cortex components. - require.NoError(t, copyFileToSharedDir(s, "docs/configuration/single-process-config-blocks-local.yaml", cortexConfigFile)) - require.NoError(t, writeFileToSharedDir(s, filepath.Join("ruler_configs", user, namespace), []byte(cortexRulerEvalStaleNanConfigYaml))) - cortex := e2ecortex.NewSingleBinaryWithConfigFile("cortex", cortexConfigFile, configOverrides, "", 9009, 9095) - require.NoError(t, s.StartAndWaitReady(cortex)) - - // Create a client with the ruler address configured - c, err := e2ecortex.NewClient(cortex.HTTPEndpoint(), cortex.HTTPEndpoint(), "", cortex.HTTPEndpoint(), "") - require.NoError(t, err) - - now := time.Now() - - // Generate series that includes stale nans - samplesToSend := 10 - series := prompb.TimeSeries{ - Labels: []prompb.Label{ - {Name: "__name__", Value: "a_sometimes_stale_nan_series"}, - {Name: "instance", Value: "sometimes-stale"}, - }, - } - series.Samples = make([]prompb.Sample, samplesToSend) - posStale := 2 - - // Create samples, that are delayed by the evaluation delay with increasing values.
- for pos := range series.Samples { - series.Samples[pos].Timestamp = e2e.TimeToMilliseconds(now.Add(-evaluationDelay).Add(time.Duration(pos) * time.Second)) - series.Samples[pos].Value = float64(pos + 1) - - // insert staleness marker at the positions marked by posStale - if pos == posStale { - series.Samples[pos].Value = math.Float64frombits(value.StaleNaN) - } - } - - // Insert metrics - res, err := c.Push([]prompb.TimeSeries{series}) - require.NoError(t, err) - require.Equal(t, 200, res.StatusCode) - - // Get number of rule evaluations just after push - ruleEvaluationsAfterPush, err := cortex.SumMetrics([]string{"cortex_prometheus_rule_evaluations_total"}) - require.NoError(t, err) - - // Wait until the rule is evaluated for the first time - require.NoError(t, cortex.WaitSumMetrics(e2e.Greater(ruleEvaluationsAfterPush[0]), "cortex_prometheus_rule_evaluations_total")) - - // Query the timestamp of the latest result to ensure the evaluation is delayed - result, err := c.Query("timestamp(stale_nan_eval)", now) - require.NoError(t, err) - require.Equal(t, model.ValVector, result.Type()) - - vector := result.(model.Vector) - require.Equal(t, 1, vector.Len(), "expect one sample returned") - - // 290 seconds gives 10 seconds of slack between the rule evaluation and the query - // to account for CI latency, but ensures the latest evaluation was in the past. - var maxDiff int64 = 290_000 - require.GreaterOrEqual(t, e2e.TimeToMilliseconds(time.Now())-int64(vector[0].Value)*1000, maxDiff) - - // Wait until all the pushed samples have been evaluated by the rule. This - // ensures that rule results are successfully written even after a - // staleness period. - require.NoError(t, cortex.WaitSumMetrics(e2e.GreaterOrEqual(ruleEvaluationsAfterPush[0]+float64(samplesToSend)), "cortex_prometheus_rule_evaluations_total")) - - // query all results to verify rules have been evaluated correctly - result, err = c.QueryRange("stale_nan_eval", now.Add(-evaluationDelay), now, time.Second) - require.NoError(t, err) - require.Equal(t, model.ValMatrix, result.Type()) - - matrix := result.(model.Matrix) - require.GreaterOrEqual(t, 1, matrix.Len(), "expect at least a series returned") - - // Iterate through the values recorded and ensure they exist as expected. - inputPos := 0 - for _, m := range matrix { - for _, v := range m.Values { - // Skip values for stale positions - if inputPos == posStale { - inputPos++ - } - - expectedValue := model.SampleValue(2 * (inputPos + 1)) - require.Equal(t, expectedValue, v.Value) - - // Look for next value - inputPos++ - - // We have found all input values - if inputPos >= len(series.Samples) { - break - } - } - } - require.Equal(t, len(series.Samples), inputPos, "expect to have returned all evaluations") -} - func TestRulerSharding(t *testing.T) { const numRulesGroups = 100 @@ -382,6 +267,9 @@ func TestRulerSharding(t *testing.T) { // between the two rulers. require.NoError(t, ruler1.WaitSumMetrics(e2e.Less(numRulesGroups), "cortex_prometheus_rule_group_rules")) require.NoError(t, ruler2.WaitSumMetrics(e2e.Less(numRulesGroups), "cortex_prometheus_rule_group_rules")) + // Even with rules sharded, we expect rulers to have the same cortex_ruler_rule_groups_in_store metric values + require.NoError(t, ruler1.WaitSumMetrics(e2e.Equals(numRulesGroups), "cortex_ruler_rule_groups_in_store")) + require.NoError(t, ruler2.WaitSumMetrics(e2e.Equals(numRulesGroups), "cortex_ruler_rule_groups_in_store")) // Fetch the rules and ensure they match the configured ones. 
actualGroups, err := c.GetPrometheusRules(e2ecortex.DefaultFilter) @@ -1093,6 +981,152 @@ func TestRulerDisablesRuleGroups(t *testing.T) { }) } +func TestRulerHAEvaluation(t *testing.T) { + const numRulesGroups = 20 + + random := rand.New(rand.NewSource(time.Now().UnixNano())) + s, err := e2e.NewScenario(networkName) + require.NoError(t, err) + defer s.Close() + + // Generate multiple rule groups, with 1 rule each. + ruleGroups := make([]rulefmt.RuleGroup, numRulesGroups) + expectedNames := make([]string, numRulesGroups) + evalInterval, _ := model.ParseDuration("2s") + for i := 0; i < numRulesGroups; i++ { + num := random.Intn(10) + var ruleNode yaml.Node + var exprNode yaml.Node + + ruleNode.SetString(fmt.Sprintf("rule_%d", i)) + exprNode.SetString(strconv.Itoa(i)) + ruleName := fmt.Sprintf("test_%d", i) + + expectedNames[i] = ruleName + + if num%2 == 0 { + ruleGroups[i] = rulefmt.RuleGroup{ + Name: ruleName, + Interval: evalInterval, + Rules: []rulefmt.RuleNode{{ + Alert: ruleNode, + Expr: exprNode, + }}, + } + } else { + ruleGroups[i] = rulefmt.RuleGroup{ + Name: ruleName, + Interval: evalInterval, + Rules: []rulefmt.RuleNode{{ + Record: ruleNode, + Expr: exprNode, + }}, + } + } + } + + // Start dependencies. + consul := e2edb.NewConsul() + minio := e2edb.NewMinio(9000, rulestoreBucketName) + require.NoError(t, s.StartAndWaitReady(consul, minio)) + + // Configure the ruler. + overrides := map[string]string{ + // Since we're not going to run any rule, we don't need the + // store-gateway to be configured to a valid address. + "-querier.store-gateway-addresses": "localhost:12345", + // Enable the bucket index so we can skip the initial bucket scan. + "-blocks-storage.bucket-store.bucket-index.enabled": "true", + "-ruler.ring.replication-factor": "2", + "-ruler.enable-ha-evaluation": "true", + "-ruler.poll-interval": "5s", + "-ruler.client.remote-timeout": "10ms", + } + + rulerFlags := mergeFlags( + BlocksStorageFlags(), + RulerFlags(), + RulerShardingFlags(consul.NetworkHTTPEndpoint()), + overrides, + ) + + // Start rulers. + ruler1 := e2ecortex.NewRuler("ruler-1", consul.NetworkHTTPEndpoint(), rulerFlags, "") + ruler2 := e2ecortex.NewRuler("ruler-2", consul.NetworkHTTPEndpoint(), rulerFlags, "") + ruler3 := e2ecortex.NewRuler("ruler-3", consul.NetworkHTTPEndpoint(), rulerFlags, "") + rulers := e2ecortex.NewCompositeCortexService(ruler1, ruler2, ruler3) + require.NoError(t, s.StartAndWaitReady(ruler1, ruler2, ruler3)) + + // Upload rule groups to one of the rulers. + c, err := e2ecortex.NewClient("", "", "", ruler1.HTTPEndpoint(), "user-1") + require.NoError(t, err) + namespaceNames := []string{"test1", "test2", "test3", "test4", "test5"} + namespaceNameCount := make([]int, len(namespaceNames)) + nsRand := rand.New(rand.NewSource(time.Now().UnixNano())) + for _, ruleGroup := range ruleGroups { + index := nsRand.Intn(len(namespaceNames)) + namespaceNameCount[index] = namespaceNameCount[index] + 1 + require.NoError(t, c.SetRuleGroup(ruleGroup, namespaceNames[index])) + } + + // Wait until rulers have loaded all rules. 
+ require.NoError(t, rulers.WaitSumMetricsWithOptions(e2e.Equals(numRulesGroups), []string{"cortex_prometheus_rule_group_rules"}, e2e.WaitMissingMetrics)) + + ruler1SyncTotal, err := ruler1.SumMetrics([]string{"cortex_ruler_sync_rules_total"}) + require.NoError(t, err) + ruler3SyncTotal, err := ruler3.SumMetrics([]string{"cortex_ruler_sync_rules_total"}) + require.NoError(t, err) + + err = consul.Kill() // kill consul so the rulers will operate with the tokens/instances they already have + require.NoError(t, err) + + err = ruler2.Kill() + require.NoError(t, err) + + // wait for another sync + require.NoError(t, ruler1.WaitSumMetrics(e2e.Greater(ruler1SyncTotal[0]), "cortex_ruler_sync_rules_total")) + require.NoError(t, ruler3.WaitSumMetrics(e2e.Greater(ruler3SyncTotal[0]), "cortex_ruler_sync_rules_total")) + + rulers = e2ecortex.NewCompositeCortexService(ruler1, ruler3) + require.NoError(t, rulers.WaitSumMetricsWithOptions(e2e.Equals(numRulesGroups), []string{"cortex_prometheus_rule_group_rules"}, e2e.WaitMissingMetrics)) + + t.Log(ruler1.SumMetrics([]string{"cortex_prometheus_rule_group_rules"})) + t.Log(ruler3.SumMetrics([]string{"cortex_prometheus_rule_group_rules"})) + + c3, err := e2ecortex.NewClient("", "", "", ruler3.HTTPEndpoint(), "user-1") + require.NoError(t, err) + + ruler1Rules, err := c.GetRuleGroups() + require.NoError(t, err) + + ruler3Rules, err := c3.GetRuleGroups() + require.NoError(t, err) + + ruleCount := 0 + countFunc := func(ruleGroups map[string][]rulefmt.RuleGroup) { + for _, v := range ruleGroups { + ruleCount += len(v) + } + } + + countFunc(ruler1Rules) + require.Equal(t, numRulesGroups, ruleCount) + ruleCount = 0 + countFunc(ruler3Rules) + require.Equal(t, numRulesGroups, ruleCount) + + // Each rule group in this test is set to evaluate at a 2-second interval. If a ruler is down and another ruler + // assumes ownership of a group, that group may not be evaluated again until its next scheduled evaluation time. + // The following sleep ensures the rulers have evaluated the rule groups. + time.Sleep(2100 * time.Millisecond) + results, err := c.GetPrometheusRules(e2ecortex.RuleFilter{}) + require.NoError(t, err) + require.Equal(t, numRulesGroups, len(results)) + for _, v := range results { + require.False(t, v.LastEvaluation.IsZero()) + } +} + func TestRulerKeepFiring(t *testing.T) { s, err := e2e.NewScenario(networkName) require.NoError(t, err) @@ -1237,7 +1271,12 @@ type Alert struct { Value string `json:"value"` } -func alertRuleWithKeepFiringFor(groupName string, ruleName string, expression string, keepFiring model.Duration) rulefmt.RuleGroup { +func ruleGroupMatcher(user, namespace, groupName string) *labels.Matcher { + return labels.MustNewMatcher(labels.MatchEqual, "rule_group", fmt.Sprintf("/rules/%s/%s;%s", user, namespace, groupName)) +} + +func ruleGroupWithRule(groupName string, ruleName string, expression string) rulefmt.RuleGroup { + // Prepare a rule group with a single recording rule.
var recordNode = yaml.Node{} var exprNode = yaml.Node{} @@ -1248,19 +1287,13 @@ func alertRuleWithKeepFiringFor(groupName string, ruleName string, expression st Name: groupName, Interval: 10, Rules: []rulefmt.RuleNode{{ - Alert: recordNode, - Expr: exprNode, - KeepFiringFor: keepFiring, + Record: recordNode, + Expr: exprNode, }}, } } -func ruleGroupMatcher(user, namespace, groupName string) *labels.Matcher { - return labels.MustNewMatcher(labels.MatchEqual, "rule_group", fmt.Sprintf("/rules/%s/%s;%s", user, namespace, groupName)) -} - -func ruleGroupWithRule(groupName string, ruleName string, expression string) rulefmt.RuleGroup { - // Prepare rule group with invalid rule. +func alertRuleWithKeepFiringFor(groupName string, ruleName string, expression string, keepFiring model.Duration) rulefmt.RuleGroup { var recordNode = yaml.Node{} var exprNode = yaml.Node{} @@ -1271,8 +1304,9 @@ func ruleGroupWithRule(groupName string, ruleName string, expression string) rul Name: groupName, Interval: 10, Rules: []rulefmt.RuleNode{{ - Record: recordNode, - Expr: exprNode, + Alert: recordNode, + Expr: exprNode, + KeepFiringFor: keepFiring, }}, } } diff --git a/pkg/api/api.go b/pkg/api/api.go index 2ad90a0963..660c7ceb28 100644 --- a/pkg/api/api.go +++ b/pkg/api/api.go @@ -287,6 +287,8 @@ type Ingester interface { FlushHandler(http.ResponseWriter, *http.Request) ShutdownHandler(http.ResponseWriter, *http.Request) RenewTokenHandler(http.ResponseWriter, *http.Request) + AllUserStatsHandler(http.ResponseWriter, *http.Request) + ModeHandler(http.ResponseWriter, *http.Request) Push(context.Context, *cortexpb.WriteRequest) (*cortexpb.WriteResponse, error) } @@ -294,12 +296,18 @@ type Ingester interface { func (a *API) RegisterIngester(i Ingester, pushConfig distributor.Config) { client.RegisterIngesterServer(a.server.GRPC, i) + a.indexPage.AddLink(SectionAdminEndpoints, "/ingester/all_user_stats", "Usage Statistics") + a.indexPage.AddLink(SectionDangerous, "/ingester/flush", "Trigger a Flush of data from Ingester to storage") a.indexPage.AddLink(SectionDangerous, "/ingester/shutdown", "Trigger Ingester Shutdown (Dangerous)") a.indexPage.AddLink(SectionDangerous, "/ingester/renewTokens", "Renew Ingester Tokens (10%)") + a.indexPage.AddLink(SectionDangerous, "/ingester/mode?mode=READONLY", "Set Ingester to READONLY mode") + a.indexPage.AddLink(SectionDangerous, "/ingester/mode?mode=ACTIVE", "Set Ingester to ACTIVE mode") a.RegisterRoute("/ingester/flush", http.HandlerFunc(i.FlushHandler), false, "GET", "POST") a.RegisterRoute("/ingester/shutdown", http.HandlerFunc(i.ShutdownHandler), false, "GET", "POST") a.RegisterRoute("/ingester/renewTokens", http.HandlerFunc(i.RenewTokenHandler), false, "GET", "POST") + a.RegisterRoute("/ingester/all_user_stats", http.HandlerFunc(i.AllUserStatsHandler), false, "GET") + a.RegisterRoute("/ingester/mode", http.HandlerFunc(i.ModeHandler), false, "GET", "POST") a.RegisterRoute("/ingester/push", push.Handler(pushConfig.MaxRecvMsgSize, a.sourceIPs, i.Push), true, "POST") // For testing and debugging. 
// Legacy Routes diff --git a/pkg/compactor/compactor.go b/pkg/compactor/compactor.go index ff7907fe2c..817f93572b 100644 --- a/pkg/compactor/compactor.go +++ b/pkg/compactor/compactor.go @@ -53,9 +53,12 @@ var ( errInvalidBlockRanges = "compactor block range periods should be divisible by the previous one, but %s is not divisible by %s" RingOp = ring.NewOp([]ring.InstanceState{ring.ACTIVE}, nil) - supportedShardingStrategies = []string{util.ShardingStrategyDefault, util.ShardingStrategyShuffle} - errInvalidShardingStrategy = errors.New("invalid sharding strategy") - errInvalidTenantShardSize = errors.New("invalid tenant shard size, the value must be greater than 0") + supportedShardingStrategies = []string{util.ShardingStrategyDefault, util.ShardingStrategyShuffle} + errInvalidShardingStrategy = errors.New("invalid sharding strategy") + errInvalidTenantShardSize = errors.New("invalid tenant shard size, the value must be greater than 0") + supportedCompactionStrategies = []string{util.CompactionStrategyDefault, util.CompactionStrategyPartitioning} + errInvalidCompactionStrategy = errors.New("invalid compaction strategy") + errInvalidCompactionStrategyPartitioning = errors.New("compaction strategy partitioning can only be enabled when shuffle sharding is enabled") DefaultBlocksGrouperFactory = func(ctx context.Context, cfg Config, bkt objstore.InstrumentedBucket, logger log.Logger, blocksMarkedForNoCompaction prometheus.Counter, _ prometheus.Counter, _ prometheus.Counter, syncerMetrics *compact.SyncerMetrics, compactorMetrics *compactorMetrics, _ *ring.Ring, _ *ring.Lifecycler, _ Limits, _ string, _ *compact.GatherNoCompactionMarkFilter) compact.Grouper { return compact.NewDefaultGrouperWithMetrics( @@ -77,29 +80,33 @@ var ( } ShuffleShardingGrouperFactory = func(ctx context.Context, cfg Config, bkt objstore.InstrumentedBucket, logger log.Logger, blocksMarkedForNoCompaction prometheus.Counter, blockVisitMarkerReadFailed prometheus.Counter, blockVisitMarkerWriteFailed prometheus.Counter, syncerMetrics *compact.SyncerMetrics, compactorMetrics *compactorMetrics, ring *ring.Ring, ringLifecycle *ring.Lifecycler, limits Limits, userID string, noCompactionMarkFilter *compact.GatherNoCompactionMarkFilter) compact.Grouper { - return NewShuffleShardingGrouper( - ctx, - logger, - bkt, - cfg.AcceptMalformedIndex, - true, // Enable vertical compaction - blocksMarkedForNoCompaction, - metadata.NoneFunc, - syncerMetrics, - compactorMetrics, - cfg, - ring, - ringLifecycle.Addr, - ringLifecycle.ID, - limits, - userID, - cfg.BlockFilesConcurrency, - cfg.BlocksFetchConcurrency, - cfg.CompactionConcurrency, - cfg.BlockVisitMarkerTimeout, - blockVisitMarkerReadFailed, - blockVisitMarkerWriteFailed, - noCompactionMarkFilter.NoCompactMarkedBlocks) + if cfg.CompactionStrategy == util.CompactionStrategyPartitioning { + return NewPartitionCompactionGrouper(ctx, logger, bkt) + } else { + return NewShuffleShardingGrouper( + ctx, + logger, + bkt, + cfg.AcceptMalformedIndex, + true, // Enable vertical compaction + blocksMarkedForNoCompaction, + metadata.NoneFunc, + syncerMetrics, + compactorMetrics, + cfg, + ring, + ringLifecycle.Addr, + ringLifecycle.ID, + limits, + userID, + cfg.BlockFilesConcurrency, + cfg.BlocksFetchConcurrency, + cfg.CompactionConcurrency, + cfg.BlockVisitMarkerTimeout, + blockVisitMarkerReadFailed, + blockVisitMarkerWriteFailed, + noCompactionMarkFilter.NoCompactMarkedBlocks) + } } DefaultBlocksCompactorFactory = func(ctx context.Context, cfg Config, logger log.Logger, reg prometheus.Registerer) 
(compact.Compactor, PlannerFactory, error) { @@ -123,7 +130,11 @@ var ( plannerFactory := func(ctx context.Context, bkt objstore.InstrumentedBucket, logger log.Logger, cfg Config, noCompactionMarkFilter *compact.GatherNoCompactionMarkFilter, ringLifecycle *ring.Lifecycler, userID string, blockVisitMarkerReadFailed prometheus.Counter, blockVisitMarkerWriteFailed prometheus.Counter, compactorMetrics *compactorMetrics) compact.Planner { - return NewShuffleShardingPlanner(ctx, bkt, logger, cfg.BlockRanges.ToMilliseconds(), noCompactionMarkFilter.NoCompactMarkedBlocks, ringLifecycle.ID, cfg.BlockVisitMarkerTimeout, cfg.BlockVisitMarkerFileUpdateInterval, blockVisitMarkerReadFailed, blockVisitMarkerWriteFailed) + if cfg.CompactionStrategy == util.CompactionStrategyPartitioning { + return NewPartitionCompactionPlanner(ctx, bkt, logger) + } else { + return NewShuffleShardingPlanner(ctx, bkt, logger, cfg.BlockRanges.ToMilliseconds(), noCompactionMarkFilter.NoCompactMarkedBlocks, ringLifecycle.ID, cfg.BlockVisitMarkerTimeout, cfg.BlockVisitMarkerFileUpdateInterval, blockVisitMarkerReadFailed, blockVisitMarkerWriteFailed) + } } return compactor, plannerFactory, nil } @@ -202,6 +213,9 @@ type Config struct { ShardingStrategy string `yaml:"sharding_strategy"` ShardingRing RingConfig `yaml:"sharding_ring"` + // Compaction mode. + CompactionStrategy string `yaml:"compaction_mode"` + // No need to add options to customize the retry backoff, // given the defaults should be fine, but allow to override // it in tests. @@ -244,6 +258,7 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { f.IntVar(&cfg.CleanupConcurrency, "compactor.cleanup-concurrency", 20, "Max number of tenants for which blocks cleanup and maintenance should run concurrently.") f.BoolVar(&cfg.ShardingEnabled, "compactor.sharding-enabled", false, "Shard tenants across multiple compactor instances. Sharding is required if you run multiple compactor instances, in order to coordinate compactions and avoid race conditions leading to the same tenant blocks simultaneously compacted by different instances.") f.StringVar(&cfg.ShardingStrategy, "compactor.sharding-strategy", util.ShardingStrategyDefault, fmt.Sprintf("The sharding strategy to use. Supported values are: %s.", strings.Join(supportedShardingStrategies, ", "))) + f.StringVar(&cfg.CompactionStrategy, "compactor.compaction-mode", util.CompactionStrategyDefault, fmt.Sprintf("The compaction strategy to use. Supported values are: %s.", strings.Join(supportedCompactionStrategies, ", "))) f.DurationVar(&cfg.DeletionDelay, "compactor.deletion-delay", 12*time.Hour, "Time before a block marked for deletion is deleted from bucket. "+ "If not 0, blocks will be marked for deletion and compactor component will permanently delete blocks marked for deletion from the bucket. "+ "If 0, blocks will be deleted straight away. 
Note that deleting blocks immediately can cause query failures.") @@ -290,6 +305,15 @@ func (cfg *Config) Validate(limits validation.Limits) error { } } + // Make sure a valid compaction mode is being used + if !util.StringsContain(supportedCompactionStrategies, cfg.CompactionStrategy) { + return errInvalidCompactionStrategy + } + + if !cfg.ShardingEnabled && cfg.CompactionStrategy == util.CompactionStrategyPartitioning { + return errInvalidCompactionStrategyPartitioning + } + return nil } diff --git a/pkg/compactor/compactor_test.go b/pkg/compactor/compactor_test.go index 908f962cf2..cf76ff735a 100644 --- a/pkg/compactor/compactor_test.go +++ b/pkg/compactor/compactor_test.go @@ -263,26 +263,12 @@ func TestCompactor_ShouldDoNothingOnNoUserBlocks(t *testing.T) { "cortex_compactor_runs_started_total", "cortex_compactor_runs_completed_total", "cortex_compactor_runs_failed_total", - "cortex_compactor_garbage_collected_blocks_total", - "cortex_compactor_garbage_collection_duration_seconds", - "cortex_compactor_garbage_collection_failures_total", - "cortex_compactor_garbage_collection_total", - "cortex_compactor_meta_sync_consistency_delay_seconds", - "cortex_compactor_meta_sync_duration_seconds", - "cortex_compactor_meta_sync_failures_total", - "cortex_compactor_meta_syncs_total", - "cortex_compactor_group_compaction_runs_completed_total", - "cortex_compactor_group_compaction_runs_started_total", - "cortex_compactor_group_compactions_failures_total", - "cortex_compactor_group_compactions_total", - "cortex_compactor_group_vertical_compactions_total", "cortex_compactor_block_cleanup_failures_total", "cortex_compactor_blocks_cleaned_total", - "cortex_compactor_blocks_marked_for_deletion_total", "cortex_compactor_blocks_marked_for_no_compaction_total", + "cortex_compactor_meta_sync_consistency_delay_seconds", "cortex_compactor_block_cleanup_started_total", "cortex_compactor_block_cleanup_completed_total", - "cortex_compactor_block_cleanup_failed_total", )) } @@ -350,25 +336,10 @@ func TestCompactor_ShouldRetryCompactionOnFailureWhileDiscoveringUsersFromBucket "cortex_compactor_runs_started_total", "cortex_compactor_runs_completed_total", "cortex_compactor_runs_failed_total", - "cortex_compactor_garbage_collected_blocks_total", - "cortex_compactor_garbage_collection_duration_seconds", - "cortex_compactor_garbage_collection_failures_total", - "cortex_compactor_garbage_collection_total", - "cortex_compactor_meta_sync_consistency_delay_seconds", - "cortex_compactor_meta_sync_duration_seconds", - "cortex_compactor_meta_sync_failures_total", - "cortex_compactor_meta_syncs_total", - "cortex_compactor_group_compaction_runs_completed_total", - "cortex_compactor_group_compaction_runs_started_total", - "cortex_compactor_group_compactions_failures_total", - "cortex_compactor_group_compactions_total", - "cortex_compactor_group_vertical_compactions_total", "cortex_compactor_block_cleanup_failures_total", "cortex_compactor_blocks_cleaned_total", - "cortex_compactor_blocks_marked_for_deletion_total", "cortex_compactor_blocks_marked_for_no_compaction_total", - "cortex_compactor_block_cleanup_started_total", - "cortex_compactor_block_cleanup_completed_total", + "cortex_compactor_meta_sync_consistency_delay_seconds", "cortex_compactor_block_cleanup_failed_total", )) } @@ -572,7 +543,7 @@ func TestCompactor_ShouldIterateOverUsersAndRunCompaction(t *testing.T) { testedMetrics := []string{ "cortex_compactor_runs_started_total", "cortex_compactor_runs_completed_total", "cortex_compactor_runs_failed_total", 
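The validation added above reduces to two checks: the strategy string must be one of the supported values, and partitioning additionally requires sharding to be enabled. A standalone sketch of the same rules, assuming the util constants resolve to the strings "default" and "partitioning" (their values are not shown in this diff):

package main

import (
	"errors"
	"fmt"
)

var (
	errInvalidCompactionStrategy             = errors.New("invalid compaction strategy")
	errInvalidCompactionStrategyPartitioning = errors.New("compaction strategy partitioning can only be enabled when shuffle sharding is enabled")
)

// validateCompaction mirrors the two checks added to Config.Validate.
func validateCompaction(strategy string, shardingEnabled bool) error {
	supported := map[string]bool{"default": true, "partitioning": true} // assumed constant values
	if !supported[strategy] {
		return errInvalidCompactionStrategy
	}
	if !shardingEnabled && strategy == "partitioning" {
		return errInvalidCompactionStrategyPartitioning
	}
	return nil
}

func main() {
	fmt.Println(validateCompaction("partitioning", false)) // rejected: needs sharding
	fmt.Println(validateCompaction("partitioning", true))  // <nil>
	fmt.Println(validateCompaction("split-merge", true))   // rejected: unknown strategy
}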
"cortex_compactor_blocks_cleaned_total", "cortex_compactor_block_cleanup_failures_total", "cortex_compactor_blocks_marked_for_deletion_total", - "cortex_compactor_block_cleanup_started_total", "cortex_compactor_block_cleanup_completed_total", "cortex_compactor_block_cleanup_failed_total", + "cortex_compactor_block_cleanup_started_total", "cortex_compactor_block_cleanup_completed_total", "cortex_compactor_blocks_marked_for_no_compaction_total", } assert.NoError(t, prom_testutil.GatherAndCompare(registry, strings.NewReader(` @@ -698,7 +669,7 @@ func TestCompactor_ShouldNotCompactBlocksMarkedForDeletion(t *testing.T) { testedMetrics := []string{ "cortex_compactor_runs_started_total", "cortex_compactor_runs_completed_total", "cortex_compactor_runs_failed_total", "cortex_compactor_blocks_cleaned_total", "cortex_compactor_block_cleanup_failures_total", "cortex_compactor_blocks_marked_for_deletion_total", - "cortex_compactor_block_cleanup_started_total", "cortex_compactor_block_cleanup_completed_total", "cortex_compactor_block_cleanup_failed_total", + "cortex_compactor_block_cleanup_started_total", "cortex_compactor_block_cleanup_completed_total", "cortex_compactor_blocks_marked_for_no_compaction_total", } assert.NoError(t, prom_testutil.GatherAndCompare(registry, strings.NewReader(` @@ -887,9 +858,8 @@ func TestCompactor_ShouldNotCompactBlocksForUsersMarkedForDeletion(t *testing.T) // Real shipper metrics are too variable to embed into a test. testedMetrics := []string{ "cortex_compactor_runs_started_total", "cortex_compactor_runs_completed_total", "cortex_compactor_runs_failed_total", - "cortex_compactor_blocks_cleaned_total", "cortex_compactor_block_cleanup_failures_total", "cortex_compactor_blocks_marked_for_deletion_total", - "cortex_compactor_block_cleanup_started_total", "cortex_compactor_block_cleanup_completed_total", "cortex_compactor_block_cleanup_failed_total", - "cortex_bucket_blocks_count", "cortex_bucket_blocks_marked_for_deletion_count", "cortex_bucket_index_last_successful_update_timestamp_seconds", + "cortex_compactor_blocks_cleaned_total", "cortex_compactor_block_cleanup_failures_total", + "cortex_compactor_block_cleanup_started_total", "cortex_compactor_block_cleanup_completed_total", "cortex_compactor_blocks_marked_for_no_compaction_total", } assert.NoError(t, prom_testutil.GatherAndCompare(registry, strings.NewReader(` @@ -2081,7 +2051,6 @@ func TestCompactor_FailedWithRetriableError(t *testing.T) { # TYPE cortex_compactor_compaction_error_total counter cortex_compactor_compaction_error_total{type="retriable", user="user-1"} 2 `), - "cortex_compactor_compaction_retry_error_total", "cortex_compactor_compaction_error_total", )) } @@ -2135,7 +2104,6 @@ func TestCompactor_FailedWithHaltError(t *testing.T) { # TYPE cortex_compactor_compaction_error_total counter cortex_compactor_compaction_error_total{type="halt", user="user-1"} 1 `), - "cortex_compactor_compaction_retry_error_total", "cortex_compactor_compaction_error_total", )) } diff --git a/pkg/compactor/partition_compaction_grouper.go b/pkg/compactor/partition_compaction_grouper.go new file mode 100644 index 0000000000..c3687f7e6a --- /dev/null +++ b/pkg/compactor/partition_compaction_grouper.go @@ -0,0 +1,38 @@ +package compactor + +import ( + "context" + + "github.com/go-kit/log" + "github.com/oklog/ulid" + "github.com/thanos-io/objstore" + "github.com/thanos-io/thanos/pkg/block/metadata" + "github.com/thanos-io/thanos/pkg/compact" +) + +type PartitionCompactionGrouper struct { + ctx context.Context + logger log.Logger + 
bkt objstore.InstrumentedBucket +} + +func NewPartitionCompactionGrouper( + ctx context.Context, + logger log.Logger, + bkt objstore.InstrumentedBucket, +) *PartitionCompactionGrouper { + if logger == nil { + logger = log.NewNopLogger() + } + + return &PartitionCompactionGrouper{ + ctx: ctx, + logger: logger, + bkt: bkt, + } +} + +// Groups function modified from https://github.com/cortexproject/cortex/pull/2616 +func (g *PartitionCompactionGrouper) Groups(blocks map[ulid.ULID]*metadata.Meta) (res []*compact.Group, err error) { + panic("PartitionCompactionGrouper not implemented") +} diff --git a/pkg/compactor/partition_compaction_planner.go b/pkg/compactor/partition_compaction_planner.go new file mode 100644 index 0000000000..963771aa6d --- /dev/null +++ b/pkg/compactor/partition_compaction_planner.go @@ -0,0 +1,31 @@ +package compactor + +import ( + "context" + + "github.com/go-kit/log" + "github.com/thanos-io/objstore" + "github.com/thanos-io/thanos/pkg/block/metadata" +) + +type PartitionCompactionPlanner struct { + ctx context.Context + bkt objstore.InstrumentedBucket + logger log.Logger +} + +func NewPartitionCompactionPlanner( + ctx context.Context, + bkt objstore.InstrumentedBucket, + logger log.Logger, +) *PartitionCompactionPlanner { + return &PartitionCompactionPlanner{ + ctx: ctx, + bkt: bkt, + logger: logger, + } +} + +func (p *PartitionCompactionPlanner) Plan(ctx context.Context, metasByMinTime []*metadata.Meta, errChan chan error, extensions any) ([]*metadata.Meta, error) { + panic("PartitionCompactionPlanner not implemented") +} diff --git a/pkg/distributor/distributor.go b/pkg/distributor/distributor.go index 664a221b10..351187ba83 100644 --- a/pkg/distributor/distributor.go +++ b/pkg/distributor/distributor.go @@ -21,6 +21,7 @@ import ( "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/relabel" "github.com/prometheus/prometheus/scrape" + "github.com/prometheus/prometheus/storage" "github.com/weaveworks/common/httpgrpc" "github.com/weaveworks/common/instrument" "github.com/weaveworks/common/user" @@ -28,6 +29,7 @@ import ( "github.com/cortexproject/cortex/pkg/cortexpb" "github.com/cortexproject/cortex/pkg/ha" + "github.com/cortexproject/cortex/pkg/ingester" ingester_client "github.com/cortexproject/cortex/pkg/ingester/client" "github.com/cortexproject/cortex/pkg/ring" ring_client "github.com/cortexproject/cortex/pkg/ring/client" @@ -671,7 +673,7 @@ func (d *Distributor) Push(ctx context.Context, req *cortexpb.WriteRequest) (*co return nil, err } // If there wasn't an error but removeReplica is false that means we didn't find both HA labels. 
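// In that case the write is counted toward the tenant's non-HA samples
// (floats and histograms alike) rather than being deduplicated.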
- if !removeReplica { + if !removeReplica { // False, Nil d.nonHASamples.WithLabelValues(userID).Add(float64(numFloatSamples + numHistogramSamples)) } } @@ -1021,7 +1023,7 @@ func (d *Distributor) ForReplicationSet(ctx context.Context, replicationSet ring }) } -func (d *Distributor) LabelValuesForLabelNameCommon(ctx context.Context, from, to model.Time, labelName model.LabelName, f func(ctx context.Context, rs ring.ReplicationSet, req *ingester_client.LabelValuesRequest) ([]interface{}, error), matchers ...*labels.Matcher) ([]string, error) { +func (d *Distributor) LabelValuesForLabelNameCommon(ctx context.Context, from, to model.Time, labelName model.LabelName, hints *storage.LabelHints, f func(ctx context.Context, rs ring.ReplicationSet, req *ingester_client.LabelValuesRequest) ([]interface{}, error), matchers ...*labels.Matcher) ([]string, error) { span, ctx := opentracing.StartSpanFromContext(ctx, "Distributor.LabelValues", opentracing.Tags{ "name": labelName, "start": from.Unix(), @@ -1032,8 +1034,8 @@ func (d *Distributor) LabelValuesForLabelNameCommon(ctx context.Context, from, t if err != nil { return nil, err } - - req, err := ingester_client.ToLabelValuesRequest(labelName, from, to, matchers) + limit := getLimitFromLabelHints(hints) + req, err := ingester_client.ToLabelValuesRequest(labelName, from, to, limit, matchers) if err != nil { return nil, err } @@ -1053,13 +1055,16 @@ func (d *Distributor) LabelValuesForLabelNameCommon(ctx context.Context, from, t if err != nil { return nil, err } + if limit > 0 && len(r) > limit { + r = r[:limit] + } span.SetTag("result_length", len(r)) return r, nil } // LabelValuesForLabelName returns all the label values that are associated with a given label name. -func (d *Distributor) LabelValuesForLabelName(ctx context.Context, from, to model.Time, labelName model.LabelName, matchers ...*labels.Matcher) ([]string, error) { - return d.LabelValuesForLabelNameCommon(ctx, from, to, labelName, func(ctx context.Context, rs ring.ReplicationSet, req *ingester_client.LabelValuesRequest) ([]interface{}, error) { +func (d *Distributor) LabelValuesForLabelName(ctx context.Context, from, to model.Time, labelName model.LabelName, hint *storage.LabelHints, matchers ...*labels.Matcher) ([]string, error) { + return d.LabelValuesForLabelNameCommon(ctx, from, to, labelName, hint, func(ctx context.Context, rs ring.ReplicationSet, req *ingester_client.LabelValuesRequest) ([]interface{}, error) { return d.ForReplicationSet(ctx, rs, d.cfg.ZoneResultsQuorumMetadata, func(ctx context.Context, client ingester_client.IngesterClient) (interface{}, error) { resp, err := client.LabelValues(ctx, req) if err != nil { @@ -1071,8 +1076,8 @@ func (d *Distributor) LabelValuesForLabelName(ctx context.Context, from, to mode } // LabelValuesForLabelNameStream returns all the label values that are associated with a given label name. 
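// The hints parameter below is optional: callers with no cap pass nil, which
// getLimitFromLabelHints (added at the bottom of this file) maps to 0, meaning
// no truncation of the merged result.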
-func (d *Distributor) LabelValuesForLabelNameStream(ctx context.Context, from, to model.Time, labelName model.LabelName, matchers ...*labels.Matcher) ([]string, error) { - return d.LabelValuesForLabelNameCommon(ctx, from, to, labelName, func(ctx context.Context, rs ring.ReplicationSet, req *ingester_client.LabelValuesRequest) ([]interface{}, error) { +func (d *Distributor) LabelValuesForLabelNameStream(ctx context.Context, from, to model.Time, labelName model.LabelName, hint *storage.LabelHints, matchers ...*labels.Matcher) ([]string, error) { + return d.LabelValuesForLabelNameCommon(ctx, from, to, labelName, hint, func(ctx context.Context, rs ring.ReplicationSet, req *ingester_client.LabelValuesRequest) ([]interface{}, error) { return d.ForReplicationSet(ctx, rs, d.cfg.ZoneResultsQuorumMetadata, func(ctx context.Context, client ingester_client.IngesterClient) (interface{}, error) { stream, err := client.LabelValuesStream(ctx, req) if err != nil { @@ -1096,7 +1101,7 @@ func (d *Distributor) LabelValuesForLabelNameStream(ctx context.Context, from, t }, matchers...) } -func (d *Distributor) LabelNamesCommon(ctx context.Context, from, to model.Time, f func(ctx context.Context, rs ring.ReplicationSet, req *ingester_client.LabelNamesRequest) ([]interface{}, error)) ([]string, error) { +func (d *Distributor) LabelNamesCommon(ctx context.Context, from, to model.Time, hints *storage.LabelHints, f func(ctx context.Context, rs ring.ReplicationSet, req *ingester_client.LabelNamesRequest) ([]interface{}, error)) ([]string, error) { span, ctx := opentracing.StartSpanFromContext(ctx, "Distributor.LabelNames", opentracing.Tags{ "start": from.Unix(), "end": to.Unix(), @@ -1107,9 +1112,11 @@ func (d *Distributor) LabelNamesCommon(ctx context.Context, from, to model.Time, return nil, err } + limit := getLimitFromLabelHints(hints) req := &ingester_client.LabelNamesRequest{ StartTimestampMs: int64(from), EndTimestampMs: int64(to), + Limit: int64(limit), } resps, err := f(ctx, replicationSet, req) if err != nil { @@ -1126,13 +1133,17 @@ func (d *Distributor) LabelNamesCommon(ctx context.Context, from, to model.Time, if err != nil { return nil, err } + if limit > 0 && len(r) > limit { + r = r[:limit] + } + span.SetTag("result_length", len(r)) return r, nil } -func (d *Distributor) LabelNamesStream(ctx context.Context, from, to model.Time) ([]string, error) { - return d.LabelNamesCommon(ctx, from, to, func(ctx context.Context, rs ring.ReplicationSet, req *ingester_client.LabelNamesRequest) ([]interface{}, error) { +func (d *Distributor) LabelNamesStream(ctx context.Context, from, to model.Time, hints *storage.LabelHints) ([]string, error) { + return d.LabelNamesCommon(ctx, from, to, hints, func(ctx context.Context, rs ring.ReplicationSet, req *ingester_client.LabelNamesRequest) ([]interface{}, error) { return d.ForReplicationSet(ctx, rs, d.cfg.ZoneResultsQuorumMetadata, func(ctx context.Context, client ingester_client.IngesterClient) (interface{}, error) { stream, err := client.LabelNamesStream(ctx, req) if err != nil { @@ -1157,8 +1168,8 @@ func (d *Distributor) LabelNamesStream(ctx context.Context, from, to model.Time) } // LabelNames returns all the label names. 
-func (d *Distributor) LabelNames(ctx context.Context, from, to model.Time) ([]string, error) { - return d.LabelNamesCommon(ctx, from, to, func(ctx context.Context, rs ring.ReplicationSet, req *ingester_client.LabelNamesRequest) ([]interface{}, error) { +func (d *Distributor) LabelNames(ctx context.Context, from, to model.Time, hint *storage.LabelHints) ([]string, error) { + return d.LabelNamesCommon(ctx, from, to, hint, func(ctx context.Context, rs ring.ReplicationSet, req *ingester_client.LabelNamesRequest) ([]interface{}, error) { return d.ForReplicationSet(ctx, rs, d.cfg.ZoneResultsQuorumMetadata, func(ctx context.Context, client ingester_client.IngesterClient) (interface{}, error) { resp, err := client.LabelNames(ctx, req) if err != nil { @@ -1170,8 +1181,8 @@ func (d *Distributor) LabelNames(ctx context.Context, from, to model.Time) ([]st } // MetricsForLabelMatchers gets the metrics that match said matchers -func (d *Distributor) MetricsForLabelMatchers(ctx context.Context, from, through model.Time, matchers ...*labels.Matcher) ([]model.Metric, error) { - return d.metricsForLabelMatchersCommon(ctx, from, through, func(ctx context.Context, rs ring.ReplicationSet, req *ingester_client.MetricsForLabelMatchersRequest, metrics *map[model.Fingerprint]model.Metric, mutex *sync.Mutex, queryLimiter *limiter.QueryLimiter) error { +func (d *Distributor) MetricsForLabelMatchers(ctx context.Context, from, through model.Time, hint *storage.SelectHints, matchers ...*labels.Matcher) ([]model.Metric, error) { + return d.metricsForLabelMatchersCommon(ctx, from, through, hint, func(ctx context.Context, rs ring.ReplicationSet, req *ingester_client.MetricsForLabelMatchersRequest, metrics *map[model.Fingerprint]model.Metric, mutex *sync.Mutex, queryLimiter *limiter.QueryLimiter) error { _, err := d.ForReplicationSet(ctx, rs, false, func(ctx context.Context, client ingester_client.IngesterClient) (interface{}, error) { resp, err := client.MetricsForLabelMatchers(ctx, req) if err != nil { @@ -1199,8 +1210,8 @@ func (d *Distributor) MetricsForLabelMatchers(ctx context.Context, from, through }, matchers...) } -func (d *Distributor) MetricsForLabelMatchersStream(ctx context.Context, from, through model.Time, matchers ...*labels.Matcher) ([]model.Metric, error) { - return d.metricsForLabelMatchersCommon(ctx, from, through, func(ctx context.Context, rs ring.ReplicationSet, req *ingester_client.MetricsForLabelMatchersRequest, metrics *map[model.Fingerprint]model.Metric, mutex *sync.Mutex, queryLimiter *limiter.QueryLimiter) error { +func (d *Distributor) MetricsForLabelMatchersStream(ctx context.Context, from, through model.Time, hint *storage.SelectHints, matchers ...*labels.Matcher) ([]model.Metric, error) { + return d.metricsForLabelMatchersCommon(ctx, from, through, hint, func(ctx context.Context, rs ring.ReplicationSet, req *ingester_client.MetricsForLabelMatchersRequest, metrics *map[model.Fingerprint]model.Metric, mutex *sync.Mutex, queryLimiter *limiter.QueryLimiter) error { _, err := d.ForReplicationSet(ctx, rs, false, func(ctx context.Context, client ingester_client.IngesterClient) (interface{}, error) { stream, err := client.MetricsForLabelMatchersStream(ctx, req) if err != nil { @@ -1239,14 +1250,14 @@ func (d *Distributor) MetricsForLabelMatchersStream(ctx context.Context, from, t }, matchers...) 
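All of the lookup entry points above now thread an optional hints value down to the ingester request and clamp the merged result on the way back. A minimal sketch of that contract, reusing the getLimitFromLabelHints helper added near the end of this file (the demo values are mine; nil hints mean no limit):

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/storage"
)

// getLimitFromLabelHints matches the helper added in this diff.
func getLimitFromLabelHints(hints *storage.LabelHints) int {
	if hints != nil {
		return hints.Limit
	}
	return 0
}

// clamp applies the same post-merge truncation the distributor performs.
func clamp(merged []string, limit int) []string {
	if limit > 0 && len(merged) > limit {
		return merged[:limit]
	}
	return merged
}

func main() {
	merged := []string{"a", "b", "c", "d"}
	fmt.Println(clamp(merged, getLimitFromLabelHints(nil)))                           // [a b c d]
	fmt.Println(clamp(merged, getLimitFromLabelHints(&storage.LabelHints{Limit: 2}))) // [a b]
}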
} -func (d *Distributor) metricsForLabelMatchersCommon(ctx context.Context, from, through model.Time, f func(context.Context, ring.ReplicationSet, *ingester_client.MetricsForLabelMatchersRequest, *map[model.Fingerprint]model.Metric, *sync.Mutex, *limiter.QueryLimiter) error, matchers ...*labels.Matcher) ([]model.Metric, error) { +func (d *Distributor) metricsForLabelMatchersCommon(ctx context.Context, from, through model.Time, hints *storage.SelectHints, f func(context.Context, ring.ReplicationSet, *ingester_client.MetricsForLabelMatchersRequest, *map[model.Fingerprint]model.Metric, *sync.Mutex, *limiter.QueryLimiter) error, matchers ...*labels.Matcher) ([]model.Metric, error) { replicationSet, err := d.GetIngestersForMetadata(ctx) queryLimiter := limiter.QueryLimiterFromContextWithFallback(ctx) if err != nil { return nil, err } - req, err := ingester_client.ToMetricsForLabelMatchersRequest(from, through, matchers) + req, err := ingester_client.ToMetricsForLabelMatchersRequest(from, through, getLimitFromSelectHints(hints), matchers) if err != nil { return nil, err } @@ -1309,7 +1320,7 @@ func (d *Distributor) MetricsMetadata(ctx context.Context) ([]scrape.MetricMetad } // UserStats returns statistics about the current user. -func (d *Distributor) UserStats(ctx context.Context) (*UserStats, error) { +func (d *Distributor) UserStats(ctx context.Context) (*ingester.UserStats, error) { replicationSet, err := d.GetIngestersForMetadata(ctx) if err != nil { return nil, err @@ -1326,7 +1337,7 @@ func (d *Distributor) UserStats(ctx context.Context) (*UserStats, error) { return nil, err } - totalStats := &UserStats{} + totalStats := &ingester.UserStats{} for _, resp := range resps { r := resp.(*ingester_client.UserStatsResponse) totalStats.IngestionRate += r.IngestionRate @@ -1344,17 +1355,11 @@ func (d *Distributor) UserStats(ctx context.Context) (*UserStats, error) { return totalStats, nil } -// UserIDStats models ingestion statistics for one user, including the user ID -type UserIDStats struct { - UserID string `json:"userID"` - UserStats -} - // AllUserStats returns statistics about all users. 
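// Each ingester reports its local per-tenant totals; the loop below sums them
// by user ID, now carrying the new LoadedBlocks field through as well.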
// Note it does not divide by the ReplicationFactor like UserStats() -func (d *Distributor) AllUserStats(ctx context.Context) ([]UserIDStats, error) { +func (d *Distributor) AllUserStats(ctx context.Context) ([]ingester.UserIDStats, error) { // Add up by user, across all responses from ingesters - perUserTotals := make(map[string]UserStats) + perUserTotals := make(map[string]ingester.UserStats) req := &ingester_client.UserStatsRequest{} ctx = user.InjectOrgID(ctx, "1") // fake: ingester insists on having an org ID @@ -1379,21 +1384,23 @@ func (d *Distributor) AllUserStats(ctx context.Context) ([]UserIDStats, error) { s.RuleIngestionRate += u.Data.RuleIngestionRate s.NumSeries += u.Data.NumSeries s.ActiveSeries += u.Data.ActiveSeries + s.LoadedBlocks += u.Data.LoadedBlocks perUserTotals[u.UserId] = s } } // Turn aggregated map into a slice for return - response := make([]UserIDStats, 0, len(perUserTotals)) + response := make([]ingester.UserIDStats, 0, len(perUserTotals)) for id, stats := range perUserTotals { - response = append(response, UserIDStats{ + response = append(response, ingester.UserIDStats{ UserID: id, - UserStats: UserStats{ + UserStats: ingester.UserStats{ IngestionRate: stats.IngestionRate, APIIngestionRate: stats.APIIngestionRate, RuleIngestionRate: stats.RuleIngestionRate, NumSeries: stats.NumSeries, ActiveSeries: stats.ActiveSeries, + LoadedBlocks: stats.LoadedBlocks, }, }) } @@ -1401,6 +1408,17 @@ func (d *Distributor) AllUserStats(ctx context.Context) ([]UserIDStats, error) { return response, nil } +// AllUserStatsHandler shows stats for all users. +func (d *Distributor) AllUserStatsHandler(w http.ResponseWriter, r *http.Request) { + stats, err := d.AllUserStats(r.Context()) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + ingester.AllUserStatsRender(w, r, stats, d.ingestersRing.ReplicationFactor()) +} + func (d *Distributor) ServeHTTP(w http.ResponseWriter, req *http.Request) { if d.distributorsRing != nil { d.distributorsRing.ServeHTTP(w, req) @@ -1438,3 +1456,17 @@ func findHALabels(replicaLabel, clusterLabel string, labels []cortexpb.LabelAdap return cluster, replica } + +func getLimitFromLabelHints(hints *storage.LabelHints) int { + if hints != nil { + return hints.Limit + } + return 0 +} + +func getLimitFromSelectHints(hints *storage.SelectHints) int { + if hints != nil { + return hints.Limit + } + return 0 +} diff --git a/pkg/distributor/distributor_test.go b/pkg/distributor/distributor_test.go index 75c104528d..e5eda834ca 100644 --- a/pkg/distributor/distributor_test.go +++ b/pkg/distributor/distributor_test.go @@ -404,21 +404,25 @@ func TestDistributor_MetricsCleanup(t *testing.T) { d := dists[0] reg := regs[0] - metrics := []string{ + permanentMetrics := []string{ "cortex_distributor_received_samples_total", "cortex_distributor_received_exemplars_total", "cortex_distributor_received_metadata_total", - "cortex_distributor_deduped_samples_total", "cortex_distributor_samples_in_total", - "cortex_distributor_exemplars_in_total", - "cortex_distributor_metadata_in_total", - "cortex_distributor_non_ha_samples_received_total", - "cortex_distributor_latest_seen_sample_timestamp_seconds", "cortex_distributor_ingester_append_failures_total", "cortex_distributor_ingester_appends_total", "cortex_distributor_ingester_query_failures_total", "cortex_distributor_ingester_queries_total", } + removedMetrics := []string{ + "cortex_distributor_deduped_samples_total", + "cortex_distributor_exemplars_in_total", + 
"cortex_distributor_metadata_in_total", + "cortex_distributor_non_ha_samples_received_total", + "cortex_distributor_latest_seen_sample_timestamp_seconds", + } + + allMetrics := append(removedMetrics, permanentMetrics...) d.receivedSamples.WithLabelValues("userA", sampleMetricTypeFloat).Add(5) d.receivedSamples.WithLabelValues("userB", sampleMetricTypeFloat).Add(10) @@ -505,7 +509,7 @@ func TestDistributor_MetricsCleanup(t *testing.T) { # TYPE cortex_distributor_ingester_query_failures_total counter cortex_distributor_ingester_query_failures_total{ingester="ingester-0"} 1 cortex_distributor_ingester_query_failures_total{ingester="ingester-1"} 1 - `), metrics...)) + `), allMetrics...)) d.cleanupInactiveUser("userA") @@ -524,18 +528,6 @@ func TestDistributor_MetricsCleanup(t *testing.T) { d.cleanStaleIngesterMetrics() require.NoError(t, testutil.GatherAndCompare(reg, strings.NewReader(` - # HELP cortex_distributor_deduped_samples_total The total number of deduplicated samples. - # TYPE cortex_distributor_deduped_samples_total counter - - # HELP cortex_distributor_latest_seen_sample_timestamp_seconds Unix timestamp of latest received sample per user. - # TYPE cortex_distributor_latest_seen_sample_timestamp_seconds gauge - - # HELP cortex_distributor_metadata_in_total The total number of metadata the have come in to the distributor, including rejected. - # TYPE cortex_distributor_metadata_in_total counter - - # HELP cortex_distributor_non_ha_samples_received_total The total number of received samples for a user that has HA tracking turned on, but the sample didn't contain both HA labels. - # TYPE cortex_distributor_non_ha_samples_received_total counter - # HELP cortex_distributor_received_metadata_total The total number of received metadata, excluding rejected. # TYPE cortex_distributor_received_metadata_total counter cortex_distributor_received_metadata_total{user="userB"} 10 @@ -553,9 +545,6 @@ func TestDistributor_MetricsCleanup(t *testing.T) { # TYPE cortex_distributor_received_exemplars_total counter cortex_distributor_received_exemplars_total{user="userB"} 10 - # HELP cortex_distributor_exemplars_in_total The total number of exemplars that have come in to the distributor, including rejected or deduped exemplars. - # TYPE cortex_distributor_exemplars_in_total counter - # HELP cortex_distributor_ingester_append_failures_total The total number of failed batch appends sent to ingesters. # TYPE cortex_distributor_ingester_append_failures_total counter cortex_distributor_ingester_append_failures_total{ingester="ingester-1",status="2xx",type="metadata"} 1 @@ -568,7 +557,11 @@ func TestDistributor_MetricsCleanup(t *testing.T) { # HELP cortex_distributor_ingester_query_failures_total The total number of failed queries sent to ingesters. # TYPE cortex_distributor_ingester_query_failures_total counter cortex_distributor_ingester_query_failures_total{ingester="ingester-1"} 1 - `), metrics...)) + `), permanentMetrics...)) + + err = testutil.GatherAndCompare(reg, strings.NewReader(""), removedMetrics...) 
+ require.ErrorContains(t, err, "expected metric name(s) not found") + require.ErrorContains(t, err, strings.Join(removedMetrics, " ")) } func TestDistributor_PushIngestionRateLimiter(t *testing.T) { @@ -1904,7 +1897,7 @@ func BenchmarkDistributor_GetLabelsValues(b *testing.B) { b.ResetTimer() b.ReportAllocs() for i := 0; i < b.N; i++ { - _, err := ds[0].LabelValuesForLabelName(ctx, model.Time(time.Now().UnixMilli()), model.Time(time.Now().UnixMilli()), "__name__") + _, err := ds[0].LabelValuesForLabelName(ctx, model.Time(time.Now().UnixMilli()), model.Time(time.Now().UnixMilli()), "__name__", nil) require.NoError(b, err) } }) @@ -2270,7 +2263,7 @@ func TestDistributor_MetricsForLabelMatchers_SingleSlowIngester(t *testing.T) { } for i := 0; i < 50; i++ { - _, err := ds[0].MetricsForLabelMatchers(ctx, now, now, mustNewMatcher(labels.MatchEqual, model.MetricNameLabel, "test")) + _, err := ds[0].MetricsForLabelMatchers(ctx, now, now, nil, mustNewMatcher(labels.MatchEqual, model.MetricNameLabel, "test")) require.NoError(t, err) } } @@ -2439,7 +2432,7 @@ func TestDistributor_MetricsForLabelMatchers(t *testing.T) { } { - metrics, err := ds[0].MetricsForLabelMatchers(ctx, now, now, testData.matchers...) + metrics, err := ds[0].MetricsForLabelMatchers(ctx, now, now, nil, testData.matchers...) if testData.expectedErr != nil { assert.ErrorIs(t, err, testData.expectedErr) @@ -2457,7 +2450,7 @@ func TestDistributor_MetricsForLabelMatchers(t *testing.T) { } { - metrics, err := ds[0].MetricsForLabelMatchersStream(ctx, now, now, testData.matchers...) + metrics, err := ds[0].MetricsForLabelMatchersStream(ctx, now, now, nil, testData.matchers...) if testData.expectedErr != nil { assert.ErrorIs(t, err, testData.expectedErr) return @@ -2544,7 +2537,7 @@ func BenchmarkDistributor_MetricsForLabelMatchers(b *testing.B) { for n := 0; n < b.N; n++ { now := model.Now() - metrics, err := ds[0].MetricsForLabelMatchers(ctx, now, now, testData.matchers...) + metrics, err := ds[0].MetricsForLabelMatchers(ctx, now, now, nil, testData.matchers...) if testData.expectedErr != nil { assert.EqualError(b, err, testData.expectedErr.Error()) @@ -3197,7 +3190,7 @@ func (i *mockIngester) MetricsForLabelMatchersStream(ctx context.Context, req *c return nil, errFail } - _, _, multiMatchers, err := client.FromMetricsForLabelMatchersRequest(req) + _, _, _, multiMatchers, err := client.FromMetricsForLabelMatchersRequest(req) if err != nil { return nil, err } @@ -3229,7 +3222,7 @@ func (i *mockIngester) MetricsForLabelMatchers(ctx context.Context, req *client. return nil, errFail } - _, _, multiMatchers, err := client.FromMetricsForLabelMatchersRequest(req) + _, _, _, multiMatchers, err := client.FromMetricsForLabelMatchersRequest(req) if err != nil { return nil, err } diff --git a/pkg/distributor/http_server.go b/pkg/distributor/http_server.go index 679e5879c7..8934f3718e 100644 --- a/pkg/distributor/http_server.go +++ b/pkg/distributor/http_server.go @@ -6,15 +6,6 @@ import ( "github.com/cortexproject/cortex/pkg/util" ) -// UserStats models ingestion statistics for one user. -type UserStats struct { - IngestionRate float64 `json:"ingestionRate"` - NumSeries uint64 `json:"numSeries"` - APIIngestionRate float64 `json:"APIIngestionRate"` - RuleIngestionRate float64 `json:"RuleIngestionRate"` - ActiveSeries uint64 `json:"activeSeries"` -} - // UserStatsHandler handles user stats to the Distributor. 
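// The stats payload now uses ingester.UserStats: the local struct deleted just
// above was replaced by the ingester package's definition, so the distributor
// and ingester endpoints can share ingester.AllUserStatsRender for output.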
func (d *Distributor) UserStatsHandler(w http.ResponseWriter, r *http.Request) { stats, err := d.UserStats(r.Context()) diff --git a/pkg/frontend/transport/handler.go b/pkg/frontend/transport/handler.go index 61bb2590cf..64c7b55f4b 100644 --- a/pkg/frontend/transport/handler.go +++ b/pkg/frontend/transport/handler.go @@ -91,6 +91,7 @@ type Handler struct { // Metrics. querySeconds *prometheus.CounterVec querySeries *prometheus.CounterVec + querySamples *prometheus.CounterVec queryChunkBytes *prometheus.CounterVec queryDataBytes *prometheus.CounterVec rejectedQueries *prometheus.CounterVec @@ -116,6 +117,11 @@ func NewHandler(cfg HandlerConfig, roundTripper http.RoundTripper, log log.Logge Help: "Number of series fetched to execute a query.", }, []string{"user"}) + h.querySamples = promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ + Name: "cortex_query_samples_total", + Help: "Number of samples fetched to execute a query.", + }, []string{"user"}) + h.queryChunkBytes = promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ Name: "cortex_query_fetched_chunks_bytes_total", Help: "Size of all chunks fetched to execute a query in bytes.", @@ -137,6 +143,7 @@ func NewHandler(cfg HandlerConfig, roundTripper http.RoundTripper, log log.Logge h.activeUsers = util.NewActiveUsersCleanupWithDefaultValues(func(user string) { h.querySeconds.DeleteLabelValues(user) h.querySeries.DeleteLabelValues(user) + h.querySamples.DeleteLabelValues(user) h.queryChunkBytes.DeleteLabelValues(user) h.queryDataBytes.DeleteLabelValues(user) if err := util.DeleteMatchingLabels(h.rejectedQueries, map[string]string{"user": user}); err != nil { @@ -305,6 +312,7 @@ func (f *Handler) reportQueryStats(r *http.Request, userID string, queryString u // Track stats. f.querySeconds.WithLabelValues(userID).Add(wallTime.Seconds()) f.querySeries.WithLabelValues(userID).Add(float64(numSeries)) + f.querySamples.WithLabelValues(userID).Add(float64(numSamples)) f.queryChunkBytes.WithLabelValues(userID).Add(float64(numChunkBytes)) f.queryDataBytes.WithLabelValues(userID).Add(float64(numDataBytes)) f.activeUsers.UpdateUserTimestamp(userID, time.Now()) diff --git a/pkg/frontend/transport/handler_test.go b/pkg/frontend/transport/handler_test.go index 7967185845..b1933ffc86 100644 --- a/pkg/frontend/transport/handler_test.go +++ b/pkg/frontend/transport/handler_test.go @@ -188,7 +188,7 @@ func TestHandler_ServeHTTP(t *testing.T) { { name: "test handler with stats enabled", cfg: HandlerConfig{QueryStatsEnabled: true}, - expectedMetrics: 3, + expectedMetrics: 4, roundTripperFunc: roundTripper, expectedStatusCode: http.StatusOK, }, @@ -202,7 +202,7 @@ func TestHandler_ServeHTTP(t *testing.T) { { name: "test handler with reasonResponseTooLarge", cfg: HandlerConfig{QueryStatsEnabled: true}, - expectedMetrics: 3, + expectedMetrics: 4, roundTripperFunc: roundTripperFunc(func(req *http.Request) (*http.Response, error) { return &http.Response{ StatusCode: http.StatusRequestEntityTooLarge, @@ -218,7 +218,7 @@ func TestHandler_ServeHTTP(t *testing.T) { { name: "test handler with reasonTooManyRequests", cfg: HandlerConfig{QueryStatsEnabled: true}, - expectedMetrics: 3, + expectedMetrics: 4, roundTripperFunc: roundTripperFunc(func(req *http.Request) (*http.Response, error) { return &http.Response{ StatusCode: http.StatusTooManyRequests, @@ -234,7 +234,7 @@ func TestHandler_ServeHTTP(t *testing.T) { { name: "test handler with reasonTooManySamples", cfg: HandlerConfig{QueryStatsEnabled: true}, - expectedMetrics: 3, + expectedMetrics: 4, 
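// expectedMetrics counts the gathered metric families checked at the end of
// this test; it moves from 3 to 4 in every stats-enabled case because the new
// cortex_query_samples_total is gathered alongside the existing three counters.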
roundTripperFunc: roundTripperFunc(func(req *http.Request) (*http.Response, error) { return &http.Response{ StatusCode: http.StatusUnprocessableEntity, @@ -250,7 +250,7 @@ func TestHandler_ServeHTTP(t *testing.T) { { name: "test handler with reasonTooLongRange", cfg: HandlerConfig{QueryStatsEnabled: true}, - expectedMetrics: 3, + expectedMetrics: 4, roundTripperFunc: roundTripperFunc(func(req *http.Request) (*http.Response, error) { return &http.Response{ StatusCode: http.StatusUnprocessableEntity, @@ -266,7 +266,7 @@ func TestHandler_ServeHTTP(t *testing.T) { { name: "test handler with reasonSeriesFetched", cfg: HandlerConfig{QueryStatsEnabled: true}, - expectedMetrics: 3, + expectedMetrics: 4, roundTripperFunc: roundTripperFunc(func(req *http.Request) (*http.Response, error) { return &http.Response{ StatusCode: http.StatusUnprocessableEntity, @@ -282,7 +282,7 @@ func TestHandler_ServeHTTP(t *testing.T) { { name: "test handler with reasonChunksFetched", cfg: HandlerConfig{QueryStatsEnabled: true}, - expectedMetrics: 3, + expectedMetrics: 4, roundTripperFunc: roundTripperFunc(func(req *http.Request) (*http.Response, error) { return &http.Response{ StatusCode: http.StatusUnprocessableEntity, @@ -298,7 +298,7 @@ func TestHandler_ServeHTTP(t *testing.T) { { name: "test handler with reasonChunkBytesFetched", cfg: HandlerConfig{QueryStatsEnabled: true}, - expectedMetrics: 3, + expectedMetrics: 4, roundTripperFunc: roundTripperFunc(func(req *http.Request) (*http.Response, error) { return &http.Response{ StatusCode: http.StatusUnprocessableEntity, @@ -314,7 +314,7 @@ func TestHandler_ServeHTTP(t *testing.T) { { name: "test handler with reasonDataBytesFetched", cfg: HandlerConfig{QueryStatsEnabled: true}, - expectedMetrics: 3, + expectedMetrics: 4, roundTripperFunc: roundTripperFunc(func(req *http.Request) (*http.Response, error) { return &http.Response{ StatusCode: http.StatusUnprocessableEntity, @@ -330,7 +330,7 @@ func TestHandler_ServeHTTP(t *testing.T) { { name: "test handler with reasonSeriesLimitStoreGateway", cfg: HandlerConfig{QueryStatsEnabled: true}, - expectedMetrics: 3, + expectedMetrics: 4, roundTripperFunc: roundTripperFunc(func(req *http.Request) (*http.Response, error) { return &http.Response{ StatusCode: http.StatusUnprocessableEntity, @@ -346,7 +346,7 @@ func TestHandler_ServeHTTP(t *testing.T) { { name: "test handler with reasonChunksLimitStoreGateway", cfg: HandlerConfig{QueryStatsEnabled: true}, - expectedMetrics: 3, + expectedMetrics: 4, roundTripperFunc: roundTripperFunc(func(req *http.Request) (*http.Response, error) { return &http.Response{ StatusCode: http.StatusUnprocessableEntity, @@ -362,7 +362,7 @@ func TestHandler_ServeHTTP(t *testing.T) { { name: "test handler with reasonBytesLimitStoreGateway", cfg: HandlerConfig{QueryStatsEnabled: true}, - expectedMetrics: 3, + expectedMetrics: 4, roundTripperFunc: roundTripperFunc(func(req *http.Request) (*http.Response, error) { return &http.Response{ StatusCode: http.StatusUnprocessableEntity, @@ -393,6 +393,7 @@ func TestHandler_ServeHTTP(t *testing.T) { reg, "cortex_query_seconds_total", "cortex_query_fetched_series_total", + "cortex_query_samples_total", "cortex_query_fetched_chunks_bytes_total", ) diff --git a/pkg/frontend/v1/frontend.go b/pkg/frontend/v1/frontend.go index 060020be18..7c7375027a 100644 --- a/pkg/frontend/v1/frontend.go +++ b/pkg/frontend/v1/frontend.go @@ -21,7 +21,9 @@ import ( "github.com/cortexproject/cortex/pkg/scheduler/queue" "github.com/cortexproject/cortex/pkg/tenant" 
"github.com/cortexproject/cortex/pkg/util" + "github.com/cortexproject/cortex/pkg/util/flagext" "github.com/cortexproject/cortex/pkg/util/httpgrpcutil" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/cortexproject/cortex/pkg/util/services" "github.com/cortexproject/cortex/pkg/util/validation" ) @@ -32,13 +34,14 @@ var ( // Config for a Frontend. type Config struct { - MaxOutstandingPerTenant int `yaml:"max_outstanding_per_tenant"` - QuerierForgetDelay time.Duration `yaml:"querier_forget_delay"` + QuerierForgetDelay time.Duration `yaml:"querier_forget_delay"` } // RegisterFlags adds the flags required to config this to the given FlagSet. func (cfg *Config) RegisterFlags(f *flag.FlagSet) { - f.IntVar(&cfg.MaxOutstandingPerTenant, "querier.max-outstanding-requests-per-tenant", 0, "Deprecated (use frontend.max-outstanding-requests-per-tenant instead) and will be removed in v1.17.0: Maximum number of outstanding requests per tenant per frontend; requests beyond this error with HTTP 429.") + //lint:ignore faillint Need to pass the global logger like this for warning on deprecated methods + flagext.DeprecatedFlag(f, "querier.max-outstanding-requests-per-tenant", "Deprecated: Use frontend.max-outstanding-requests-per-tenant instead.", util_log.Logger) + f.DurationVar(&cfg.QuerierForgetDelay, "query-frontend.querier-forget-delay", 0, "If a querier disconnects without sending notification about graceful shutdown, the query-frontend will keep the querier in the tenant's shard until the forget delay has passed. This feature is useful to reduce the blast radius when shuffle-sharding is enabled.") } @@ -129,7 +132,7 @@ func New(cfg Config, limits Limits, log log.Logger, registerer prometheus.Regist }), } - f.requestQueue = queue.NewRequestQueue(cfg.MaxOutstandingPerTenant, cfg.QuerierForgetDelay, f.queueLength, f.discardedRequests, f.limits, registerer) + f.requestQueue = queue.NewRequestQueue(cfg.QuerierForgetDelay, f.queueLength, f.discardedRequests, f.limits, registerer) f.activeUsers = util.NewActiveUsersCleanupWithDefaultValues(f.cleanupInactiveUserMetrics) var err error diff --git a/pkg/frontend/v1/frontend_test.go b/pkg/frontend/v1/frontend_test.go index 1206969a93..766d54f07f 100644 --- a/pkg/frontend/v1/frontend_test.go +++ b/pkg/frontend/v1/frontend_test.go @@ -129,7 +129,7 @@ func TestFrontendCheckReady(t *testing.T) { t.Run(tt.name, func(t *testing.T) { f := &Frontend{ log: log.NewNopLogger(), - requestQueue: queue.NewRequestQueue(5, 0, + requestQueue: queue.NewRequestQueue(5, prometheus.NewGaugeVec(prometheus.GaugeOpts{}, []string{"user"}), prometheus.NewCounterVec(prometheus.CounterOpts{}, []string{"user"}), limits, @@ -216,10 +216,7 @@ func TestFrontendMetricsCleanup(t *testing.T) { fr.cleanupInactiveUserMetrics("1") - require.NoError(t, testutil.GatherAndCompare(reg, strings.NewReader(` - # HELP cortex_query_frontend_queue_length Number of queries in the queue. 
- # TYPE cortex_query_frontend_queue_length gauge - `), "cortex_query_frontend_queue_length")) + require.ErrorContains(t, testutil.GatherAndCompare(reg, strings.NewReader(""), "cortex_query_frontend_queue_length"), "expected metric name(s) not found") } testFrontend(t, defaultFrontendConfig(), handler, test, matchMaxConcurrency, nil, reg) diff --git a/pkg/frontend/v1/queue_test.go b/pkg/frontend/v1/queue_test.go index 3549eaf98b..a11cfe1513 100644 --- a/pkg/frontend/v1/queue_test.go +++ b/pkg/frontend/v1/queue_test.go @@ -22,10 +22,10 @@ import ( "github.com/cortexproject/cortex/pkg/util/services" ) -func setupFrontend(t *testing.T, config Config) (*Frontend, error) { +func setupFrontend(t *testing.T, maxOutstanding int, config Config) (*Frontend, error) { logger := log.NewNopLogger() - limits := MockLimits{Queriers: 3, MockLimits: queue.MockLimits{MaxOutstanding: 100}} + limits := MockLimits{Queriers: 3, MockLimits: queue.MockLimits{MaxOutstanding: maxOutstanding}} frontend, err := New(config, limits, logger, nil, transport.NewRetry(0, nil)) require.NoError(t, err) @@ -51,10 +51,9 @@ func testReq(ctx context.Context, reqID, user string) *request { func TestDequeuesExpiredRequests(t *testing.T) { var config Config flagext.DefaultValues(&config) - config.MaxOutstandingPerTenant = 10 userID := "1" - f, err := setupFrontend(t, config) + f, err := setupFrontend(t, 10, config) require.NoError(t, err) ctx := user.InjectOrgID(context.Background(), userID) @@ -62,7 +61,7 @@ func TestDequeuesExpiredRequests(t *testing.T) { cancel() good := 0 - for i := 0; i < config.MaxOutstandingPerTenant; i++ { + for i := 0; i < 10; i++ { var err error if i%5 == 0 { good++ @@ -99,9 +98,7 @@ func TestRoundRobinQueues(t *testing.T) { tenants = 10 ) - config.MaxOutstandingPerTenant = requests - - f, err := setupFrontend(t, config) + f, err := setupFrontend(t, requests, config) require.NoError(t, err) for i := 0; i < requests; i++ { diff --git a/pkg/frontend/v2/frontend.go b/pkg/frontend/v2/frontend.go index 2df0f8f344..a8f49b11ab 100644 --- a/pkg/frontend/v2/frontend.go +++ b/pkg/frontend/v2/frontend.go @@ -57,7 +57,7 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { f.StringVar(&cfg.Addr, "frontend.instance-addr", "", "IP address to advertise to querier (via scheduler) (resolved via interfaces by default).") f.IntVar(&cfg.Port, "frontend.instance-port", 0, "Port to advertise to querier (via scheduler) (defaults to server.grpc-listen-port).") - cfg.GRPCClientConfig.RegisterFlagsWithPrefix("frontend.grpc-client-config", f) + cfg.GRPCClientConfig.RegisterFlagsWithPrefix("frontend.grpc-client-config", "", f) } // Frontend implements GrpcRoundTripper. It queues HTTP requests, diff --git a/pkg/ingester/client/client.go b/pkg/ingester/client/client.go index d10834f3c8..b437b95d01 100644 --- a/pkg/ingester/client/client.go +++ b/pkg/ingester/client/client.go @@ -6,6 +6,7 @@ import ( "github.com/cortexproject/cortex/pkg/cortexpb" "github.com/cortexproject/cortex/pkg/util/grpcclient" + "github.com/cortexproject/cortex/pkg/util/grpcencoding/snappyblock" "github.com/go-kit/log" "github.com/pkg/errors" @@ -116,7 +117,7 @@ type Config struct { // RegisterFlags registers configuration settings used by the ingester client config. 
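// The second argument added to RegisterFlagsWithPrefix below is the default
// gRPC compression codec: the ingester client opts into snappyblock.Name,
// while the frontend client earlier in this diff passes "" (no default codec).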
func (cfg *Config) RegisterFlags(f *flag.FlagSet) { - cfg.GRPCClientConfig.RegisterFlagsWithPrefix("ingester.client", f) + cfg.GRPCClientConfig.RegisterFlagsWithPrefix("ingester.client", snappyblock.Name, f) f.Int64Var(&cfg.MaxInflightPushRequests, "ingester.client.max-inflight-push-requests", 0, "Max inflight push requests that this ingester client can handle. This limit is per-ingester-client. Additional requests will be rejected. 0 = unlimited.") } diff --git a/pkg/ingester/client/compat.go b/pkg/ingester/client/compat.go index cbac06a46f..6e4a81d634 100644 --- a/pkg/ingester/client/compat.go +++ b/pkg/ingester/client/compat.go @@ -114,7 +114,7 @@ func ToQueryResponse(matrix model.Matrix) *QueryResponse { } // ToMetricsForLabelMatchersRequest builds a MetricsForLabelMatchersRequest proto -func ToMetricsForLabelMatchersRequest(from, to model.Time, matchers []*labels.Matcher) (*MetricsForLabelMatchersRequest, error) { +func ToMetricsForLabelMatchersRequest(from, to model.Time, limit int, matchers []*labels.Matcher) (*MetricsForLabelMatchersRequest, error) { ms, err := toLabelMatchers(matchers) if err != nil { return nil, err @@ -124,6 +124,7 @@ func ToMetricsForLabelMatchersRequest(from, to model.Time, matchers []*labels.Ma StartTimestampMs: int64(from), EndTimestampMs: int64(to), MatchersSet: []*LabelMatchers{{Matchers: ms}}, + Limit: int64(limit), }, nil } @@ -174,22 +175,22 @@ func SeriesSetToQueryResponse(s storage.SeriesSet) (*QueryResponse, error) { } // FromMetricsForLabelMatchersRequest unpacks a MetricsForLabelMatchersRequest proto -func FromMetricsForLabelMatchersRequest(req *MetricsForLabelMatchersRequest) (model.Time, model.Time, [][]*labels.Matcher, error) { +func FromMetricsForLabelMatchersRequest(req *MetricsForLabelMatchersRequest) (model.Time, model.Time, int, [][]*labels.Matcher, error) { matchersSet := make([][]*labels.Matcher, 0, len(req.MatchersSet)) for _, matchers := range req.MatchersSet { matchers, err := FromLabelMatchers(matchers.Matchers) if err != nil { - return 0, 0, nil, err + return 0, 0, 0, nil, err } matchersSet = append(matchersSet, matchers) } from := model.Time(req.StartTimestampMs) to := model.Time(req.EndTimestampMs) - return from, to, matchersSet, nil + return from, to, int(req.Limit), matchersSet, nil } // ToLabelValuesRequest builds a LabelValuesRequest proto -func ToLabelValuesRequest(labelName model.LabelName, from, to model.Time, matchers []*labels.Matcher) (*LabelValuesRequest, error) { +func ToLabelValuesRequest(labelName model.LabelName, from, to model.Time, limit int, matchers []*labels.Matcher) (*LabelValuesRequest, error) { ms, err := toLabelMatchers(matchers) if err != nil { return nil, err @@ -200,22 +201,23 @@ func ToLabelValuesRequest(labelName model.LabelName, from, to model.Time, matche StartTimestampMs: int64(from), EndTimestampMs: int64(to), Matchers: &LabelMatchers{Matchers: ms}, + Limit: int64(limit), }, nil } // FromLabelValuesRequest unpacks a LabelValuesRequest proto -func FromLabelValuesRequest(req *LabelValuesRequest) (string, int64, int64, []*labels.Matcher, error) { +func FromLabelValuesRequest(req *LabelValuesRequest) (string, int64, int64, int, []*labels.Matcher, error) { var err error var matchers []*labels.Matcher if req.Matchers != nil { matchers, err = FromLabelMatchers(req.Matchers.Matchers) if err != nil { - return "", 0, 0, nil, err + return "", 0, 0, 0, nil, err } } - return req.LabelName, req.StartTimestampMs, req.EndTimestampMs, matchers, nil + return req.LabelName, req.StartTimestampMs, req.EndTimestampMs, 
int(req.Limit), matchers, nil } func toLabelMatchers(matchers []*labels.Matcher) ([]*LabelMatcher, error) { diff --git a/pkg/ingester/client/ingester.pb.go b/pkg/ingester/client/ingester.pb.go index 643c9c6c2e..23f1fbdf5b 100644 --- a/pkg/ingester/client/ingester.pb.go +++ b/pkg/ingester/client/ingester.pb.go @@ -402,6 +402,7 @@ type LabelValuesRequest struct { StartTimestampMs int64 `protobuf:"varint,2,opt,name=start_timestamp_ms,json=startTimestampMs,proto3" json:"start_timestamp_ms,omitempty"` EndTimestampMs int64 `protobuf:"varint,3,opt,name=end_timestamp_ms,json=endTimestampMs,proto3" json:"end_timestamp_ms,omitempty"` Matchers *LabelMatchers `protobuf:"bytes,4,opt,name=matchers,proto3" json:"matchers,omitempty"` + Limit int64 `protobuf:"varint,5,opt,name=limit,proto3" json:"limit,omitempty"` } func (m *LabelValuesRequest) Reset() { *m = LabelValuesRequest{} } @@ -464,6 +465,13 @@ func (m *LabelValuesRequest) GetMatchers() *LabelMatchers { return nil } +func (m *LabelValuesRequest) GetLimit() int64 { + if m != nil { + return m.Limit + } + return 0 +} + type LabelValuesResponse struct { LabelValues []string `protobuf:"bytes,1,rep,name=label_values,json=labelValues,proto3" json:"label_values,omitempty"` } @@ -553,6 +561,7 @@ func (m *LabelValuesStreamResponse) GetLabelValues() []string { type LabelNamesRequest struct { StartTimestampMs int64 `protobuf:"varint,1,opt,name=start_timestamp_ms,json=startTimestampMs,proto3" json:"start_timestamp_ms,omitempty"` EndTimestampMs int64 `protobuf:"varint,2,opt,name=end_timestamp_ms,json=endTimestampMs,proto3" json:"end_timestamp_ms,omitempty"` + Limit int64 `protobuf:"varint,3,opt,name=limit,proto3" json:"limit,omitempty"` } func (m *LabelNamesRequest) Reset() { *m = LabelNamesRequest{} } @@ -601,6 +610,13 @@ func (m *LabelNamesRequest) GetEndTimestampMs() int64 { return 0 } +func (m *LabelNamesRequest) GetLimit() int64 { + if m != nil { + return m.Limit + } + return 0 +} + type LabelNamesResponse struct { LabelNames []string `protobuf:"bytes,1,rep,name=label_names,json=labelNames,proto3" json:"label_names,omitempty"` } @@ -728,6 +744,7 @@ type UserStatsResponse struct { ApiIngestionRate float64 `protobuf:"fixed64,3,opt,name=api_ingestion_rate,json=apiIngestionRate,proto3" json:"api_ingestion_rate,omitempty"` RuleIngestionRate float64 `protobuf:"fixed64,4,opt,name=rule_ingestion_rate,json=ruleIngestionRate,proto3" json:"rule_ingestion_rate,omitempty"` ActiveSeries uint64 `protobuf:"varint,5,opt,name=active_series,json=activeSeries,proto3" json:"active_series,omitempty"` + LoadedBlocks uint64 `protobuf:"varint,6,opt,name=loaded_blocks,json=loadedBlocks,proto3" json:"loaded_blocks,omitempty"` } func (m *UserStatsResponse) Reset() { *m = UserStatsResponse{} } @@ -797,6 +814,13 @@ func (m *UserStatsResponse) GetActiveSeries() uint64 { return 0 } +func (m *UserStatsResponse) GetLoadedBlocks() uint64 { + if m != nil { + return m.LoadedBlocks + } + return 0 +} + type UserIDStatsResponse struct { UserId string `protobuf:"bytes,1,opt,name=user_id,json=userId,proto3" json:"user_id,omitempty"` Data *UserStatsResponse `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` @@ -895,6 +919,7 @@ type MetricsForLabelMatchersRequest struct { StartTimestampMs int64 `protobuf:"varint,1,opt,name=start_timestamp_ms,json=startTimestampMs,proto3" json:"start_timestamp_ms,omitempty"` EndTimestampMs int64 `protobuf:"varint,2,opt,name=end_timestamp_ms,json=endTimestampMs,proto3" json:"end_timestamp_ms,omitempty"` MatchersSet []*LabelMatchers 
`protobuf:"bytes,3,rep,name=matchers_set,json=matchersSet,proto3" json:"matchers_set,omitempty"` + Limit int64 `protobuf:"varint,4,opt,name=limit,proto3" json:"limit,omitempty"` } func (m *MetricsForLabelMatchersRequest) Reset() { *m = MetricsForLabelMatchersRequest{} } @@ -950,6 +975,13 @@ func (m *MetricsForLabelMatchersRequest) GetMatchersSet() []*LabelMatchers { return nil } +func (m *MetricsForLabelMatchersRequest) GetLimit() int64 { + if m != nil { + return m.Limit + } + return 0 +} + type MetricsForLabelMatchersResponse struct { Metric []*cortexpb.Metric `protobuf:"bytes,1,rep,name=metric,proto3" json:"metric,omitempty"` } @@ -1444,88 +1476,90 @@ func init() { func init() { proto.RegisterFile("ingester.proto", fileDescriptor_60f6df4f3586b478) } var fileDescriptor_60f6df4f3586b478 = []byte{ - // 1292 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x57, 0x4b, 0x6f, 0xd4, 0xd6, - 0x17, 0xb7, 0x33, 0x93, 0x49, 0xe6, 0xcc, 0x64, 0x98, 0xdc, 0x04, 0x32, 0x98, 0x3f, 0x0e, 0xf8, - 0x2f, 0xda, 0xa8, 0x2d, 0x09, 0xa4, 0xad, 0x04, 0x7d, 0xa1, 0x04, 0x02, 0x04, 0x08, 0x01, 0x67, - 0xa0, 0x55, 0xd5, 0xca, 0x72, 0x66, 0x2e, 0x13, 0x17, 0xbf, 0xb0, 0xaf, 0x11, 0x74, 0x55, 0xa9, - 0x1f, 0xa0, 0x5d, 0x76, 0xdb, 0x5d, 0xd7, 0xfd, 0x00, 0x5d, 0xb3, 0x64, 0x89, 0xaa, 0x0a, 0x95, - 0x41, 0xaa, 0xba, 0xa4, 0xdf, 0xa0, 0xf2, 0x7d, 0x78, 0x6c, 0xc7, 0x93, 0x0c, 0x12, 0xe9, 0x6e, - 0x7c, 0xce, 0xef, 0x9c, 0xfb, 0x3b, 0x8f, 0x7b, 0xcf, 0x19, 0x68, 0x58, 0x6e, 0x0f, 0x87, 0x04, - 0x07, 0x8b, 0x7e, 0xe0, 0x11, 0x0f, 0x55, 0x3a, 0x5e, 0x40, 0xf0, 0x23, 0x65, 0xb6, 0xe7, 0xf5, - 0x3c, 0x2a, 0x5a, 0x8a, 0x7f, 0x31, 0xad, 0x72, 0xbe, 0x67, 0x91, 0x9d, 0x68, 0x7b, 0xb1, 0xe3, - 0x39, 0x4b, 0x0c, 0xe8, 0x07, 0xde, 0x37, 0xb8, 0x43, 0xf8, 0xd7, 0x92, 0x7f, 0xbf, 0x27, 0x14, - 0xdb, 0xfc, 0x07, 0x33, 0xd5, 0x3e, 0x85, 0x9a, 0x8e, 0xcd, 0xae, 0x8e, 0x1f, 0x44, 0x38, 0x24, - 0x68, 0x11, 0x26, 0x1e, 0x44, 0x38, 0xb0, 0x70, 0xd8, 0x92, 0x4f, 0x94, 0x16, 0x6a, 0xcb, 0xb3, - 0x8b, 0x1c, 0x7e, 0x3b, 0xc2, 0xc1, 0x63, 0x0e, 0xd3, 0x05, 0x48, 0xbb, 0x00, 0x75, 0x66, 0x1e, - 0xfa, 0x9e, 0x1b, 0x62, 0xb4, 0x04, 0x13, 0x01, 0x0e, 0x23, 0x9b, 0x08, 0xfb, 0xc3, 0x39, 0x7b, - 0x86, 0xd3, 0x05, 0x4a, 0xbb, 0x0e, 0x53, 0x19, 0x0d, 0xfa, 0x08, 0x80, 0x58, 0x0e, 0x0e, 0x8b, - 0x48, 0xf8, 0xdb, 0x8b, 0x6d, 0xcb, 0xc1, 0x5b, 0x54, 0xb7, 0x5a, 0x7e, 0xf2, 0x7c, 0x5e, 0xd2, - 0x53, 0x68, 0xed, 0x27, 0x19, 0xea, 0x69, 0x9e, 0xe8, 0x3d, 0x40, 0x21, 0x31, 0x03, 0x62, 0x50, - 0x10, 0x31, 0x1d, 0xdf, 0x70, 0x62, 0xa7, 0xf2, 0x42, 0x49, 0x6f, 0x52, 0x4d, 0x5b, 0x28, 0x36, - 0x42, 0xb4, 0x00, 0x4d, 0xec, 0x76, 0xb3, 0xd8, 0x31, 0x8a, 0x6d, 0x60, 0xb7, 0x9b, 0x46, 0x9e, - 0x81, 0x49, 0xc7, 0x24, 0x9d, 0x1d, 0x1c, 0x84, 0xad, 0x52, 0x36, 0x4f, 0x37, 0xcc, 0x6d, 0x6c, - 0x6f, 0x30, 0xa5, 0x9e, 0xa0, 0xb4, 0x9f, 0x65, 0x98, 0x5d, 0x7b, 0x84, 0x1d, 0xdf, 0x36, 0x83, - 0xff, 0x84, 0xe2, 0xd9, 0x5d, 0x14, 0x0f, 0x17, 0x51, 0x0c, 0x53, 0x1c, 0xbf, 0x82, 0x19, 0x4a, - 0x6d, 0x8b, 0x04, 0xd8, 0x74, 0x92, 0x8a, 0x5c, 0x80, 0x5a, 0x67, 0x27, 0x72, 0xef, 0x67, 0x4a, - 0x32, 0x27, 0x9c, 0x0d, 0x0a, 0x72, 0x31, 0x06, 0xf1, 0xaa, 0xa4, 0x2d, 0xae, 0x95, 0x27, 0xc7, - 0x9a, 0x25, 0x6d, 0x0b, 0x0e, 0xe7, 0x12, 0xf0, 0x06, 0x2a, 0xfe, 0x9b, 0x0c, 0x88, 0x86, 0x73, - 0xd7, 0xb4, 0x23, 0x1c, 0x8a, 0xa4, 0x1e, 0x07, 0xb0, 0x63, 0xa9, 0xe1, 0x9a, 0x0e, 0xa6, 0xc9, - 0xac, 0xea, 0x55, 0x2a, 0xb9, 0x69, 0x3a, 0x78, 0x48, 0xce, 0xc7, 0x5e, 0x23, 0xe7, 0xa5, 0x7d, - 0x73, 0x5e, 0x3e, 0x21, 0x8f, 0x92, 0xf3, 0x73, 
0x30, 0x93, 0xe1, 0xcf, 0x73, 0x72, 0x12, 0xea, - 0x2c, 0x80, 0x87, 0x54, 0x4e, 0xb3, 0x52, 0xd5, 0x6b, 0xf6, 0x00, 0xaa, 0x7d, 0x06, 0x47, 0x53, - 0x96, 0xb9, 0x9a, 0x8d, 0x60, 0x7f, 0x1f, 0xa6, 0x6f, 0x88, 0x8c, 0x84, 0x07, 0xdc, 0x8d, 0xda, - 0x87, 0xbc, 0x4c, 0xfc, 0x30, 0xce, 0x72, 0x1e, 0x6a, 0x83, 0x32, 0x09, 0x92, 0x90, 0xd4, 0x29, - 0xd4, 0x3e, 0x86, 0xd6, 0xc0, 0x2c, 0x17, 0xe2, 0xbe, 0xc6, 0x08, 0x9a, 0x77, 0x42, 0x1c, 0x6c, - 0x11, 0x93, 0x88, 0xf8, 0xb4, 0x3f, 0x64, 0x98, 0x4e, 0x09, 0xb9, 0xab, 0x53, 0xe2, 0xbd, 0xb5, - 0x3c, 0xd7, 0x08, 0x4c, 0xc2, 0x5a, 0x46, 0xd6, 0xa7, 0x12, 0xa9, 0x6e, 0x12, 0x1c, 0x77, 0x95, - 0x1b, 0x39, 0x06, 0x6f, 0xd4, 0x38, 0xd0, 0xb2, 0x5e, 0x75, 0x23, 0x87, 0x75, 0x67, 0x9c, 0x3b, - 0xd3, 0xb7, 0x8c, 0x9c, 0xa7, 0x12, 0xf5, 0xd4, 0x34, 0x7d, 0x6b, 0x3d, 0xe3, 0x6c, 0x11, 0x66, - 0x82, 0xc8, 0xc6, 0x79, 0x78, 0x99, 0xc2, 0xa7, 0x63, 0x55, 0x16, 0xff, 0x7f, 0x98, 0x32, 0x3b, - 0xc4, 0x7a, 0x88, 0xc5, 0xf9, 0xe3, 0xf4, 0xfc, 0x3a, 0x13, 0x32, 0x0a, 0xda, 0xd7, 0x30, 0x13, - 0x47, 0xb7, 0x7e, 0x29, 0x1b, 0xdf, 0x1c, 0x4c, 0x44, 0x21, 0x0e, 0x0c, 0xab, 0xcb, 0xef, 0x42, - 0x25, 0xfe, 0x5c, 0xef, 0xa2, 0xd3, 0x50, 0xee, 0x9a, 0xc4, 0xa4, 0xb1, 0xd4, 0x96, 0x8f, 0x8a, - 0x66, 0xdd, 0x95, 0x21, 0x9d, 0xc2, 0xb4, 0x2b, 0x80, 0x62, 0x55, 0x98, 0xf5, 0x7e, 0x16, 0xc6, - 0xc3, 0x58, 0xc0, 0xaf, 0xee, 0xb1, 0xb4, 0x97, 0x1c, 0x13, 0x9d, 0x21, 0xb5, 0x5f, 0x65, 0x50, - 0x37, 0x30, 0x09, 0xac, 0x4e, 0x78, 0xd9, 0x0b, 0xb2, 0x77, 0xe3, 0x80, 0xdf, 0xc5, 0x73, 0x50, - 0x17, 0x97, 0xcf, 0x08, 0x31, 0xd9, 0xfb, 0x6d, 0xac, 0x09, 0xe8, 0x16, 0x26, 0xda, 0x75, 0x98, - 0x1f, 0xca, 0x99, 0xa7, 0x62, 0x01, 0x2a, 0x0e, 0x85, 0xf0, 0x5c, 0x34, 0x07, 0xcf, 0x18, 0x33, - 0xd5, 0xb9, 0x5e, 0xbb, 0x0d, 0xa7, 0x86, 0x38, 0xcb, 0xb5, 0xf9, 0xe8, 0x2e, 0x5b, 0x70, 0x84, - 0xbb, 0xdc, 0xc0, 0xc4, 0x8c, 0x0b, 0x26, 0xba, 0x7e, 0x13, 0xe6, 0x76, 0x69, 0xb8, 0xfb, 0x0f, - 0x60, 0xd2, 0xe1, 0x32, 0x7e, 0x40, 0x2b, 0x7f, 0x40, 0x62, 0x93, 0x20, 0xb5, 0x7f, 0x64, 0x38, - 0x94, 0x7b, 0xf8, 0xe3, 0x12, 0xdc, 0x0b, 0x3c, 0xc7, 0x10, 0x9b, 0xcb, 0xa0, 0xdb, 0x1a, 0xb1, - 0x7c, 0x9d, 0x8b, 0xd7, 0xbb, 0xe9, 0x76, 0x1c, 0xcb, 0xb4, 0xa3, 0x0b, 0x15, 0x7a, 0x7f, 0xc5, - 0xc4, 0x9a, 0x19, 0x50, 0xa1, 0x29, 0xba, 0x65, 0x5a, 0xc1, 0xea, 0x4a, 0x3c, 0x04, 0x7e, 0x7f, - 0x3e, 0xff, 0x5a, 0x4b, 0x0f, 0xb3, 0x5f, 0xe9, 0x9a, 0x3e, 0xc1, 0x81, 0xce, 0x4f, 0x41, 0xef, - 0x42, 0x85, 0xcd, 0xa9, 0x56, 0x99, 0x9e, 0x37, 0x25, 0xba, 0x20, 0x3d, 0xca, 0x38, 0x44, 0xfb, - 0x41, 0x86, 0x71, 0x16, 0xe9, 0x41, 0xb5, 0xa6, 0x02, 0x93, 0xd8, 0xed, 0x78, 0x5d, 0xcb, 0xed, - 0xd1, 0x67, 0x63, 0x5c, 0x4f, 0xbe, 0x11, 0xe2, 0x37, 0x35, 0x7e, 0x1f, 0xea, 0xfc, 0x3a, 0xae, - 0xc0, 0x54, 0xa6, 0x73, 0x32, 0x6b, 0x89, 0x3c, 0xd2, 0x5a, 0x62, 0x40, 0x3d, 0xad, 0x41, 0xa7, - 0xa0, 0x4c, 0x1e, 0xfb, 0xec, 0xfd, 0x6b, 0x2c, 0x4f, 0x0b, 0x6b, 0xaa, 0x6e, 0x3f, 0xf6, 0xb1, - 0x4e, 0xd5, 0x31, 0x1b, 0x3a, 0x59, 0x59, 0xf9, 0xe8, 0x6f, 0x34, 0x0b, 0xe3, 0x74, 0xd8, 0x50, - 0xea, 0x55, 0x9d, 0x7d, 0x68, 0xdf, 0xcb, 0xd0, 0x18, 0x74, 0xca, 0x65, 0xcb, 0xc6, 0x6f, 0xa2, - 0x51, 0x14, 0x98, 0xbc, 0x67, 0xd9, 0x98, 0x72, 0x60, 0xc7, 0x25, 0xdf, 0x45, 0x99, 0x7a, 0xe7, - 0x1a, 0x54, 0x93, 0x10, 0x50, 0x15, 0xc6, 0xd7, 0x6e, 0xdf, 0x59, 0xb9, 0xd1, 0x94, 0xd0, 0x14, - 0x54, 0x6f, 0x6e, 0xb6, 0x0d, 0xf6, 0x29, 0xa3, 0x43, 0x50, 0xd3, 0xd7, 0xae, 0xac, 0x7d, 0x61, - 0x6c, 0xac, 0xb4, 0x2f, 0x5e, 0x6d, 0x8e, 0x21, 0x04, 0x0d, 0x26, 0xb8, 0xb9, 0xc9, 0x65, 0xa5, - 0xe5, 0xbf, 0x26, 0x60, 0x52, 0x70, 0x44, 0xe7, 0xa1, 0x7c, 0x2b, 0x0a, 
0x77, 0xd0, 0x91, 0x41, - 0xa7, 0x7e, 0x1e, 0x58, 0x04, 0xf3, 0x9b, 0xa7, 0xcc, 0xed, 0x92, 0xb3, 0x7b, 0xa7, 0x49, 0xe8, - 0x12, 0xd4, 0x52, 0xdb, 0x16, 0x2a, 0x5c, 0xb4, 0x95, 0x63, 0x19, 0x69, 0xf6, 0x69, 0xd0, 0xa4, - 0x33, 0x32, 0xda, 0x84, 0x06, 0x55, 0x89, 0xd5, 0x2a, 0x44, 0xff, 0x13, 0x26, 0x45, 0xeb, 0xa6, - 0x72, 0x7c, 0x88, 0x36, 0xa1, 0x75, 0x15, 0x6a, 0xa9, 0xb5, 0x02, 0x29, 0x99, 0x06, 0xca, 0x6c, - 0x59, 0x03, 0x72, 0x05, 0x1b, 0x8c, 0x26, 0xa1, 0xbb, 0x7c, 0xc1, 0x48, 0x2f, 0x28, 0x7b, 0xfa, - 0x3b, 0x59, 0xa0, 0x2b, 0x08, 0x79, 0x0d, 0x60, 0xb0, 0x14, 0xa0, 0xa3, 0x19, 0xa3, 0xf4, 0x32, - 0xa3, 0x28, 0x45, 0xaa, 0x84, 0xde, 0x16, 0x34, 0xf3, 0xbb, 0xc5, 0x5e, 0xce, 0x4e, 0xec, 0x56, - 0x15, 0x70, 0x5b, 0x85, 0x6a, 0x32, 0x3c, 0x51, 0xab, 0x60, 0x9e, 0x32, 0x67, 0xc3, 0x27, 0xad, - 0x26, 0xa1, 0xcb, 0x50, 0x5f, 0xb1, 0xed, 0x51, 0xdc, 0x28, 0x69, 0x4d, 0x98, 0xf7, 0x63, 0x27, - 0xaf, 0x7e, 0x7e, 0xc4, 0xa0, 0xb7, 0x92, 0x8b, 0xbd, 0xe7, 0x10, 0x56, 0xde, 0xde, 0x17, 0x97, - 0x9c, 0xf6, 0x2d, 0x1c, 0xdf, 0x73, 0xa0, 0x8d, 0x7c, 0xe6, 0xe9, 0x7d, 0x70, 0x05, 0x59, 0x6f, - 0xc3, 0xa1, 0xdc, 0x7c, 0x43, 0x6a, 0xce, 0x4b, 0x6e, 0x24, 0x2a, 0xf3, 0x43, 0xf5, 0xc2, 0xef, - 0xea, 0x27, 0x4f, 0x5f, 0xa8, 0xd2, 0xb3, 0x17, 0xaa, 0xf4, 0xea, 0x85, 0x2a, 0x7f, 0xd7, 0x57, - 0xe5, 0x5f, 0xfa, 0xaa, 0xfc, 0xa4, 0xaf, 0xca, 0x4f, 0xfb, 0xaa, 0xfc, 0x67, 0x5f, 0x95, 0xff, - 0xee, 0xab, 0xd2, 0xab, 0xbe, 0x2a, 0xff, 0xf8, 0x52, 0x95, 0x9e, 0xbe, 0x54, 0xa5, 0x67, 0x2f, - 0x55, 0xe9, 0xcb, 0x4a, 0xc7, 0xb6, 0xb0, 0x4b, 0xb6, 0x2b, 0xf4, 0xff, 0xf5, 0xfb, 0xff, 0x06, - 0x00, 0x00, 0xff, 0xff, 0xa7, 0xb7, 0xb7, 0x0a, 0xca, 0x0f, 0x00, 0x00, + // 1328 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x57, 0x4b, 0x6f, 0xd4, 0x56, + 0x14, 0x1e, 0x67, 0x1e, 0xc9, 0x9c, 0x79, 0x30, 0xb9, 0x09, 0x64, 0x30, 0xc5, 0x01, 0x23, 0xda, + 0xa8, 0x2d, 0x09, 0xa4, 0xad, 0x04, 0x7d, 0xa1, 0x04, 0x02, 0x04, 0x08, 0x01, 0x27, 0xd0, 0xaa, + 0x6a, 0x65, 0x39, 0x33, 0x97, 0xc4, 0xc5, 0x8f, 0xc1, 0xf7, 0x1a, 0x41, 0x57, 0x45, 0xfd, 0x01, + 0xed, 0xb2, 0xdb, 0xee, 0xfa, 0x53, 0x58, 0xb2, 0xe8, 0x02, 0x75, 0x81, 0xca, 0x20, 0x55, 0x5d, + 0xd2, 0x7f, 0x50, 0xf9, 0x3e, 0xfc, 0x8a, 0xf3, 0x40, 0x82, 0xee, 0x7c, 0xcf, 0xf9, 0xce, 0xb9, + 0xe7, 0x79, 0xcf, 0x31, 0xb4, 0x6d, 0x6f, 0x13, 0x13, 0x8a, 0x83, 0xd9, 0x41, 0xe0, 0x53, 0x1f, + 0xd5, 0x7a, 0x7e, 0x40, 0xf1, 0x43, 0x75, 0x72, 0xd3, 0xdf, 0xf4, 0x19, 0x69, 0x2e, 0xfa, 0xe2, + 0x5c, 0xf5, 0xdc, 0xa6, 0x4d, 0xb7, 0xc2, 0x8d, 0xd9, 0x9e, 0xef, 0xce, 0x71, 0xe0, 0x20, 0xf0, + 0xbf, 0xc7, 0x3d, 0x2a, 0x4e, 0x73, 0x83, 0x7b, 0x9b, 0x92, 0xb1, 0x21, 0x3e, 0xb8, 0xa8, 0xfe, + 0x05, 0x34, 0x0c, 0x6c, 0xf5, 0x0d, 0x7c, 0x3f, 0xc4, 0x84, 0xa2, 0x59, 0x18, 0xbd, 0x1f, 0xe2, + 0xc0, 0xc6, 0xa4, 0xab, 0x1c, 0x2b, 0xcf, 0x34, 0xe6, 0x27, 0x67, 0x05, 0xfc, 0x56, 0x88, 0x83, + 0x47, 0x02, 0x66, 0x48, 0x90, 0x7e, 0x1e, 0x9a, 0x5c, 0x9c, 0x0c, 0x7c, 0x8f, 0x60, 0x34, 0x07, + 0xa3, 0x01, 0x26, 0xa1, 0x43, 0xa5, 0xfc, 0xc1, 0x9c, 0x3c, 0xc7, 0x19, 0x12, 0xa5, 0x5f, 0x83, + 0x56, 0x86, 0x83, 0x3e, 0x05, 0xa0, 0xb6, 0x8b, 0x49, 0x91, 0x11, 0x83, 0x8d, 0xd9, 0x75, 0xdb, + 0xc5, 0x6b, 0x8c, 0xb7, 0x58, 0x79, 0xf2, 0x7c, 0xba, 0x64, 0xa4, 0xd0, 0xfa, 0xaf, 0x0a, 0x34, + 0xd3, 0x76, 0xa2, 0x0f, 0x01, 0x11, 0x6a, 0x05, 0xd4, 0x64, 0x20, 0x6a, 0xb9, 0x03, 0xd3, 0x8d, + 0x94, 0x2a, 0x33, 0x65, 0xa3, 0xc3, 0x38, 0xeb, 0x92, 0xb1, 0x42, 0xd0, 0x0c, 0x74, 0xb0, 0xd7, + 0xcf, 0x62, 0x47, 0x18, 0xb6, 0x8d, 0xbd, 0x7e, 0x1a, 0x79, 0x1a, 0xc6, 
0x5c, 0x8b, 0xf6, 0xb6, + 0x70, 0x40, 0xba, 0xe5, 0x6c, 0x9c, 0xae, 0x5b, 0x1b, 0xd8, 0x59, 0xe1, 0x4c, 0x23, 0x46, 0xe9, + 0xbf, 0x29, 0x30, 0xb9, 0xf4, 0x10, 0xbb, 0x03, 0xc7, 0x0a, 0xfe, 0x17, 0x13, 0xcf, 0x6c, 0x33, + 0xf1, 0x60, 0x91, 0x89, 0x24, 0x65, 0xe3, 0xb7, 0x30, 0xc1, 0x4c, 0x5b, 0xa3, 0x01, 0xb6, 0xdc, + 0x38, 0x23, 0xe7, 0xa1, 0xd1, 0xdb, 0x0a, 0xbd, 0x7b, 0x99, 0x94, 0x4c, 0x49, 0x65, 0x49, 0x42, + 0x2e, 0x44, 0x20, 0x91, 0x95, 0xb4, 0xc4, 0xd5, 0xca, 0xd8, 0x48, 0xa7, 0xac, 0xaf, 0xc1, 0xc1, + 0x5c, 0x00, 0xde, 0x40, 0xc6, 0xff, 0x50, 0x00, 0x31, 0x77, 0xee, 0x58, 0x4e, 0x88, 0x89, 0x0c, + 0xea, 0x51, 0x00, 0x27, 0xa2, 0x9a, 0x9e, 0xe5, 0x62, 0x16, 0xcc, 0xba, 0x51, 0x67, 0x94, 0x1b, + 0x96, 0x8b, 0x77, 0x88, 0xf9, 0xc8, 0x6b, 0xc4, 0xbc, 0xbc, 0x67, 0xcc, 0x2b, 0xc7, 0x94, 0x7d, + 0xc4, 0x1c, 0x4d, 0x42, 0xd5, 0xb1, 0x5d, 0x9b, 0x76, 0xab, 0x4c, 0x23, 0x3f, 0xe8, 0x67, 0x61, + 0x22, 0xe3, 0x95, 0x88, 0xd4, 0x71, 0x68, 0x72, 0xb7, 0x1e, 0x30, 0x3a, 0x8b, 0x55, 0xdd, 0x68, + 0x38, 0x09, 0x54, 0xff, 0x12, 0x0e, 0xa7, 0x24, 0x73, 0x99, 0xdc, 0x87, 0xfc, 0x63, 0x05, 0xc6, + 0xaf, 0xcb, 0x40, 0x91, 0xb7, 0x5d, 0xa4, 0xb1, 0xf7, 0xe5, 0xb4, 0xf7, 0x9f, 0x88, 0x9c, 0x0a, + 0x13, 0x84, 0xf1, 0xd3, 0xd0, 0x48, 0x72, 0x2a, 0x6d, 0x87, 0x38, 0xa9, 0x44, 0xff, 0x0c, 0xba, + 0x89, 0x58, 0xce, 0xf3, 0x3d, 0x85, 0x11, 0x74, 0x6e, 0x13, 0x1c, 0xac, 0x51, 0x8b, 0x4a, 0xaf, + 0xf5, 0xc7, 0x23, 0x30, 0x9e, 0x22, 0x0a, 0x55, 0x27, 0xe5, 0xe3, 0x6c, 0xfb, 0x9e, 0x19, 0x58, + 0x94, 0xd7, 0x97, 0x62, 0xb4, 0x62, 0xaa, 0x61, 0x51, 0x1c, 0x95, 0xa0, 0x17, 0xba, 0xa6, 0xa8, + 0xea, 0xc8, 0xfd, 0x8a, 0x51, 0xf7, 0x42, 0x97, 0x97, 0x72, 0x14, 0x51, 0x6b, 0x60, 0x9b, 0x39, + 0x4d, 0x65, 0xa6, 0xa9, 0x63, 0x0d, 0xec, 0xe5, 0x8c, 0xb2, 0x59, 0x98, 0x08, 0x42, 0x07, 0xe7, + 0xe1, 0x15, 0x06, 0x1f, 0x8f, 0x58, 0x59, 0xfc, 0x09, 0x68, 0x59, 0x3d, 0x6a, 0x3f, 0xc0, 0xf2, + 0xfe, 0x2a, 0xbb, 0xbf, 0xc9, 0x89, 0xc2, 0x84, 0x13, 0xd0, 0x72, 0x7c, 0xab, 0x8f, 0xfb, 0xe6, + 0x86, 0xe3, 0xf7, 0xee, 0x91, 0x6e, 0x8d, 0x83, 0x38, 0x71, 0x91, 0xd1, 0xf4, 0xef, 0x60, 0x22, + 0x0a, 0xc1, 0xf2, 0xc5, 0x6c, 0x10, 0xa6, 0x60, 0x34, 0x24, 0x38, 0x30, 0xed, 0xbe, 0xe8, 0xae, + 0x5a, 0x74, 0x5c, 0xee, 0xa3, 0x53, 0x50, 0xe9, 0x5b, 0xd4, 0x62, 0x0e, 0x37, 0xe6, 0x0f, 0xcb, + 0xf2, 0xdf, 0x16, 0x46, 0x83, 0xc1, 0xf4, 0xcb, 0x80, 0x22, 0x16, 0xc9, 0x6a, 0x3f, 0x03, 0x55, + 0x12, 0x11, 0xc4, 0x63, 0x70, 0x24, 0xad, 0x25, 0x67, 0x89, 0xc1, 0x91, 0xfa, 0x13, 0x05, 0xb4, + 0x15, 0x4c, 0x03, 0xbb, 0x47, 0x2e, 0xf9, 0x41, 0xb6, 0xdb, 0xde, 0x72, 0x11, 0x9f, 0x85, 0xa6, + 0x6c, 0x67, 0x93, 0x60, 0xba, 0xfb, 0x6b, 0xdb, 0x90, 0xd0, 0x35, 0x4c, 0x93, 0xf2, 0xaf, 0xa4, + 0xcb, 0xff, 0x1a, 0x4c, 0xef, 0xe8, 0x89, 0x08, 0xd0, 0x0c, 0xd4, 0x5c, 0x06, 0x11, 0x11, 0xea, + 0x24, 0xcf, 0x25, 0x17, 0x35, 0x04, 0x5f, 0xbf, 0x05, 0x27, 0x77, 0x50, 0x96, 0xeb, 0x90, 0xfd, + 0xab, 0xec, 0xc2, 0x21, 0xa1, 0x72, 0x05, 0x53, 0x2b, 0x4a, 0xa3, 0x6c, 0x98, 0x55, 0x98, 0xda, + 0xc6, 0x11, 0xea, 0x3f, 0x86, 0x31, 0x57, 0xd0, 0xc4, 0x05, 0xdd, 0xfc, 0x05, 0xb1, 0x4c, 0x8c, + 0xd4, 0xff, 0x55, 0xe0, 0x40, 0x6e, 0xc0, 0x44, 0x89, 0xb9, 0x1b, 0xf8, 0xae, 0x29, 0x37, 0xa4, + 0xa4, 0x06, 0xdb, 0x11, 0x7d, 0x59, 0x90, 0x97, 0xfb, 0xe9, 0x22, 0x1d, 0xc9, 0x14, 0xa9, 0x07, + 0x35, 0xd6, 0xfa, 0x72, 0x32, 0x4e, 0x24, 0xa6, 0xb0, 0x10, 0xdd, 0xb4, 0xec, 0x60, 0x71, 0x21, + 0x1a, 0x36, 0x7f, 0x3e, 0x9f, 0x7e, 0xad, 0xe5, 0x8a, 0xcb, 0x2f, 0xf4, 0xad, 0x01, 0xc5, 0x81, + 0x21, 0x6e, 0x41, 0x1f, 0x40, 0x8d, 0xcf, 0xc3, 0x6e, 0x85, 0xdd, 0xd7, 0x92, 0xb5, 0x91, 0x1e, + 
0x99, 0x02, 0xa2, 0xff, 0xac, 0x40, 0x95, 0x7b, 0xfa, 0xb6, 0x0a, 0x56, 0x85, 0x31, 0xec, 0xf5, + 0xfc, 0xbe, 0xed, 0x6d, 0xb2, 0x17, 0xa7, 0x6a, 0xc4, 0x67, 0x84, 0x44, 0xff, 0x46, 0x15, 0xd9, + 0x14, 0x4d, 0xba, 0x00, 0xad, 0x4c, 0xe5, 0x64, 0xd6, 0x1f, 0x65, 0x5f, 0xeb, 0x8f, 0x09, 0xcd, + 0x34, 0x07, 0x9d, 0x84, 0x0a, 0x7d, 0x34, 0xe0, 0x4f, 0x67, 0x7b, 0x7e, 0x5c, 0x4a, 0x33, 0xf6, + 0xfa, 0xa3, 0x01, 0x36, 0x18, 0x3b, 0xb2, 0x86, 0x4d, 0x70, 0x9e, 0x3e, 0xf6, 0x1d, 0x35, 0x0d, + 0x1b, 0x5f, 0xcc, 0xf4, 0xba, 0xc1, 0x0f, 0xfa, 0x4f, 0x0a, 0xb4, 0x93, 0x4a, 0xb9, 0x64, 0x3b, + 0xf8, 0x4d, 0x14, 0x8a, 0x0a, 0x63, 0x77, 0x6d, 0x07, 0x33, 0x1b, 0xf8, 0x75, 0xf1, 0xb9, 0x28, + 0x52, 0xef, 0x5f, 0x85, 0x7a, 0xec, 0x02, 0xaa, 0x43, 0x75, 0xe9, 0xd6, 0xed, 0x85, 0xeb, 0x9d, + 0x12, 0x6a, 0x41, 0xfd, 0xc6, 0xea, 0xba, 0xc9, 0x8f, 0x0a, 0x3a, 0x00, 0x0d, 0x63, 0xe9, 0xf2, + 0xd2, 0xd7, 0xe6, 0xca, 0xc2, 0xfa, 0x85, 0x2b, 0x9d, 0x11, 0x84, 0xa0, 0xcd, 0x09, 0x37, 0x56, + 0x05, 0xad, 0x3c, 0xff, 0xf7, 0x28, 0x8c, 0x49, 0x1b, 0xd1, 0x39, 0xa8, 0xdc, 0x0c, 0xc9, 0x16, + 0x3a, 0x94, 0x54, 0xea, 0x57, 0x81, 0x4d, 0xb1, 0xe8, 0x3c, 0x75, 0x6a, 0x1b, 0x9d, 0xf7, 0x9d, + 0x5e, 0x42, 0x17, 0xa1, 0x91, 0xda, 0xea, 0x50, 0xe1, 0x42, 0xaf, 0x1e, 0xc9, 0x50, 0xb3, 0x4f, + 0x83, 0x5e, 0x3a, 0xad, 0xa0, 0x55, 0x68, 0x33, 0x96, 0x5c, 0xe1, 0x08, 0x7a, 0x47, 0x8a, 0x14, + 0xad, 0xb5, 0xea, 0xd1, 0x1d, 0xb8, 0xb1, 0x59, 0x57, 0xa0, 0x91, 0x5a, 0x54, 0x90, 0x9a, 0x29, + 0xa0, 0xcc, 0x36, 0x97, 0x18, 0x57, 0xb0, 0x13, 0xe9, 0x25, 0x74, 0x47, 0x6c, 0x2c, 0xe9, 0x95, + 0x67, 0x57, 0x7d, 0xc7, 0x0b, 0x78, 0x05, 0x2e, 0x2f, 0x01, 0x24, 0xfb, 0x04, 0x3a, 0x9c, 0x11, + 0x4a, 0x6f, 0x47, 0xaa, 0x5a, 0xc4, 0x8a, 0xcd, 0x5b, 0x83, 0x4e, 0x7e, 0x2d, 0xd9, 0x4d, 0xd9, + 0xb1, 0xed, 0xac, 0x02, 0xdb, 0x16, 0xa1, 0x1e, 0x8f, 0x54, 0xd4, 0x2d, 0x98, 0xb2, 0x5c, 0xd9, + 0xce, 0xf3, 0x57, 0x2f, 0xa1, 0x4b, 0xd0, 0x5c, 0x70, 0x9c, 0xfd, 0xa8, 0x51, 0xd3, 0x1c, 0x92, + 0xd7, 0xe3, 0xc4, 0xaf, 0x7e, 0x7e, 0xc4, 0xa0, 0x77, 0xe3, 0xc6, 0xde, 0x75, 0x34, 0xab, 0xef, + 0xed, 0x89, 0x8b, 0x6f, 0xfb, 0x01, 0x8e, 0xee, 0x3a, 0xd0, 0xf6, 0x7d, 0xe7, 0xa9, 0x3d, 0x70, + 0x05, 0x51, 0x5f, 0x87, 0x03, 0xb9, 0xf9, 0x86, 0xb4, 0x9c, 0x96, 0xdc, 0x48, 0x54, 0xa7, 0x77, + 0xe4, 0x4b, 0xbd, 0x8b, 0x9f, 0x3f, 0x7d, 0xa1, 0x95, 0x9e, 0xbd, 0xd0, 0x4a, 0xaf, 0x5e, 0x68, + 0xca, 0x8f, 0x43, 0x4d, 0xf9, 0x7d, 0xa8, 0x29, 0x4f, 0x86, 0x9a, 0xf2, 0x74, 0xa8, 0x29, 0x7f, + 0x0d, 0x35, 0xe5, 0x9f, 0xa1, 0x56, 0x7a, 0x35, 0xd4, 0x94, 0x5f, 0x5e, 0x6a, 0xa5, 0xa7, 0x2f, + 0xb5, 0xd2, 0xb3, 0x97, 0x5a, 0xe9, 0x9b, 0x5a, 0xcf, 0xb1, 0xb1, 0x47, 0x37, 0x6a, 0xec, 0x3f, + 0xfe, 0xa3, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0xc5, 0xfc, 0x4c, 0xf5, 0x32, 0x10, 0x00, 0x00, } func (x MatchType) String() string { @@ -1781,6 +1815,9 @@ func (this *LabelValuesRequest) Equal(that interface{}) bool { if !this.Matchers.Equal(that1.Matchers) { return false } + if this.Limit != that1.Limit { + return false + } return true } func (this *LabelValuesResponse) Equal(that interface{}) bool { @@ -1866,6 +1903,9 @@ func (this *LabelNamesRequest) Equal(that interface{}) bool { if this.EndTimestampMs != that1.EndTimestampMs { return false } + if this.Limit != that1.Limit { + return false + } return true } func (this *LabelNamesResponse) Equal(that interface{}) bool { @@ -1981,6 +2021,9 @@ func (this *UserStatsResponse) Equal(that interface{}) bool { if this.ActiveSeries != that1.ActiveSeries { return false } + if this.LoadedBlocks != that1.LoadedBlocks { + return false + } return true } 
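The regenerated descriptor bytes and accessors above all stem from adding trailing scalar fields (`limit`, `loaded_blocks`) to existing messages, which is wire-compatible: an old decoder skips the unknown tag, and a new decoder of an old payload sees the zero value. A minimal round-trip sketch of that behavior, assuming this branch's generated `client` package builds at its usual import path:

```go
package main

import (
	"fmt"

	"github.com/cortexproject/cortex/pkg/ingester/client"
)

func main() {
	// Limit is the new field 5, encoded as a trailing varint (tag 0x28).
	req := &client.LabelValuesRequest{
		LabelName:      "job",
		EndTimestampMs: 3600000,
		Limit:          100,
	}

	buf, err := req.Marshal() // gogoproto-generated marshaler
	if err != nil {
		panic(err)
	}

	var out client.LabelValuesRequest
	if err := out.Unmarshal(buf); err != nil {
		panic(err)
	}
	// Prints 100; a pre-upgrade decoder would simply skip the 0x28 tag
	// and leave its view of the request unchanged.
	fmt.Println(out.GetLimit())
}
```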
func (this *UserIDStatsResponse) Equal(that interface{}) bool { @@ -2072,6 +2115,9 @@ func (this *MetricsForLabelMatchersRequest) Equal(that interface{}) bool { return false } } + if this.Limit != that1.Limit { + return false + } return true } func (this *MetricsForLabelMatchersResponse) Equal(that interface{}) bool { @@ -2454,7 +2500,7 @@ func (this *LabelValuesRequest) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 8) + s := make([]string, 0, 9) s = append(s, "&client.LabelValuesRequest{") s = append(s, "LabelName: "+fmt.Sprintf("%#v", this.LabelName)+",\n") s = append(s, "StartTimestampMs: "+fmt.Sprintf("%#v", this.StartTimestampMs)+",\n") @@ -2462,6 +2508,7 @@ func (this *LabelValuesRequest) GoString() string { if this.Matchers != nil { s = append(s, "Matchers: "+fmt.Sprintf("%#v", this.Matchers)+",\n") } + s = append(s, "Limit: "+fmt.Sprintf("%#v", this.Limit)+",\n") s = append(s, "}") return strings.Join(s, "") } @@ -2489,10 +2536,11 @@ func (this *LabelNamesRequest) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 6) + s := make([]string, 0, 7) s = append(s, "&client.LabelNamesRequest{") s = append(s, "StartTimestampMs: "+fmt.Sprintf("%#v", this.StartTimestampMs)+",\n") s = append(s, "EndTimestampMs: "+fmt.Sprintf("%#v", this.EndTimestampMs)+",\n") + s = append(s, "Limit: "+fmt.Sprintf("%#v", this.Limit)+",\n") s = append(s, "}") return strings.Join(s, "") } @@ -2529,13 +2577,14 @@ func (this *UserStatsResponse) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 9) + s := make([]string, 0, 10) s = append(s, "&client.UserStatsResponse{") s = append(s, "IngestionRate: "+fmt.Sprintf("%#v", this.IngestionRate)+",\n") s = append(s, "NumSeries: "+fmt.Sprintf("%#v", this.NumSeries)+",\n") s = append(s, "ApiIngestionRate: "+fmt.Sprintf("%#v", this.ApiIngestionRate)+",\n") s = append(s, "RuleIngestionRate: "+fmt.Sprintf("%#v", this.RuleIngestionRate)+",\n") s = append(s, "ActiveSeries: "+fmt.Sprintf("%#v", this.ActiveSeries)+",\n") + s = append(s, "LoadedBlocks: "+fmt.Sprintf("%#v", this.LoadedBlocks)+",\n") s = append(s, "}") return strings.Join(s, "") } @@ -2568,13 +2617,14 @@ func (this *MetricsForLabelMatchersRequest) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 7) + s := make([]string, 0, 8) s = append(s, "&client.MetricsForLabelMatchersRequest{") s = append(s, "StartTimestampMs: "+fmt.Sprintf("%#v", this.StartTimestampMs)+",\n") s = append(s, "EndTimestampMs: "+fmt.Sprintf("%#v", this.EndTimestampMs)+",\n") if this.MatchersSet != nil { s = append(s, "MatchersSet: "+fmt.Sprintf("%#v", this.MatchersSet)+",\n") } + s = append(s, "Limit: "+fmt.Sprintf("%#v", this.Limit)+",\n") s = append(s, "}") return strings.Join(s, "") } @@ -3585,6 +3635,11 @@ func (m *LabelValuesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.Limit != 0 { + i = encodeVarintIngester(dAtA, i, uint64(m.Limit)) + i-- + dAtA[i] = 0x28 + } if m.Matchers != nil { { size, err := m.Matchers.MarshalToSizedBuffer(dAtA[:i]) @@ -3701,6 +3756,11 @@ func (m *LabelNamesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.Limit != 0 { + i = encodeVarintIngester(dAtA, i, uint64(m.Limit)) + i-- + dAtA[i] = 0x18 + } if m.EndTimestampMs != 0 { i = encodeVarintIngester(dAtA, i, uint64(m.EndTimestampMs)) i-- @@ -3821,6 +3881,11 @@ func (m *UserStatsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.LoadedBlocks != 0 { + 
i = encodeVarintIngester(dAtA, i, uint64(m.LoadedBlocks)) + i-- + dAtA[i] = 0x30 + } if m.ActiveSeries != 0 { i = encodeVarintIngester(dAtA, i, uint64(m.ActiveSeries)) i-- @@ -3951,6 +4016,11 @@ func (m *MetricsForLabelMatchersRequest) MarshalToSizedBuffer(dAtA []byte) (int, _ = i var l int _ = l + if m.Limit != 0 { + i = encodeVarintIngester(dAtA, i, uint64(m.Limit)) + i-- + dAtA[i] = 0x20 + } if len(m.MatchersSet) > 0 { for iNdEx := len(m.MatchersSet) - 1; iNdEx >= 0; iNdEx-- { { @@ -4500,6 +4570,9 @@ func (m *LabelValuesRequest) Size() (n int) { l = m.Matchers.Size() n += 1 + l + sovIngester(uint64(l)) } + if m.Limit != 0 { + n += 1 + sovIngester(uint64(m.Limit)) + } return n } @@ -4545,6 +4618,9 @@ func (m *LabelNamesRequest) Size() (n int) { if m.EndTimestampMs != 0 { n += 1 + sovIngester(uint64(m.EndTimestampMs)) } + if m.Limit != 0 { + n += 1 + sovIngester(uint64(m.Limit)) + } return n } @@ -4608,6 +4684,9 @@ func (m *UserStatsResponse) Size() (n int) { if m.ActiveSeries != 0 { n += 1 + sovIngester(uint64(m.ActiveSeries)) } + if m.LoadedBlocks != 0 { + n += 1 + sovIngester(uint64(m.LoadedBlocks)) + } return n } @@ -4661,6 +4740,9 @@ func (m *MetricsForLabelMatchersRequest) Size() (n int) { n += 1 + l + sovIngester(uint64(l)) } } + if m.Limit != 0 { + n += 1 + sovIngester(uint64(m.Limit)) + } return n } @@ -4953,6 +5035,7 @@ func (this *LabelValuesRequest) String() string { `StartTimestampMs:` + fmt.Sprintf("%v", this.StartTimestampMs) + `,`, `EndTimestampMs:` + fmt.Sprintf("%v", this.EndTimestampMs) + `,`, `Matchers:` + strings.Replace(this.Matchers.String(), "LabelMatchers", "LabelMatchers", 1) + `,`, + `Limit:` + fmt.Sprintf("%v", this.Limit) + `,`, `}`, }, "") return s @@ -4984,6 +5067,7 @@ func (this *LabelNamesRequest) String() string { s := strings.Join([]string{`&LabelNamesRequest{`, `StartTimestampMs:` + fmt.Sprintf("%v", this.StartTimestampMs) + `,`, `EndTimestampMs:` + fmt.Sprintf("%v", this.EndTimestampMs) + `,`, + `Limit:` + fmt.Sprintf("%v", this.Limit) + `,`, `}`, }, "") return s @@ -5027,6 +5111,7 @@ func (this *UserStatsResponse) String() string { `ApiIngestionRate:` + fmt.Sprintf("%v", this.ApiIngestionRate) + `,`, `RuleIngestionRate:` + fmt.Sprintf("%v", this.RuleIngestionRate) + `,`, `ActiveSeries:` + fmt.Sprintf("%v", this.ActiveSeries) + `,`, + `LoadedBlocks:` + fmt.Sprintf("%v", this.LoadedBlocks) + `,`, `}`, }, "") return s @@ -5070,6 +5155,7 @@ func (this *MetricsForLabelMatchersRequest) String() string { `StartTimestampMs:` + fmt.Sprintf("%v", this.StartTimestampMs) + `,`, `EndTimestampMs:` + fmt.Sprintf("%v", this.EndTimestampMs) + `,`, `MatchersSet:` + repeatedStringForMatchersSet + `,`, + `Limit:` + fmt.Sprintf("%v", this.Limit) + `,`, `}`, }, "") return s @@ -6027,6 +6113,25 @@ func (m *LabelValuesRequest) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) + } + m.Limit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIngester + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Limit |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipIngester(dAtA[iNdEx:]) @@ -6288,6 +6393,25 @@ func (m *LabelNamesRequest) Unmarshal(dAtA []byte) error { break } } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) + } + m.Limit = 0 + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflowIngester + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Limit |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipIngester(dAtA[iNdEx:]) @@ -6635,6 +6759,25 @@ func (m *UserStatsResponse) Unmarshal(dAtA []byte) error { break } } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LoadedBlocks", wireType) + } + m.LoadedBlocks = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIngester + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.LoadedBlocks |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipIngester(dAtA[iNdEx:]) @@ -6968,6 +7111,25 @@ func (m *MetricsForLabelMatchersRequest) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) + } + m.Limit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIngester + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Limit |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipIngester(dAtA[iNdEx:]) diff --git a/pkg/ingester/client/ingester.proto b/pkg/ingester/client/ingester.proto index 3965808d6a..34b9662264 100644 --- a/pkg/ingester/client/ingester.proto +++ b/pkg/ingester/client/ingester.proto @@ -67,6 +67,7 @@ message LabelValuesRequest { int64 start_timestamp_ms = 2; int64 end_timestamp_ms = 3; LabelMatchers matchers = 4; + int64 limit = 5; } message LabelValuesResponse { @@ -80,6 +81,7 @@ message LabelValuesStreamResponse { message LabelNamesRequest { int64 start_timestamp_ms = 1; int64 end_timestamp_ms = 2; + int64 limit = 3; } message LabelNamesResponse { @@ -98,6 +100,7 @@ message UserStatsResponse { double api_ingestion_rate = 3; double rule_ingestion_rate = 4; uint64 active_series = 5; + uint64 loaded_blocks = 6; } message UserIDStatsResponse { @@ -113,6 +116,7 @@ message MetricsForLabelMatchersRequest { int64 start_timestamp_ms = 1; int64 end_timestamp_ms = 2; repeated LabelMatchers matchers_set = 3; + int64 limit = 4; } message MetricsForLabelMatchersResponse { diff --git a/pkg/distributor/http_admin.go b/pkg/ingester/http_admin.go similarity index 61% rename from pkg/distributor/http_admin.go rename to pkg/ingester/http_admin.go index 1aa13baff2..a0d2ca98c5 100644 --- a/pkg/distributor/http_admin.go +++ b/pkg/ingester/http_admin.go @@ -1,4 +1,4 @@ -package distributor +package ingester import ( "encoding/json" @@ -22,13 +22,16 @@ const tpl = `

 Cortex Ingester Stats
 Current time: {{ .Now }}
+{{if (gt .ReplicationFactor 0)}}
 NB stats do not account for replication factor, which is currently set to {{ .ReplicationFactor }}
+{{end}}
+ @@ -40,6 +43,7 @@ const tpl = ` {{ range .Stats }} + @@ -53,31 +57,41 @@ const tpl = ` ` -var tmpl *template.Template +var UserStatsTmpl *template.Template func init() { - tmpl = template.Must(template.New("webpage").Parse(tpl)) + UserStatsTmpl = template.Must(template.New("webpage").Parse(tpl)) } -type userStatsByTimeseries []UserIDStats +type UserStatsByTimeseries []UserIDStats -func (s userStatsByTimeseries) Len() int { return len(s) } -func (s userStatsByTimeseries) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s UserStatsByTimeseries) Len() int { return len(s) } +func (s UserStatsByTimeseries) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s userStatsByTimeseries) Less(i, j int) bool { +func (s UserStatsByTimeseries) Less(i, j int) bool { return s[i].NumSeries > s[j].NumSeries || (s[i].NumSeries == s[j].NumSeries && s[i].UserID < s[j].UserID) } -// AllUserStatsHandler shows stats for all users. -func (d *Distributor) AllUserStatsHandler(w http.ResponseWriter, r *http.Request) { - stats, err := d.AllUserStats(r.Context()) - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } +// UserIDStats models ingestion statistics for one user, including the user ID +type UserIDStats struct { + UserID string `json:"userID"` + UserStats +} + +// UserStats models ingestion statistics for one user. +type UserStats struct { + IngestionRate float64 `json:"ingestionRate"` + NumSeries uint64 `json:"numSeries"` + APIIngestionRate float64 `json:"APIIngestionRate"` + RuleIngestionRate float64 `json:"RuleIngestionRate"` + ActiveSeries uint64 `json:"activeSeries"` + LoadedBlocks uint64 `json:"loadedBlocks"` +} - sort.Sort(userStatsByTimeseries(stats)) +// AllUserStatsRender render data for all users or return in json format. +func AllUserStatsRender(w http.ResponseWriter, r *http.Request, stats []UserIDStats, rf int) { + sort.Sort(UserStatsByTimeseries(stats)) if encodings, found := r.Header["Accept"]; found && len(encodings) > 0 && strings.Contains(encodings[0], "json") { @@ -94,6 +108,6 @@ func (d *Distributor) AllUserStatsHandler(w http.ResponseWriter, r *http.Request }{ Now: time.Now(), Stats: stats, - ReplicationFactor: d.ingestersRing.ReplicationFactor(), - }, tmpl, r) + ReplicationFactor: rf, + }, UserStatsTmpl, r) } diff --git a/pkg/ingester/ingester.go b/pkg/ingester/ingester.go index e6cf5d9701..82e4bff755 100644 --- a/pkg/ingester/ingester.go +++ b/pkg/ingester/ingester.go @@ -4,6 +4,7 @@ import ( "context" "flag" "fmt" + "html" "io" "math" "net/http" @@ -1500,7 +1501,7 @@ func (i *Ingester) labelsValuesCommon(ctx context.Context, req *client.LabelValu return nil, cleanup, err } - labelName, startTimestampMs, endTimestampMs, matchers, err := client.FromLabelValuesRequest(req) + labelName, startTimestampMs, endTimestampMs, limit, matchers, err := client.FromLabelValuesRequest(req) if err != nil { return nil, cleanup, err } @@ -1534,11 +1535,15 @@ func (i *Ingester) labelsValuesCommon(ctx context.Context, req *client.LabelValu return nil, cleanup, err } defer c() - vals, _, err := q.LabelValues(ctx, labelName, nil, matchers...) + vals, _, err := q.LabelValues(ctx, labelName, &storage.LabelHints{Limit: limit}, matchers...) 
if err != nil { return nil, cleanup, err } + if limit > 0 && len(vals) > limit { + vals = vals[:limit] + } + return &client.LabelValuesResponse{ LabelValues: vals, }, cleanup, nil @@ -1601,6 +1606,8 @@ func (i *Ingester) labelNamesCommon(ctx context.Context, req *client.LabelNamesR return nil, cleanup, err } + limit := int(req.Limit) + q, err := db.Querier(mint, maxt) if err != nil { return nil, cleanup, err @@ -1615,11 +1622,15 @@ func (i *Ingester) labelNamesCommon(ctx context.Context, req *client.LabelNamesR return nil, cleanup, err } defer c() - names, _, err := q.LabelNames(ctx, nil) + names, _, err := q.LabelNames(ctx, &storage.LabelHints{Limit: limit}) if err != nil { return nil, cleanup, err } + if limit > 0 && len(names) > limit { + names = names[:limit] + } + return &client.LabelNamesResponse{ LabelNames: names, }, cleanup, nil @@ -1676,7 +1687,7 @@ func (i *Ingester) metricsForLabelMatchersCommon(ctx context.Context, req *clien } // Parse the request - _, _, matchersSet, err := client.FromMetricsForLabelMatchersRequest(req) + _, _, limit, matchersSet, err := client.FromMetricsForLabelMatchersRequest(req) if err != nil { return nil, cleanup, err } @@ -1705,6 +1716,7 @@ func (i *Ingester) metricsForLabelMatchersCommon(ctx context.Context, req *clien Start: mint, End: maxt, Func: "series", // There is no series function, this token is used for lookups that don't need samples. + Limit: limit, } if len(matchersSet) > 1 { for _, matchers := range matchersSet { @@ -1726,15 +1738,20 @@ func (i *Ingester) metricsForLabelMatchersCommon(ctx context.Context, req *clien Metric: make([]*cortexpb.Metric, 0), } + cnt := 0 for mergedSet.Next() { + cnt++ // Interrupt if the context has been canceled. - if ctx.Err() != nil { + if cnt%util.CheckContextEveryNIterations == 0 && ctx.Err() != nil { return nil, cleanup, ctx.Err() } result.Metric = append(result.Metric, &cortexpb.Metric{ Labels: cortexpb.FromLabelsToLabelAdapters(mergedSet.At().Labels()), }) + if limit > 0 && len(result.Metric) >= limit { + break + } } return result, cleanup, nil @@ -1788,7 +1805,42 @@ func (i *Ingester) UserStats(ctx context.Context, req *client.UserStatsRequest) return &client.UserStatsResponse{}, nil } - return createUserStats(db, i.cfg.ActiveSeriesMetricsEnabled), nil + userStat := createUserStats(db, i.cfg.ActiveSeriesMetricsEnabled) + + return &client.UserStatsResponse{ + IngestionRate: userStat.IngestionRate, + NumSeries: userStat.NumSeries, + ApiIngestionRate: userStat.APIIngestionRate, + RuleIngestionRate: userStat.RuleIngestionRate, + ActiveSeries: userStat.ActiveSeries, + LoadedBlocks: userStat.LoadedBlocks, + }, nil +} + +func (i *Ingester) userStats() []UserIDStats { + i.stoppedMtx.RLock() + defer i.stoppedMtx.RUnlock() + + perUserTotals := make(map[string]UserStats) + + users := i.TSDBState.dbs + + response := make([]UserIDStats, 0, len(perUserTotals)) + for id, db := range users { + response = append(response, UserIDStats{ + UserID: id, + UserStats: createUserStats(db, i.cfg.ActiveSeriesMetricsEnabled), + }) + } + + return response +} + +// AllUserStatsHandler shows stats for all users. +func (i *Ingester) AllUserStatsHandler(w http.ResponseWriter, r *http.Request) { + stats := i.userStats() + + AllUserStatsRender(w, r, stats, 0) } // AllUserStats returns ingestion statistics for all users known to this ingester. 
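A recurring pattern in the hunks above: the `storage.LabelHints`/`storage.SelectHints` limit is only a hint to the TSDB querier, so the ingester still clamps the returned slice itself. A runnable sketch of that clamp; `truncateToLimit` is a hypothetical helper written for illustration, not a function in the Cortex codebase:

```go
package main

import "fmt"

// truncateToLimit mirrors the defensive clamp applied after q.LabelNames
// and q.LabelValues: a non-positive limit means "no limit", and an
// oversized result is cut down to at most limit entries.
func truncateToLimit(vals []string, limit int) []string {
	if limit > 0 && len(vals) > limit {
		return vals[:limit]
	}
	return vals
}

func main() {
	names := []string{"__name__", "route", "status"}
	fmt.Println(truncateToLimit(names, 2)) // [__name__ route]
	fmt.Println(truncateToLimit(names, 0)) // [__name__ route status]
}
```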
@@ -1797,24 +1849,28 @@ func (i *Ingester) AllUserStats(_ context.Context, _ *client.UserStatsRequest) (
 		return nil, err
 	}
 
-	i.stoppedMtx.RLock()
-	defer i.stoppedMtx.RUnlock()
-
-	users := i.TSDBState.dbs
+	userStats := i.userStats()
 
 	response := &client.UsersStatsResponse{
-		Stats: make([]*client.UserIDStatsResponse, 0, len(users)),
+		Stats: make([]*client.UserIDStatsResponse, 0, len(userStats)),
 	}
-	for userID, db := range users {
+	for _, userStat := range userStats {
 		response.Stats = append(response.Stats, &client.UserIDStatsResponse{
-			UserId: userID,
-			Data:   createUserStats(db, i.cfg.ActiveSeriesMetricsEnabled),
+			UserId: userStat.UserID,
+			Data: &client.UserStatsResponse{
+				IngestionRate:     userStat.IngestionRate,
+				NumSeries:         userStat.NumSeries,
+				ApiIngestionRate:  userStat.APIIngestionRate,
+				RuleIngestionRate: userStat.RuleIngestionRate,
+				ActiveSeries:      userStat.ActiveSeries,
+				LoadedBlocks:      userStat.LoadedBlocks,
+			},
 		})
 	}
 
 	return response, nil
 }
 
-func createUserStats(db *userTSDB, activeSeriesMetricsEnabled bool) *client.UserStatsResponse {
+func createUserStats(db *userTSDB, activeSeriesMetricsEnabled bool) UserStats {
 	apiRate := db.ingestedAPISamples.Rate()
 	ruleRate := db.ingestedRuleSamples.Rate()
 
@@ -1823,12 +1879,13 @@ func createUserStats(db *userTSDB, activeSeriesMetricsEnabled bool) *client.User
 		activeSeries = uint64(db.activeSeries.Active())
 	}
 
-	return &client.UserStatsResponse{
+	return UserStats{
 		IngestionRate:     apiRate + ruleRate,
-		ApiIngestionRate:  apiRate,
+		APIIngestionRate:  apiRate,
 		RuleIngestionRate: ruleRate,
 		NumSeries:         db.Head().NumSeries(),
 		ActiveSeries:      activeSeries,
+		LoadedBlocks:      uint64(len(db.Blocks())),
 	}
 }
 
@@ -2865,6 +2922,61 @@ func (i *Ingester) flushHandler(w http.ResponseWriter, r *http.Request) {
 	w.WriteHeader(http.StatusNoContent)
 }
 
+// ModeHandler changes the mode of the ingester. It will also set unregisterOnShutdown to true if the requested mode is READONLY.
+func (i *Ingester) ModeHandler(w http.ResponseWriter, r *http.Request) {
+	err := r.ParseForm()
+	if err != nil {
+		respMsg := "failed to parse HTTP request in mode handler"
+		level.Warn(logutil.WithContext(r.Context(), i.logger)).Log("msg", respMsg, "err", err)
+		w.WriteHeader(http.StatusBadRequest)
+		// We ignore errors here, because we cannot do anything about them.
+		_, _ = w.Write([]byte(respMsg))
+		return
+	}
+
+	currentState := i.lifecycler.GetState()
+	reqMode := strings.ToUpper(r.Form.Get("mode"))
+	switch reqMode {
+	case "READONLY":
+		if currentState != ring.READONLY {
+			err = i.lifecycler.ChangeState(r.Context(), ring.READONLY)
+			if err != nil {
+				respMsg := fmt.Sprintf("failed to change state: %s", err)
+				level.Warn(logutil.WithContext(r.Context(), i.logger)).Log("msg", respMsg)
+				w.WriteHeader(http.StatusBadRequest)
+				// We ignore errors here, because we cannot do anything about them.
+				_, _ = w.Write([]byte(respMsg))
+				return
+			}
+		}
+	case "ACTIVE":
+		if currentState != ring.ACTIVE {
+			err = i.lifecycler.ChangeState(r.Context(), ring.ACTIVE)
+			if err != nil {
+				respMsg := fmt.Sprintf("failed to change state: %s", err)
+				level.Warn(logutil.WithContext(r.Context(), i.logger)).Log("msg", respMsg)
+				w.WriteHeader(http.StatusBadRequest)
+				// We ignore errors here, because we cannot do anything about them.
+ _, _ = w.Write([]byte(respMsg)) + return + } + } + default: + respMsg := fmt.Sprintf("invalid mode input: %s", html.EscapeString(reqMode)) + level.Warn(logutil.WithContext(r.Context(), i.logger)).Log("msg", respMsg) + w.WriteHeader(http.StatusBadRequest) + // We ignore errors here, because we cannot do anything about them. + _, _ = w.Write([]byte(respMsg)) + return + } + + respMsg := fmt.Sprintf("Ingester mode %s", i.lifecycler.GetState()) + level.Info(logutil.WithContext(r.Context(), i.logger)).Log("msg", respMsg) + w.WriteHeader(http.StatusOK) + // We ignore errors here, because we cannot do anything about them. + _, _ = w.Write([]byte(respMsg)) +} + // metadataQueryRange returns the best range to query for metadata queries based on the timerange in the ingester. func metadataQueryRange(queryStart, queryEnd int64, db *userTSDB, queryIngestersWithin time.Duration) (mint, maxt int64, err error) { if queryIngestersWithin > 0 { diff --git a/pkg/ingester/ingester_test.go b/pkg/ingester/ingester_test.go index 436757486a..c631db582b 100644 --- a/pkg/ingester/ingester_test.go +++ b/pkg/ingester/ingester_test.go @@ -185,7 +185,7 @@ func TestIngesterPerLabelsetLimitExceeded(t *testing.T) { # TYPE cortex_ingester_usage_per_labelset gauge cortex_ingester_usage_per_labelset{labelset="{label1=\"value1\"}",limit="max_series",user="1"} 3 cortex_ingester_usage_per_labelset{labelset="{label2=\"value2\"}",limit="max_series",user="1"} 2 - `), "cortex_ingester_usage_per_labelset", "cortex_ingester_limits_per_labelset", "cortex_discarded_samples_total")) + `), "cortex_ingester_usage_per_labelset", "cortex_ingester_limits_per_labelset")) // Should impose limits for _, set := range limits.LimitsPerLabelSet { @@ -653,8 +653,6 @@ func TestIngester_Push(t *testing.T) { "cortex_ingester_memory_users", "cortex_ingester_memory_series_created_total", "cortex_ingester_memory_series_removed_total", - "cortex_discarded_samples_total", - "cortex_ingester_active_series", } userID := "test" @@ -695,7 +693,7 @@ func TestIngester_Push(t *testing.T) { expectedMetadataIngested: []*cortexpb.MetricMetadata{ {MetricFamilyName: "metric_name_2", Help: "a help for metric_name_2", Unit: "", Type: cortexpb.GAUGE}, }, - additionalMetrics: []string{}, + additionalMetrics: []string{"cortex_discarded_samples_total", "cortex_ingester_active_series"}, disableNativeHistogram: true, expectedMetrics: ` # HELP cortex_ingester_ingested_samples_total The total number of samples ingested. @@ -757,6 +755,7 @@ func TestIngester_Push(t *testing.T) { "cortex_ingester_memory_metadata_created_total", "cortex_ingester_ingested_metadata_total", "cortex_ingester_ingested_metadata_failures_total", + "cortex_ingester_active_series", }, expectedMetrics: ` # HELP cortex_ingester_ingested_metadata_failures_total The total number of metadata that errored on ingestion. @@ -854,6 +853,7 @@ func TestIngester_Push(t *testing.T) { "cortex_ingester_tsdb_exemplar_series_with_exemplars_in_storage", "cortex_ingester_tsdb_exemplar_last_exemplars_timestamp_seconds", "cortex_ingester_tsdb_exemplar_out_of_order_exemplars_total", + "cortex_ingester_active_series", }, expectedMetrics: ` # HELP cortex_ingester_ingested_samples_total The total number of samples ingested. 
@@ -964,6 +964,8 @@ func TestIngester_Push(t *testing.T) { additionalMetrics: []string{ "cortex_ingester_tsdb_out_of_order_samples_total", "cortex_ingester_tsdb_head_out_of_order_samples_appended_total", + "cortex_discarded_samples_total", + "cortex_ingester_active_series", }, expectedMetrics: ` # HELP cortex_ingester_ingested_samples_total The total number of samples ingested. @@ -1020,6 +1022,7 @@ func TestIngester_Push(t *testing.T) { expectedIngested: []cortexpb.TimeSeries{ {Labels: metricLabelAdapters, Samples: []cortexpb.Sample{{Value: 2, TimestampMs: 1575043969}}}, }, + additionalMetrics: []string{"cortex_ingester_active_series"}, expectedMetrics: ` # HELP cortex_ingester_ingested_samples_total The total number of samples ingested. # TYPE cortex_ingester_ingested_samples_total counter @@ -1067,6 +1070,10 @@ func TestIngester_Push(t *testing.T) { expectedIngested: []cortexpb.TimeSeries{ {Labels: metricLabelAdapters, Samples: []cortexpb.Sample{{Value: 2, TimestampMs: 1575043969}}}, }, + additionalMetrics: []string{ + "cortex_discarded_samples_total", + "cortex_ingester_active_series", + }, expectedMetrics: ` # HELP cortex_ingester_ingested_samples_total The total number of samples ingested. # TYPE cortex_ingester_ingested_samples_total counter @@ -1113,6 +1120,7 @@ func TestIngester_Push(t *testing.T) { expectedIngested: []cortexpb.TimeSeries{ {Labels: metricLabelAdapters, Samples: []cortexpb.Sample{{Value: 1, TimestampMs: 1575043969 - (60 * 1000)}, {Value: 2, TimestampMs: 1575043969}}}, }, + additionalMetrics: []string{"cortex_ingester_active_series"}, expectedMetrics: ` # HELP cortex_ingester_ingested_samples_total The total number of samples ingested. # TYPE cortex_ingester_ingested_samples_total counter @@ -1156,6 +1164,7 @@ func TestIngester_Push(t *testing.T) { expectedIngested: []cortexpb.TimeSeries{ {Labels: metricLabelAdapters, Samples: []cortexpb.Sample{{Value: 2, TimestampMs: 1575043969}}}, }, + additionalMetrics: []string{"cortex_discarded_samples_total", "cortex_ingester_active_series"}, expectedMetrics: ` # HELP cortex_ingester_ingested_samples_total The total number of samples ingested. # TYPE cortex_ingester_ingested_samples_total counter @@ -1214,6 +1223,7 @@ func TestIngester_Push(t *testing.T) { "cortex_ingester_tsdb_exemplar_series_with_exemplars_in_storage", "cortex_ingester_tsdb_exemplar_last_exemplars_timestamp_seconds", "cortex_ingester_tsdb_exemplar_out_of_order_exemplars_total", + "cortex_ingester_active_series", }, expectedMetrics: ` # HELP cortex_ingester_ingested_samples_total The total number of samples ingested. @@ -1274,6 +1284,7 @@ func TestIngester_Push(t *testing.T) { }, additionalMetrics: []string{ "cortex_ingester_tsdb_head_samples_appended_total", + "cortex_ingester_active_series", }, expectedMetrics: ` # HELP cortex_ingester_ingested_samples_total The total number of samples ingested. @@ -1323,6 +1334,7 @@ func TestIngester_Push(t *testing.T) { }, additionalMetrics: []string{ "cortex_ingester_tsdb_head_samples_appended_total", + "cortex_ingester_active_series", }, expectedMetrics: ` # HELP cortex_ingester_ingested_samples_total The total number of samples ingested. @@ -1373,6 +1385,7 @@ func TestIngester_Push(t *testing.T) { additionalMetrics: []string{ "cortex_ingester_tsdb_head_samples_appended_total", "cortex_ingester_tsdb_out_of_order_samples_total", + "cortex_ingester_active_series", }, expectedMetrics: ` # HELP cortex_ingester_ingested_samples_total The total number of samples ingested. 
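The test hunks above keep moving metric families out of the shared `metricNames` list and into per-case `additionalMetrics` because `testutil.GatherAndCompare` only compares the families it is explicitly given. A self-contained sketch of that opt-in behavior, using a throwaway registry and counters that only loosely resemble the real Cortex metrics:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/testutil"
)

func main() {
	reg := prometheus.NewRegistry()
	ingested := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "cortex_ingester_ingested_samples_total",
		Help: "The total number of samples ingested.",
	})
	discarded := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "cortex_discarded_samples_total",
		Help: "The total number of samples that were discarded.",
	})
	reg.MustRegister(ingested, discarded)
	ingested.Add(2)
	discarded.Add(1)

	expected := `
		# HELP cortex_ingester_ingested_samples_total The total number of samples ingested.
		# TYPE cortex_ingester_ingested_samples_total counter
		cortex_ingester_ingested_samples_total 2
	`
	// Only the named family is compared; cortex_discarded_samples_total is
	// ignored because it is not listed, which is why the cases above opt
	// metrics in individually via additionalMetrics.
	err := testutil.GatherAndCompare(reg, strings.NewReader(expected),
		"cortex_ingester_ingested_samples_total")
	fmt.Println(err) // <nil>
}
```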
@@ -2204,7 +2217,7 @@ func Test_Ingester_LabelNames(t *testing.T) { {labels.Labels{{Name: labels.MetricName, Value: "test_2"}}, 2, 200000}, } - expected := []string{"__name__", "status", "route"} + expected := []string{"__name__", "route", "status"} // Create ingester i, err := prepareIngesterWithBlocksStorage(t, defaultIngesterTestConfig(t), prometheus.NewRegistry()) @@ -2226,10 +2239,27 @@ func Test_Ingester_LabelNames(t *testing.T) { require.NoError(t, err) } - // Get label names - res, err := i.LabelNames(ctx, &client.LabelNamesRequest{}) - require.NoError(t, err) - assert.ElementsMatch(t, expected, res.LabelNames) + tests := map[string]struct { + limit int + expected []string + }{ + "should return all label names if no limit is set": { + expected: expected, + }, + "should return limited label names if a limit is set": { + limit: 2, + expected: expected[:2], + }, + } + + for testName, testData := range tests { + t.Run(testName, func(t *testing.T) { + // Get label names + res, err := i.LabelNames(ctx, &client.LabelNamesRequest{Limit: int64(testData.limit)}) + require.NoError(t, err) + assert.ElementsMatch(t, testData.expected, res.LabelNames) + }) + } } func Test_Ingester_LabelValues(t *testing.T) { @@ -2270,13 +2300,31 @@ func Test_Ingester_LabelValues(t *testing.T) { require.NoError(t, err) } - // Get label values - for labelName, expectedValues := range expected { - req := &client.LabelValuesRequest{LabelName: labelName} - res, err := i.LabelValues(ctx, req) - require.NoError(t, err) - assert.ElementsMatch(t, expectedValues, res.LabelValues) + tests := map[string]struct { + limit int64 + }{ + "should return all label values if no limit is set": { + limit: 0, + }, + "should return limited label values if a limit is set": { + limit: 1, + }, } + + for testName, testData := range tests { + t.Run(testName, func(t *testing.T) { + for labelName, expectedValues := range expected { + req := &client.LabelValuesRequest{LabelName: labelName, Limit: testData.limit} + res, err := i.LabelValues(ctx, req) + require.NoError(t, err) + if testData.limit > 0 && len(expectedValues) > int(testData.limit) { + expectedValues = expectedValues[:testData.limit] + } + assert.ElementsMatch(t, expectedValues, res.LabelValues) + } + }) + } + } func Test_Ingester_LabelValue_MaxInflightQueryRequest(t *testing.T) { @@ -2635,6 +2683,7 @@ func Test_Ingester_MetricsForLabelMatchers(t *testing.T) { tests := map[string]struct { from int64 to int64 + limit int64 matchers []*client.LabelMatchers expected []*cortexpb.Metric queryIngestersWithin time.Duration @@ -2695,7 +2744,7 @@ func Test_Ingester_MetricsForLabelMatchers(t *testing.T) { {Labels: cortexpb.FromLabelsToLabelAdapters(fixtures[1].lbls)}, }, }, - "should filter metrics by time range if queryStoreForLabels and queryIngestersWithin is enabled": { + "should filter metrics by time range if queryIngestersWithin is enabled": { from: 99999, to: 100001, matchers: []*client.LabelMatchers{{ @@ -2742,6 +2791,26 @@ func Test_Ingester_MetricsForLabelMatchers(t *testing.T) { {Labels: cortexpb.FromLabelsToLabelAdapters(fixtures[4].lbls)}, }, }, + "should return only limited results": { + from: math.MinInt64, + to: math.MaxInt64, + limit: 1, + matchers: []*client.LabelMatchers{ + { + Matchers: []*client.LabelMatcher{ + {Type: client.EQUAL, Name: "status", Value: "200"}, + }, + }, + { + Matchers: []*client.LabelMatcher{ + {Type: client.EQUAL, Name: model.MetricNameLabel, Value: "test_2"}, + }, + }, + }, + expected: []*cortexpb.Metric{ + {Labels: 
cortexpb.FromLabelsToLabelAdapters(fixtures[0].lbls)}, + }, + }, } // Create ingester @@ -2773,6 +2842,7 @@ func Test_Ingester_MetricsForLabelMatchers(t *testing.T) { StartTimestampMs: testData.from, EndTimestampMs: testData.to, MatchersSet: testData.matchers, + Limit: testData.limit, } i.cfg.QueryIngestersWithin = testData.queryIngestersWithin res, err := i.MetricsForLabelMatchers(ctx, req) @@ -4191,6 +4261,7 @@ func Test_Ingester_AllUserStats(t *testing.T) { ApiIngestionRate: 0.2, RuleIngestionRate: 0, ActiveSeries: 3, + LoadedBlocks: 0, }, }, { @@ -4201,12 +4272,91 @@ func Test_Ingester_AllUserStats(t *testing.T) { ApiIngestionRate: 0.13333333333333333, RuleIngestionRate: 0, ActiveSeries: 2, + LoadedBlocks: 0, }, }, } assert.ElementsMatch(t, expect, res.Stats) } +func Test_Ingester_AllUserStatsHandler(t *testing.T) { + series := []struct { + user string + lbls labels.Labels + value float64 + timestamp int64 + }{ + {"user-1", labels.Labels{{Name: labels.MetricName, Value: "test_1_1"}, {Name: "status", Value: "200"}, {Name: "route", Value: "get_user"}}, 1, 100000}, + {"user-1", labels.Labels{{Name: labels.MetricName, Value: "test_1_1"}, {Name: "status", Value: "500"}, {Name: "route", Value: "get_user"}}, 1, 110000}, + {"user-1", labels.Labels{{Name: labels.MetricName, Value: "test_1_2"}}, 2, 200000}, + {"user-2", labels.Labels{{Name: labels.MetricName, Value: "test_2_1"}}, 2, 200000}, + {"user-2", labels.Labels{{Name: labels.MetricName, Value: "test_2_2"}}, 2, 200000}, + } + + // Create ingester + i, err := prepareIngesterWithBlocksStorage(t, defaultIngesterTestConfig(t), prometheus.NewRegistry()) + require.NoError(t, err) + require.NoError(t, services.StartAndAwaitRunning(context.Background(), i)) + defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck + + // Wait until it's ACTIVE + test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} { + return i.lifecycler.GetState() + }) + for _, series := range series { + ctx := user.InjectOrgID(context.Background(), series.user) + req, _ := mockWriteRequest(t, series.lbls, series.value, series.timestamp) + _, err := i.Push(ctx, req) + require.NoError(t, err) + } + + // Force compaction to test loaded blocks + compactionCallbackCh := make(chan struct{}) + i.TSDBState.forceCompactTrigger <- requestWithUsersAndCallback{users: nil, callback: compactionCallbackCh} + <-compactionCallbackCh + + // force update statistics + for _, db := range i.TSDBState.dbs { + db.ingestedAPISamples.Tick() + db.ingestedRuleSamples.Tick() + } + + // Get label names + response := httptest.NewRecorder() + request := httptest.NewRequest("GET", "/all_user_stats", nil) + request.Header.Add("Accept", "application/json") + i.AllUserStatsHandler(response, request) + var resp UserStatsByTimeseries + err = json.Unmarshal(response.Body.Bytes(), &resp) + require.NoError(t, err) + + expect := UserStatsByTimeseries{ + { + UserID: "user-1", + UserStats: UserStats{ + IngestionRate: 0.2, + NumSeries: 0, + APIIngestionRate: 0.2, + RuleIngestionRate: 0, + ActiveSeries: 3, + LoadedBlocks: 1, + }, + }, + { + UserID: "user-2", + UserStats: UserStats{ + IngestionRate: 0.13333333333333333, + NumSeries: 0, + APIIngestionRate: 0.13333333333333333, + RuleIngestionRate: 0, + ActiveSeries: 2, + LoadedBlocks: 1, + }, + }, + } + assert.ElementsMatch(t, expect, resp) +} + func TestIngesterCompactIdleBlock(t *testing.T) { cfg := defaultIngesterTestConfig(t) cfg.LifecyclerConfig.JoinAfter = 0 @@ -4317,8 +4467,10 @@ func TestIngesterCompactAndCloseIdleTSDB(t *testing.T) { 
require.Equal(t, int64(1), i.TSDBState.seriesCount.Load()) - metricsToCheck := []string{memSeriesCreatedTotalName, memSeriesRemovedTotalName, "cortex_ingester_memory_users", "cortex_ingester_active_series", - "cortex_ingester_memory_metadata", "cortex_ingester_memory_metadata_created_total", "cortex_ingester_memory_metadata_removed_total"} + userMetrics := []string{memSeriesCreatedTotalName, memSeriesRemovedTotalName, "cortex_ingester_active_series"} + + globalMetrics := []string{"cortex_ingester_memory_users", "cortex_ingester_memory_metadata"} + metricsToCheck := append(userMetrics, globalMetrics...) require.NoError(t, testutil.GatherAndCompare(r, strings.NewReader(` # HELP cortex_ingester_memory_series_created_total The total number of series that were created per user. @@ -4358,24 +4510,19 @@ func TestIngesterCompactAndCloseIdleTSDB(t *testing.T) { require.Equal(t, int64(0), i.TSDBState.seriesCount.Load()) // Flushing removed all series from memory. // Verify that user has disappeared from metrics. - require.NoError(t, testutil.GatherAndCompare(r, strings.NewReader(` - # HELP cortex_ingester_memory_series_created_total The total number of series that were created per user. - # TYPE cortex_ingester_memory_series_created_total counter - - # HELP cortex_ingester_memory_series_removed_total The total number of series that were removed per user. - # TYPE cortex_ingester_memory_series_removed_total counter + err = testutil.GatherAndCompare(r, strings.NewReader(""), userMetrics...) + require.ErrorContains(t, err, "expected metric name(s) not found") + require.ErrorContains(t, err, strings.Join(userMetrics, " ")) + require.NoError(t, testutil.GatherAndCompare(r, strings.NewReader(` # HELP cortex_ingester_memory_users The current number of users in memory. # TYPE cortex_ingester_memory_users gauge cortex_ingester_memory_users 0 - # HELP cortex_ingester_active_series Number of currently active series per user. - # TYPE cortex_ingester_active_series gauge - # HELP cortex_ingester_memory_metadata The current number of metadata in memory. # TYPE cortex_ingester_memory_metadata gauge cortex_ingester_memory_metadata 0 - `), metricsToCheck...)) + `), "cortex_ingester_memory_users", "cortex_ingester_memory_metadata")) // Pushing another sample will recreate TSDB. 
pushSingleSampleWithMetadata(t, i) @@ -5050,6 +5197,117 @@ func generateSamplesForLabel(l labels.Labels, count int) *cortexpb.WriteRequest return cortexpb.ToWriteRequest(lbls, samples, nil, nil, cortexpb.API) } +func Test_Ingester_ModeHandler(t *testing.T) { + tests := map[string]struct { + method string + requestBody io.Reader + requestUrl string + initialState ring.InstanceState + mode string + expectedState ring.InstanceState + expectedResponse int + }{ + "should change to READONLY mode": { + method: "POST", + initialState: ring.ACTIVE, + requestUrl: "/mode?mode=reAdOnLy", + expectedState: ring.READONLY, + expectedResponse: http.StatusOK, + }, + "should change mode on GET method": { + method: "GET", + initialState: ring.ACTIVE, + requestUrl: "/mode?mode=READONLY", + expectedState: ring.READONLY, + expectedResponse: http.StatusOK, + }, + "should change mode on POST method via body": { + method: "POST", + initialState: ring.ACTIVE, + requestUrl: "/mode", + requestBody: strings.NewReader("mode=readonly"), + expectedState: ring.READONLY, + expectedResponse: http.StatusOK, + }, + "should change to ACTIVE mode": { + method: "POST", + initialState: ring.READONLY, + requestUrl: "/mode?mode=active", + expectedState: ring.ACTIVE, + expectedResponse: http.StatusOK, + }, + "should fail to unknown mode": { + method: "POST", + initialState: ring.ACTIVE, + requestUrl: "/mode?mode=NotSupported", + expectedState: ring.ACTIVE, + expectedResponse: http.StatusBadRequest, + }, + "should maintain in readonly": { + method: "POST", + initialState: ring.READONLY, + requestUrl: "/mode?mode=READONLY", + expectedState: ring.READONLY, + expectedResponse: http.StatusOK, + }, + "should maintain in active": { + method: "POST", + initialState: ring.ACTIVE, + requestUrl: "/mode?mode=ACTIVE", + expectedState: ring.ACTIVE, + expectedResponse: http.StatusOK, + }, + "should fail mode READONLY if LEAVING state": { + method: "POST", + initialState: ring.LEAVING, + requestUrl: "/mode?mode=READONLY", + expectedState: ring.LEAVING, + expectedResponse: http.StatusBadRequest, + }, + "should fail with malformatted request": { + method: "GET", + initialState: ring.ACTIVE, + requestUrl: "/mode?mod;e=READONLY", + expectedResponse: http.StatusBadRequest, + }, + } + + for testName, testData := range tests { + t.Run(testName, func(t *testing.T) { + cfg := defaultIngesterTestConfig(t) + i, err := prepareIngesterWithBlocksStorage(t, cfg, prometheus.NewRegistry()) + require.NoError(t, err) + require.NoError(t, services.StartAndAwaitRunning(context.Background(), i)) + defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck + + // Wait until it's ACTIVE + test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} { + return i.lifecycler.GetState() + }) + + if testData.initialState != ring.ACTIVE { + err = i.lifecycler.ChangeState(context.Background(), testData.initialState) + require.NoError(t, err) + + // Wait until initial state + test.Poll(t, 1*time.Second, testData.initialState, func() interface{} { + return i.lifecycler.GetState() + }) + } + + response := httptest.NewRecorder() + request := httptest.NewRequest(testData.method, testData.requestUrl, testData.requestBody) + if testData.requestBody != nil { + request.Header.Set("Content-Type", "application/x-www-form-urlencoded; param=value") + } + i.ModeHandler(response, request) + + require.Equal(t, testData.expectedResponse, response.Code) + require.Equal(t, testData.expectedState, i.lifecycler.GetState()) + }) + } +} + // mockTenantLimits exposes per-tenant 
limits based on a provided map type mockTenantLimits struct { limits map[string]*validation.Limits diff --git a/pkg/querier/distributor_queryable.go b/pkg/querier/distributor_queryable.go index 5bd933c209..a2f5a50f46 100644 --- a/pkg/querier/distributor_queryable.go +++ b/pkg/querier/distributor_queryable.go @@ -27,12 +27,12 @@ import ( type Distributor interface { QueryStream(ctx context.Context, from, to model.Time, matchers ...*labels.Matcher) (*client.QueryStreamResponse, error) QueryExemplars(ctx context.Context, from, to model.Time, matchers ...[]*labels.Matcher) (*client.ExemplarQueryResponse, error) - LabelValuesForLabelName(ctx context.Context, from, to model.Time, label model.LabelName, matchers ...*labels.Matcher) ([]string, error) - LabelValuesForLabelNameStream(ctx context.Context, from, to model.Time, label model.LabelName, matchers ...*labels.Matcher) ([]string, error) - LabelNames(context.Context, model.Time, model.Time) ([]string, error) - LabelNamesStream(context.Context, model.Time, model.Time) ([]string, error) - MetricsForLabelMatchers(ctx context.Context, from, through model.Time, matchers ...*labels.Matcher) ([]model.Metric, error) - MetricsForLabelMatchersStream(ctx context.Context, from, through model.Time, matchers ...*labels.Matcher) ([]model.Metric, error) + LabelValuesForLabelName(ctx context.Context, from, to model.Time, label model.LabelName, hint *storage.LabelHints, matchers ...*labels.Matcher) ([]string, error) + LabelValuesForLabelNameStream(ctx context.Context, from, to model.Time, label model.LabelName, hint *storage.LabelHints, matchers ...*labels.Matcher) ([]string, error) + LabelNames(context.Context, model.Time, model.Time, *storage.LabelHints) ([]string, error) + LabelNamesStream(context.Context, model.Time, model.Time, *storage.LabelHints) ([]string, error) + MetricsForLabelMatchers(ctx context.Context, from, through model.Time, hint *storage.SelectHints, matchers ...*labels.Matcher) ([]model.Metric, error) + MetricsForLabelMatchersStream(ctx context.Context, from, through model.Time, hint *storage.SelectHints, matchers ...*labels.Matcher) ([]model.Metric, error) MetricsMetadata(ctx context.Context) ([]scrape.MetricMetadata, error) } @@ -115,9 +115,9 @@ func (q *distributorQuerier) Select(ctx context.Context, sortSeries bool, sp *st ) if q.streamingMetadata { - ms, err = q.distributor.MetricsForLabelMatchersStream(ctx, model.Time(minT), model.Time(maxT), matchers...) + ms, err = q.distributor.MetricsForLabelMatchersStream(ctx, model.Time(minT), model.Time(maxT), sp, matchers...) } else { - ms, err = q.distributor.MetricsForLabelMatchers(ctx, model.Time(minT), model.Time(maxT), matchers...) + ms, err = q.distributor.MetricsForLabelMatchers(ctx, model.Time(minT), model.Time(maxT), sp, matchers...) } if err != nil { @@ -171,9 +171,9 @@ func (q *distributorQuerier) LabelValues(ctx context.Context, name string, hints ) if q.streamingMetadata { - lvs, err = q.distributor.LabelValuesForLabelNameStream(ctx, model.Time(q.mint), model.Time(q.maxt), model.LabelName(name), matchers...) + lvs, err = q.distributor.LabelValuesForLabelNameStream(ctx, model.Time(q.mint), model.Time(q.maxt), model.LabelName(name), hints, matchers...) } else { - lvs, err = q.distributor.LabelValuesForLabelName(ctx, model.Time(q.mint), model.Time(q.maxt), model.LabelName(name), matchers...) + lvs, err = q.distributor.LabelValuesForLabelName(ctx, model.Time(q.mint), model.Time(q.maxt), model.LabelName(name), hints, matchers...) 
} return lvs, nil, err @@ -181,7 +181,7 @@ func (q *distributorQuerier) LabelValues(ctx context.Context, name string, hints func (q *distributorQuerier) LabelNames(ctx context.Context, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { if len(matchers) > 0 { - return q.labelNamesWithMatchers(ctx, matchers...) + return q.labelNamesWithMatchers(ctx, hints, matchers...) } log, ctx := spanlogger.New(ctx, "distributorQuerier.LabelNames") @@ -193,16 +193,16 @@ func (q *distributorQuerier) LabelNames(ctx context.Context, hints *storage.Labe ) if q.streamingMetadata { - ln, err = q.distributor.LabelNamesStream(ctx, model.Time(q.mint), model.Time(q.maxt)) + ln, err = q.distributor.LabelNamesStream(ctx, model.Time(q.mint), model.Time(q.maxt), hints) } else { - ln, err = q.distributor.LabelNames(ctx, model.Time(q.mint), model.Time(q.maxt)) + ln, err = q.distributor.LabelNames(ctx, model.Time(q.mint), model.Time(q.maxt), hints) } return ln, nil, err } // labelNamesWithMatchers performs the LabelNames call by calling ingester's MetricsForLabelMatchers method -func (q *distributorQuerier) labelNamesWithMatchers(ctx context.Context, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { +func (q *distributorQuerier) labelNamesWithMatchers(ctx context.Context, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { log, ctx := spanlogger.New(ctx, "distributorQuerier.labelNamesWithMatchers") defer log.Span.Finish() @@ -212,9 +212,9 @@ func (q *distributorQuerier) labelNamesWithMatchers(ctx context.Context, matcher ) if q.streamingMetadata { - ms, err = q.distributor.MetricsForLabelMatchersStream(ctx, model.Time(q.mint), model.Time(q.maxt), matchers...) + ms, err = q.distributor.MetricsForLabelMatchersStream(ctx, model.Time(q.mint), model.Time(q.maxt), labelHintsToSelectHints(hints), matchers...) } else { - ms, err = q.distributor.MetricsForLabelMatchers(ctx, model.Time(q.mint), model.Time(q.maxt), matchers...) + ms, err = q.distributor.MetricsForLabelMatchers(ctx, model.Time(q.mint), model.Time(q.maxt), labelHintsToSelectHints(hints), matchers...) 
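Note that the only field surviving the LabelHints-to-SelectHints translation here is the result-size limit; the labelHintsToSelectHints helper added at the end of this file (see the hunk just below) leaves every other SelectHints field zero-valued. As a rough sketch of what a receiving implementation can do with the new hint parameter — the truncateToLimit helper and its truncation policy are illustrative assumptions, not part of this change:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/storage"
)

// truncateToLimit is a hypothetical helper: a nil hint or non-positive
// Limit means "unlimited", matching how the querier passes hints through.
func truncateToLimit(values []string, hints *storage.LabelHints) []string {
	if hints == nil || hints.Limit <= 0 || len(values) <= hints.Limit {
		return values
	}
	return values[:hints.Limit]
}

func main() {
	names := []string{"__name__", "instance", "job"}
	fmt.Println(truncateToLimit(names, &storage.LabelHints{Limit: 2})) // [__name__ instance]
}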
} if err != nil { @@ -280,3 +280,13 @@ func (q *distributorExemplarQuerier) Select(start, end int64, matchers ...[]*lab } return ret, nil } + +func labelHintsToSelectHints(hints *storage.LabelHints) *storage.SelectHints { + if hints == nil { + return nil + } + + return &storage.SelectHints{ + Limit: hints.Limit, + } +} diff --git a/pkg/querier/distributor_queryable_test.go b/pkg/querier/distributor_queryable_test.go index fdef2c0aaf..c5decf75e9 100644 --- a/pkg/querier/distributor_queryable_test.go +++ b/pkg/querier/distributor_queryable_test.go @@ -85,8 +85,8 @@ func TestDistributorQuerier_SelectShouldHonorQueryIngestersWithin(t *testing.T) distributor := &MockDistributor{} distributor.On("QueryStream", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(&client.QueryStreamResponse{}, nil) - distributor.On("MetricsForLabelMatchers", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return([]model.Metric{}, nil) - distributor.On("MetricsForLabelMatchersStream", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return([]model.Metric{}, nil) + distributor.On("MetricsForLabelMatchers", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return([]model.Metric{}, nil) + distributor.On("MetricsForLabelMatchersStream", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return([]model.Metric{}, nil) ctx := user.InjectOrgID(context.Background(), "test") queryable := newDistributorQueryable(distributor, streamingMetadataEnabled, nil, testData.queryIngestersWithin) @@ -213,9 +213,9 @@ func TestDistributorQuerier_LabelNames(t *testing.T) { {"job": "baz", "foo": "boom"}, } d := &MockDistributor{} - d.On("MetricsForLabelMatchers", mock.Anything, model.Time(mint), model.Time(maxt), someMatchers). + d.On("MetricsForLabelMatchers", mock.Anything, model.Time(mint), model.Time(maxt), mock.Anything, someMatchers). Return(metrics, nil) - d.On("MetricsForLabelMatchersStream", mock.Anything, model.Time(mint), model.Time(maxt), someMatchers). + d.On("MetricsForLabelMatchersStream", mock.Anything, model.Time(mint), model.Time(maxt), mock.Anything, someMatchers). Return(metrics, nil) queryable := newDistributorQueryable(d, streamingEnabled, nil, 0) diff --git a/pkg/querier/querier.go b/pkg/querier/querier.go index 7eab5170aa..15b1911469 100644 --- a/pkg/querier/querier.go +++ b/pkg/querier/querier.go @@ -47,7 +47,6 @@ type Config struct { IngesterMetadataStreaming bool `yaml:"ingester_metadata_streaming"` MaxSamples int `yaml:"max_samples"` QueryIngestersWithin time.Duration `yaml:"query_ingesters_within"` - AtModifierEnabled bool `yaml:"at_modifier_enabled" doc:"hidden"` EnablePerStepStats bool `yaml:"per_step_stats_enabled"` // Use compression when returning promql response. Supported values 'gzip', 'snappy', and '' (disable compression) @@ -97,8 +96,6 @@ var ( // RegisterFlags adds the flags required to config this to the given FlagSet. func (cfg *Config) RegisterFlags(f *flag.FlagSet) { - //lint:ignore faillint Need to pass the global logger like this for warning on deprecated methods - flagext.DeprecatedFlag(f, "querier.at-modifier-enabled", "This flag is no longer functional; at-modifier is always enabled now.", util_log.Logger) //lint:ignore faillint Need to pass the global logger like this for warning on deprecated methods flagext.DeprecatedFlag(f, "querier.ingester-streaming", "Deprecated: Use streaming RPCs to query ingester. 
QueryStream is always enabled and the flag is not effective anymore.", util_log.Logger) //lint:ignore faillint Need to pass the global logger like this for warning on deprecated methods @@ -111,7 +108,7 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { cfg.StoreGatewayClient.RegisterFlagsWithPrefix("querier.store-gateway-client", f) f.IntVar(&cfg.MaxConcurrent, "querier.max-concurrent", 20, "The maximum number of concurrent queries.") f.DurationVar(&cfg.Timeout, "querier.timeout", 2*time.Minute, "The timeout for a query.") - f.BoolVar(&cfg.IngesterMetadataStreaming, "querier.ingester-metadata-streaming", false, "Use streaming RPCs for metadata APIs from ingester.") + f.BoolVar(&cfg.IngesterMetadataStreaming, "querier.ingester-metadata-streaming", true, "Deprecated (This feature will be always on after v1.18): Use streaming RPCs for metadata APIs from ingester.") f.IntVar(&cfg.MaxSamples, "querier.max-samples", 50e6, "Maximum number of samples a single query can load into memory.") f.DurationVar(&cfg.QueryIngestersWithin, "querier.query-ingesters-within", 0, "Maximum lookback beyond which queries are not sent to ingester. 0 means all queries are sent to ingester.") f.BoolVar(&cfg.EnablePerStepStats, "querier.per-step-stats-enabled", false, "Enable returning samples stats per steps in query response.") diff --git a/pkg/querier/querier_test.go b/pkg/querier/querier_test.go index 5175af939b..17b4be651d 100644 --- a/pkg/querier/querier_test.go +++ b/pkg/querier/querier_test.go @@ -1078,8 +1078,8 @@ func TestQuerier_ValidateQueryTimeRange_MaxQueryLookback(t *testing.T) { t.Run("series", func(t *testing.T) { distributor := &MockDistributor{} - distributor.On("MetricsForLabelMatchers", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return([]model.Metric{}, nil) - distributor.On("MetricsForLabelMatchersStream", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return([]model.Metric{}, nil) + distributor.On("MetricsForLabelMatchers", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return([]model.Metric{}, nil) + distributor.On("MetricsForLabelMatchersStream", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return([]model.Metric{}, nil) queryable, _, _ := New(cfg, overrides, distributor, queryables, nil, log.NewNopLogger()) q, err := queryable.Querier(util.TimeToMillis(testData.queryStartTime), util.TimeToMillis(testData.queryEndTime)) @@ -1119,8 +1119,8 @@ func TestQuerier_ValidateQueryTimeRange_MaxQueryLookback(t *testing.T) { t.Run("label names", func(t *testing.T) { distributor := &MockDistributor{} - distributor.On("LabelNames", mock.Anything, mock.Anything, mock.Anything).Return([]string{}, nil) - distributor.On("LabelNamesStream", mock.Anything, mock.Anything, mock.Anything).Return([]string{}, nil) + distributor.On("LabelNames", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return([]string{}, nil) + distributor.On("LabelNamesStream", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return([]string{}, nil) queryable, _, _ := New(cfg, overrides, distributor, queryables, nil, log.NewNopLogger()) q, err := queryable.Querier(util.TimeToMillis(testData.queryStartTime), util.TimeToMillis(testData.queryEndTime)) @@ -1147,8 +1147,8 @@ func TestQuerier_ValidateQueryTimeRange_MaxQueryLookback(t *testing.T) { labels.MustNewMatcher(labels.MatchNotEqual, "route", "get_user"), } distributor := &MockDistributor{} - distributor.On("MetricsForLabelMatchers", mock.Anything, mock.Anything, mock.Anything, 
matchers).Return([]model.Metric{}, nil) - distributor.On("MetricsForLabelMatchersStream", mock.Anything, mock.Anything, mock.Anything, matchers).Return([]model.Metric{}, nil) + distributor.On("MetricsForLabelMatchers", mock.Anything, mock.Anything, mock.Anything, mock.Anything, matchers).Return([]model.Metric{}, nil) + distributor.On("MetricsForLabelMatchersStream", mock.Anything, mock.Anything, mock.Anything, mock.Anything, matchers).Return([]model.Metric{}, nil) queryable, _, _ := New(cfg, overrides, distributor, queryables, nil, log.NewNopLogger()) q, err := queryable.Querier(util.TimeToMillis(testData.queryStartTime), util.TimeToMillis(testData.queryEndTime)) @@ -1165,7 +1165,7 @@ func TestQuerier_ValidateQueryTimeRange_MaxQueryLookback(t *testing.T) { args := distributor.Calls[0].Arguments assert.InDelta(t, util.TimeToMillis(testData.expectedMetadataStartTime), int64(args.Get(1).(model.Time)), delta) assert.InDelta(t, util.TimeToMillis(testData.expectedMetadataEndTime), int64(args.Get(2).(model.Time)), delta) - assert.Equal(t, matchers, args.Get(3).([]*labels.Matcher)) + assert.Equal(t, matchers, args.Get(4).([]*labels.Matcher)) } else { // Ensure no query has been executed (because skipped). assert.Len(t, distributor.Calls, 0) @@ -1174,8 +1174,8 @@ func TestQuerier_ValidateQueryTimeRange_MaxQueryLookback(t *testing.T) { t.Run("label values", func(t *testing.T) { distributor := &MockDistributor{} - distributor.On("LabelValuesForLabelName", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return([]string{}, nil) - distributor.On("LabelValuesForLabelNameStream", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return([]string{}, nil) + distributor.On("LabelValuesForLabelName", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return([]string{}, nil) + distributor.On("LabelValuesForLabelNameStream", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return([]string{}, nil) queryable, _, _ := New(cfg, overrides, distributor, queryables, nil, log.NewNopLogger()) q, err := queryable.Querier(util.TimeToMillis(testData.queryStartTime), util.TimeToMillis(testData.queryEndTime)) @@ -1303,22 +1303,22 @@ func (m *errDistributor) QueryStream(ctx context.Context, from, to model.Time, m func (m *errDistributor) QueryExemplars(ctx context.Context, from, to model.Time, matchers ...[]*labels.Matcher) (*client.ExemplarQueryResponse, error) { return nil, errDistributorError } -func (m *errDistributor) LabelValuesForLabelName(context.Context, model.Time, model.Time, model.LabelName, ...*labels.Matcher) ([]string, error) { +func (m *errDistributor) LabelValuesForLabelName(context.Context, model.Time, model.Time, model.LabelName, *storage.LabelHints, ...*labels.Matcher) ([]string, error) { return nil, errDistributorError } -func (m *errDistributor) LabelValuesForLabelNameStream(context.Context, model.Time, model.Time, model.LabelName, ...*labels.Matcher) ([]string, error) { +func (m *errDistributor) LabelValuesForLabelNameStream(context.Context, model.Time, model.Time, model.LabelName, *storage.LabelHints, ...*labels.Matcher) ([]string, error) { return nil, errDistributorError } -func (m *errDistributor) LabelNames(context.Context, model.Time, model.Time) ([]string, error) { +func (m *errDistributor) LabelNames(context.Context, model.Time, model.Time, *storage.LabelHints) ([]string, error) { return nil, errDistributorError } -func (m *errDistributor) 
LabelNamesStream(context.Context, model.Time, model.Time) ([]string, error) { +func (m *errDistributor) LabelNamesStream(context.Context, model.Time, model.Time, *storage.LabelHints) ([]string, error) { return nil, errDistributorError } -func (m *errDistributor) MetricsForLabelMatchers(ctx context.Context, from, through model.Time, matchers ...*labels.Matcher) ([]model.Metric, error) { +func (m *errDistributor) MetricsForLabelMatchers(ctx context.Context, from, through model.Time, hints *storage.SelectHints, matchers ...*labels.Matcher) ([]model.Metric, error) { return nil, errDistributorError } -func (m *errDistributor) MetricsForLabelMatchersStream(ctx context.Context, from, through model.Time, matchers ...*labels.Matcher) ([]model.Metric, error) { +func (m *errDistributor) MetricsForLabelMatchersStream(ctx context.Context, from, through model.Time, hints *storage.SelectHints, matchers ...*labels.Matcher) ([]model.Metric, error) { return nil, errDistributorError } @@ -1354,27 +1354,27 @@ func (d *emptyDistributor) QueryExemplars(ctx context.Context, from, to model.Ti return nil, nil } -func (d *emptyDistributor) LabelValuesForLabelName(context.Context, model.Time, model.Time, model.LabelName, ...*labels.Matcher) ([]string, error) { +func (d *emptyDistributor) LabelValuesForLabelName(context.Context, model.Time, model.Time, model.LabelName, *storage.LabelHints, ...*labels.Matcher) ([]string, error) { return nil, nil } -func (d *emptyDistributor) LabelValuesForLabelNameStream(context.Context, model.Time, model.Time, model.LabelName, ...*labels.Matcher) ([]string, error) { +func (d *emptyDistributor) LabelValuesForLabelNameStream(context.Context, model.Time, model.Time, model.LabelName, *storage.LabelHints, ...*labels.Matcher) ([]string, error) { return nil, nil } -func (d *emptyDistributor) LabelNames(context.Context, model.Time, model.Time) ([]string, error) { +func (d *emptyDistributor) LabelNames(context.Context, model.Time, model.Time, *storage.LabelHints) ([]string, error) { return nil, nil } -func (d *emptyDistributor) LabelNamesStream(context.Context, model.Time, model.Time) ([]string, error) { +func (d *emptyDistributor) LabelNamesStream(context.Context, model.Time, model.Time, *storage.LabelHints) ([]string, error) { return nil, nil } -func (d *emptyDistributor) MetricsForLabelMatchers(ctx context.Context, from, through model.Time, matchers ...*labels.Matcher) ([]model.Metric, error) { +func (d *emptyDistributor) MetricsForLabelMatchers(ctx context.Context, from, through model.Time, hints *storage.SelectHints, matchers ...*labels.Matcher) ([]model.Metric, error) { return nil, nil } -func (d *emptyDistributor) MetricsForLabelMatchersStream(ctx context.Context, from, through model.Time, matchers ...*labels.Matcher) ([]model.Metric, error) { +func (d *emptyDistributor) MetricsForLabelMatchersStream(ctx context.Context, from, through model.Time, hints *storage.SelectHints, matchers ...*labels.Matcher) ([]model.Metric, error) { return nil, nil } @@ -1411,7 +1411,6 @@ type mockStoreQuerier struct { // Select implements storage.Querier interface. // The bool passed is ignored because the series is always sorted. func (q *mockStoreQuerier) Select(ctx context.Context, _ bool, sp *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet { - // We will hit this for /series lookup when -querier.query-store-for-labels-enabled is set. // If we don't skip here, it'll make /series lookups extremely slow as all the chunks will be loaded. 
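A few hunks below, MetricsToSeriesSet in pkg/querier/series/series_set.go stops calling ctx.Err() on every loop iteration and instead checks once every util.CheckContextEveryNIterations iterations, since a per-item cancellation check is measurable overhead on large series lists. A self-contained sketch of the pattern — the constant's value (100) and the process function are assumptions for illustration, not taken from the diff:

package main

import (
	"context"
	"fmt"
)

// checkContextEveryNIterations amortizes the cost of ctx.Err(); the real
// constant lives in pkg/util and its value here is assumed.
const checkContextEveryNIterations = 100

func process(ctx context.Context, items []int) error {
	for i := range items {
		// Bail out promptly on cancellation, but only pay for the check
		// once per batch of iterations rather than on every item.
		if (i+1)%checkContextEveryNIterations == 0 && ctx.Err() != nil {
			return ctx.Err()
		}
		_ = items[i] // ... real per-item work ...
	}
	return nil
}

func main() {
	fmt.Println(process(context.Background(), make([]int, 1000)))
}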
// That flag is only to be set with blocks storage engine, and this is a protective measure. if sp != nil && sp.Func == "series" { diff --git a/pkg/querier/series/series_set.go b/pkg/querier/series/series_set.go index 0605ac9437..f246d73816 100644 --- a/pkg/querier/series/series_set.go +++ b/pkg/querier/series/series_set.go @@ -27,6 +27,7 @@ import ( "github.com/prometheus/prometheus/util/annotations" "github.com/cortexproject/cortex/pkg/querier/iterators" + "github.com/cortexproject/cortex/pkg/util" ) // ConcreteSeriesSet implements storage.SeriesSet. @@ -143,8 +144,8 @@ func MatrixToSeriesSet(sortSeries bool, m model.Matrix) storage.SeriesSet { // MetricsToSeriesSet creates a storage.SeriesSet from a []metric.Metric func MetricsToSeriesSet(ctx context.Context, sortSeries bool, ms []model.Metric) storage.SeriesSet { series := make([]storage.Series, 0, len(ms)) - for _, m := range ms { - if ctx.Err() != nil { + for i, m := range ms { + if (i+1)%util.CheckContextEveryNIterations == 0 && ctx.Err() != nil { return storage.ErrSeriesSet(ctx.Err()) } series = append(series, &ConcreteSeries{ diff --git a/pkg/querier/testutils.go b/pkg/querier/testutils.go index a335bff618..67917b0c18 100644 --- a/pkg/querier/testutils.go +++ b/pkg/querier/testutils.go @@ -7,6 +7,7 @@ import ( "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/scrape" + "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" @@ -32,28 +33,28 @@ func (m *MockDistributor) QueryStream(ctx context.Context, from, to model.Time, args := m.Called(ctx, from, to, matchers) return args.Get(0).(*client.QueryStreamResponse), args.Error(1) } -func (m *MockDistributor) LabelValuesForLabelName(ctx context.Context, from, to model.Time, lbl model.LabelName, matchers ...*labels.Matcher) ([]string, error) { - args := m.Called(ctx, from, to, lbl, matchers) +func (m *MockDistributor) LabelValuesForLabelName(ctx context.Context, from, to model.Time, lbl model.LabelName, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, error) { + args := m.Called(ctx, from, to, lbl, hints, matchers) return args.Get(0).([]string), args.Error(1) } -func (m *MockDistributor) LabelValuesForLabelNameStream(ctx context.Context, from, to model.Time, lbl model.LabelName, matchers ...*labels.Matcher) ([]string, error) { - args := m.Called(ctx, from, to, lbl, matchers) +func (m *MockDistributor) LabelValuesForLabelNameStream(ctx context.Context, from, to model.Time, lbl model.LabelName, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, error) { + args := m.Called(ctx, from, to, lbl, hints, matchers) return args.Get(0).([]string), args.Error(1) } -func (m *MockDistributor) LabelNames(ctx context.Context, from, to model.Time) ([]string, error) { - args := m.Called(ctx, from, to) +func (m *MockDistributor) LabelNames(ctx context.Context, from, to model.Time, hints *storage.LabelHints) ([]string, error) { + args := m.Called(ctx, from, to, hints) return args.Get(0).([]string), args.Error(1) } -func (m *MockDistributor) LabelNamesStream(ctx context.Context, from, to model.Time) ([]string, error) { - args := m.Called(ctx, from, to) +func (m *MockDistributor) LabelNamesStream(ctx context.Context, from, to model.Time, hints *storage.LabelHints) ([]string, error) { + args := m.Called(ctx, from, to, hints) return args.Get(0).([]string), args.Error(1) } -func (m *MockDistributor) 
MetricsForLabelMatchers(ctx context.Context, from, to model.Time, matchers ...*labels.Matcher) ([]model.Metric, error) { - args := m.Called(ctx, from, to, matchers) +func (m *MockDistributor) MetricsForLabelMatchers(ctx context.Context, from, to model.Time, hints *storage.SelectHints, matchers ...*labels.Matcher) ([]model.Metric, error) { + args := m.Called(ctx, from, to, hints, matchers) return args.Get(0).([]model.Metric), args.Error(1) } -func (m *MockDistributor) MetricsForLabelMatchersStream(ctx context.Context, from, to model.Time, matchers ...*labels.Matcher) ([]model.Metric, error) { - args := m.Called(ctx, from, to, matchers) +func (m *MockDistributor) MetricsForLabelMatchersStream(ctx context.Context, from, to model.Time, hints *storage.SelectHints, matchers ...*labels.Matcher) ([]model.Metric, error) { + args := m.Called(ctx, from, to, hints, matchers) return args.Get(0).([]model.Metric), args.Error(1) } diff --git a/pkg/querier/tripperware/custom.go b/pkg/querier/tripperware/custom.go new file mode 100644 index 0000000000..283262af7d --- /dev/null +++ b/pkg/querier/tripperware/custom.go @@ -0,0 +1,35 @@ +package tripperware + +import ( + "fmt" + + "github.com/gogo/protobuf/types" +) + +func (e *Extent) ToResponse() (Response, error) { + msg, err := types.EmptyAny(e.Response) + if err != nil { + return nil, err + } + + if err := types.UnmarshalAny(e.Response, msg); err != nil { + return nil, err + } + + resp, ok := msg.(Response) + if !ok { + return nil, fmt.Errorf("bad cached type") + } + return resp, nil +} + +func (m *Sample) GetTimestampMs() int64 { + if m != nil { + if m.Sample != nil { + return m.Sample.TimestampMs + } else if m.Histogram != nil { + return m.Histogram.TimestampMs + } + } + return 0 +} diff --git a/pkg/querier/tripperware/instantquery/custom.go b/pkg/querier/tripperware/instantquery/custom.go deleted file mode 100644 index c88f9284e1..0000000000 --- a/pkg/querier/tripperware/instantquery/custom.go +++ /dev/null @@ -1,12 +0,0 @@ -package instantquery - -func (m *Sample) GetTimestampMs() int64 { - if m != nil { - if m.Sample != nil { - return m.Sample.TimestampMs - } else if m.Histogram != nil { - return m.Histogram.TimestampMs - } - } - return 0 -} diff --git a/pkg/querier/tripperware/instantquery/instant_query.go b/pkg/querier/tripperware/instantquery/instant_query.go index 130dc3e4c9..e39617b5f4 100644 --- a/pkg/querier/tripperware/instantquery/instant_query.go +++ b/pkg/querier/tripperware/instantquery/instant_query.go @@ -7,7 +7,6 @@ import ( "io" "net/http" "net/url" - "sort" "strings" "time" "unsafe" @@ -17,15 +16,9 @@ import ( jsoniter "github.com/json-iterator/go" "github.com/opentracing/opentracing-go" otlog "github.com/opentracing/opentracing-go/log" - "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/model/labels" - "github.com/prometheus/prometheus/model/timestamp" - promqlparser "github.com/prometheus/prometheus/promql/parser" - "github.com/thanos-io/thanos/pkg/strutil" "github.com/weaveworks/common/httpgrpc" "google.golang.org/grpc/status" - "github.com/cortexproject/cortex/pkg/cortexpb" "github.com/cortexproject/cortex/pkg/querier/tripperware" "github.com/cortexproject/cortex/pkg/querier/tripperware/queryrange" "github.com/cortexproject/cortex/pkg/util" @@ -33,7 +26,7 @@ import ( ) var ( - InstantQueryCodec tripperware.Codec = NewInstantQueryCodec("", true) + InstantQueryCodec tripperware.Codec = newInstantQueryCodec("", true) json = jsoniter.Config{ EscapeHTML: false, // No HTML in our responses. 
@@ -42,72 +35,6 @@ var ( }.Froze() ) -type PrometheusRequest struct { - tripperware.Request - Time int64 - Stats string - Query string - Path string - Headers http.Header -} - -// GetTime returns time in milliseconds. -func (r *PrometheusRequest) GetTime() int64 { - return r.Time -} - -// GetStart returns always 0 for instant query. -func (r *PrometheusRequest) GetStart() int64 { - return 0 -} - -// GetEnd returns always 0 for instant query. -func (r *PrometheusRequest) GetEnd() int64 { - return 0 -} - -// GetStep returns always 0 for instant query. -func (r *PrometheusRequest) GetStep() int64 { - return 0 -} - -// GetQuery returns the query of the request. -func (r *PrometheusRequest) GetQuery() string { - return r.Query -} - -// WithStartEnd clone the current request with different start and end timestamp. -func (r *PrometheusRequest) WithStartEnd(int64, int64) tripperware.Request { - return r -} - -// WithQuery clone the current request with a different query. -func (r *PrometheusRequest) WithQuery(query string) tripperware.Request { - q := *r - q.Query = query - return &q -} - -// LogToSpan writes information about this request to an OpenTracing span -func (r *PrometheusRequest) LogToSpan(sp opentracing.Span) { - sp.LogFields( - otlog.String("query", r.GetQuery()), - otlog.String("time", timestamp.Time(r.GetTime()).String()), - ) -} - -// GetStats returns the stats of the request. -func (r *PrometheusRequest) GetStats() string { - return r.Stats -} - -// WithStats clones the current `PrometheusRequest` with a new stats. -func (r *PrometheusRequest) WithStats(stats string) tripperware.Request { - q := *r - q.Stats = stats - return &q -} - type instantQueryCodec struct { tripperware.Codec compression queryrange.Compression @@ -115,10 +42,10 @@ type instantQueryCodec struct { now func() time.Time } -func NewInstantQueryCodec(c string, enableProtobuf bool) instantQueryCodec { +func newInstantQueryCodec(compressionStr string, enableProtobuf bool) instantQueryCodec { var compression queryrange.Compression - if c == "gzip" || c == "snappy" { - compression = queryrange.Compression(c) + if compressionStr == "gzip" { + compression = queryrange.Compression(compressionStr) } else { compression = queryrange.DisableCompression } @@ -129,22 +56,8 @@ func NewInstantQueryCodec(c string, enableProtobuf bool) instantQueryCodec { } } -func (resp *PrometheusInstantQueryResponse) HTTPHeaders() map[string][]string { - if resp != nil && resp.GetHeaders() != nil { - r := map[string][]string{} - for _, header := range resp.GetHeaders() { - if header != nil { - r[header.Name] = header.Values - } - } - - return r - } - return nil -} - func (c instantQueryCodec) DecodeRequest(_ context.Context, r *http.Request, forwardHeaders []string) (tripperware.Request, error) { - result := PrometheusRequest{Headers: map[string][]string{}} + result := tripperware.PrometheusRequest{Headers: map[string][]string{}} var err error result.Time, err = util.ParseTimeParam(r, "time", c.now().Unix()) if err != nil { @@ -169,7 +82,7 @@ func (c instantQueryCodec) DecodeRequest(_ context.Context, r *http.Request, for } func (instantQueryCodec) DecodeResponse(ctx context.Context, r *http.Response, _ tripperware.Request) (tripperware.Response, error) { - log, ctx := spanlogger.New(ctx, "PrometheusInstantQueryResponse") //nolint:ineffassign,staticcheck + log, ctx := spanlogger.New(ctx, "tripperware.PrometheusResponse") //nolint:ineffassign,staticcheck defer log.Finish() if err := ctx.Err(); err != nil { @@ -185,7 +98,7 @@ func 
(instantQueryCodec) DecodeResponse(ctx context.Context, r *http.Response, _ return nil, httpgrpc.Errorf(r.StatusCode, string(buf)) } - var resp PrometheusInstantQueryResponse + var resp tripperware.PrometheusResponse if r.Header != nil && r.Header.Get("Content-Type") == "application/x-protobuf" { err = proto.Unmarshal(buf, &resp) } else { @@ -204,7 +117,7 @@ func (instantQueryCodec) DecodeResponse(ctx context.Context, r *http.Response, _ } func (c instantQueryCodec) EncodeRequest(ctx context.Context, r tripperware.Request) (*http.Request, error) { - promReq, ok := r.(*PrometheusRequest) + promReq, ok := r.(*tripperware.PrometheusRequest) if !ok { return nil, httpgrpc.Errorf(http.StatusBadRequest, "invalid request format") } @@ -230,7 +143,7 @@ func (c instantQueryCodec) EncodeRequest(ctx context.Context, r tripperware.Requ } } - if c.compression == queryrange.SnappyCompression || c.compression == queryrange.GzipCompression { + if c.compression == queryrange.GzipCompression { h.Set("Accept-Encoding", string(c.compression)) } if c.enableProtobuf { @@ -254,7 +167,7 @@ func (instantQueryCodec) EncodeResponse(ctx context.Context, res tripperware.Res sp, _ := opentracing.StartSpanFromContext(ctx, "APIResponse.ToHTTPResponse") defer sp.Finish() - a, ok := res.(*PrometheusInstantQueryResponse) + a, ok := res.(*tripperware.PrometheusResponse) if !ok { return nil, httpgrpc.Errorf(http.StatusInternalServerError, "invalid response format") } @@ -278,293 +191,15 @@ func (instantQueryCodec) EncodeResponse(ctx context.Context, res tripperware.Res } func (instantQueryCodec) MergeResponse(ctx context.Context, req tripperware.Request, responses ...tripperware.Response) (tripperware.Response, error) { - sp, _ := opentracing.StartSpanFromContext(ctx, "PrometheusInstantQueryResponse.MergeResponse") + sp, _ := opentracing.StartSpanFromContext(ctx, "InstantQueryResponse.MergeResponse") sp.SetTag("response_count", len(responses)) defer sp.Finish() if len(responses) == 0 { - return NewEmptyPrometheusInstantQueryResponse(), nil - } else if len(responses) == 1 { - return responses[0], nil - } - - promResponses := make([]*PrometheusInstantQueryResponse, 0, len(responses)) - warnings := make([][]string, 0, len(responses)) - for _, resp := range responses { - promResponses = append(promResponses, resp.(*PrometheusInstantQueryResponse)) - if w := resp.(*PrometheusInstantQueryResponse).Warnings; w != nil { - warnings = append(warnings, w) - } - } - - var data PrometheusInstantQueryData - // For now, we only shard queries that returns a vector. 
- switch promResponses[0].Data.ResultType { - case model.ValVector.String(): - v, err := vectorMerge(ctx, req, promResponses) - if err != nil { - return nil, err - } - data = PrometheusInstantQueryData{ - ResultType: model.ValVector.String(), - Result: PrometheusInstantQueryResult{ - Result: &PrometheusInstantQueryResult_Vector{ - Vector: v, - }, - }, - Stats: statsMerge(promResponses), - } - case model.ValMatrix.String(): - sampleStreams, err := matrixMerge(ctx, promResponses) - if err != nil { - return nil, err - } - - data = PrometheusInstantQueryData{ - ResultType: model.ValMatrix.String(), - Result: PrometheusInstantQueryResult{ - Result: &PrometheusInstantQueryResult_Matrix{ - Matrix: &Matrix{ - SampleStreams: sampleStreams, - }, - }, - }, - Stats: statsMerge(promResponses), - } - default: - return nil, fmt.Errorf("unexpected result type on instant query: %s", promResponses[0].Data.ResultType) - } - - res := &PrometheusInstantQueryResponse{ - Status: queryrange.StatusSuccess, - Data: data, - Warnings: strutil.MergeUnsortedSlices(warnings...), - } - return res, nil -} - -func vectorMerge(ctx context.Context, req tripperware.Request, resps []*PrometheusInstantQueryResponse) (*Vector, error) { - output := map[string]*Sample{} - metrics := []string{} // Used to preserve the order for topk and bottomk. - sortPlan, err := sortPlanForQuery(req.GetQuery()) - if err != nil { - return nil, err - } - buf := make([]byte, 0, 1024) - for _, resp := range resps { - if err = ctx.Err(); err != nil { - return nil, err - } - if resp == nil { - continue - } - // Merge vector result samples only. Skip other types such as - // string, scalar as those are not sharable. - if resp.Data.Result.GetVector() == nil { - continue - } - for _, sample := range resp.Data.Result.GetVector().Samples { - s := sample - if s == nil { - continue - } - metric := string(cortexpb.FromLabelAdaptersToLabels(sample.Labels).Bytes(buf)) - if existingSample, ok := output[metric]; !ok { - output[metric] = s - metrics = append(metrics, metric) // Preserve the order of metric. - } else if existingSample.GetTimestampMs() < s.GetTimestampMs() { - // Choose the latest sample if we see overlap. - output[metric] = s - } - } - } - - result := &Vector{ - Samples: make([]*Sample, 0, len(output)), - } - - if len(output) == 0 { - return result, nil - } - - if sortPlan == mergeOnly { - for _, k := range metrics { - result.Samples = append(result.Samples, output[k]) - } - return result, nil + return tripperware.NewEmptyPrometheusResponse(true), nil } - samples := make([]*pair, 0, len(output)) - for k, v := range output { - samples = append(samples, &pair{ - metric: k, - s: v, - }) - } - - // TODO: What if we have mixed float and histogram samples in the response? - // Then the sorting behavior is undefined. Prometheus doesn't handle it. - sort.Slice(samples, func(i, j int) bool { - // Order is determined by vector. 
- switch sortPlan { - case sortByValuesAsc: - return getSortValueFromPair(samples, i) < getSortValueFromPair(samples, j) - case sortByValuesDesc: - return getSortValueFromPair(samples, i) > getSortValueFromPair(samples, j) - } - return samples[i].metric < samples[j].metric - }) - - for _, p := range samples { - result.Samples = append(result.Samples, p.s) - } - return result, nil -} - -type sortPlan int - -const ( - mergeOnly sortPlan = 0 - sortByValuesAsc sortPlan = 1 - sortByValuesDesc sortPlan = 2 - sortByLabels sortPlan = 3 -) - -type pair struct { - metric string - s *Sample -} - -// getSortValueFromPair gets the float value used for sorting from samples. -// If float sample, use sample value. If histogram sample, use histogram sum. -// This is the same behavior as Prometheus https://github.com/prometheus/prometheus/blob/v2.53.0/promql/functions.go#L1595. -func getSortValueFromPair(samples []*pair, i int) float64 { - if samples[i].s.Histogram != nil { - return samples[i].s.Histogram.Histogram.Sum - } - // Impossible to have both histogram and sample nil. - return samples[i].s.Sample.Value -} - -func sortPlanForQuery(q string) (sortPlan, error) { - expr, err := promqlparser.ParseExpr(q) - if err != nil { - return 0, err - } - // Check if the root expression is topk or bottomk - if aggr, ok := expr.(*promqlparser.AggregateExpr); ok { - if aggr.Op == promqlparser.TOPK || aggr.Op == promqlparser.BOTTOMK { - return mergeOnly, nil - } - } - checkForSort := func(expr promqlparser.Expr) (sortAsc, sortDesc bool) { - if n, ok := expr.(*promqlparser.Call); ok { - if n.Func != nil { - if n.Func.Name == "sort" { - sortAsc = true - } - if n.Func.Name == "sort_desc" { - sortDesc = true - } - } - } - return sortAsc, sortDesc - } - // Check the root expression for sort - if sortAsc, sortDesc := checkForSort(expr); sortAsc || sortDesc { - if sortAsc { - return sortByValuesAsc, nil - } - return sortByValuesDesc, nil - } - - // If the root expression is a binary expression, check the LHS and RHS for sort - if bin, ok := expr.(*promqlparser.BinaryExpr); ok { - if sortAsc, sortDesc := checkForSort(bin.LHS); sortAsc || sortDesc { - if sortAsc { - return sortByValuesAsc, nil - } - return sortByValuesDesc, nil - } - if sortAsc, sortDesc := checkForSort(bin.RHS); sortAsc || sortDesc { - if sortAsc { - return sortByValuesAsc, nil - } - return sortByValuesDesc, nil - } - } - return sortByLabels, nil -} - -func matrixMerge(ctx context.Context, resps []*PrometheusInstantQueryResponse) ([]tripperware.SampleStream, error) { - output := make(map[string]tripperware.SampleStream) - for _, resp := range resps { - if err := ctx.Err(); err != nil { - return nil, err - } - if resp == nil { - continue - } - if resp.Data.Result.GetMatrix() == nil { - continue - } - tripperware.MergeSampleStreams(output, resp.Data.Result.GetMatrix().GetSampleStreams()) - } - - keys := make([]string, 0, len(output)) - for key := range output { - keys = append(keys, key) - } - sort.Strings(keys) - - result := make([]tripperware.SampleStream, 0, len(output)) - for _, key := range keys { - result = append(result, output[key]) - } - - return result, nil -} - -// NewEmptyPrometheusInstantQueryResponse returns an empty successful Prometheus query range response. 
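To make the sort-plan rules deleted above concrete (the consolidated merge path in the shared tripperware package is assumed to preserve the same behavior): a root-level topk or bottomk keeps the order the shards produced, an outer sort/sort_desc — including on either side of a binary expression — orders by sample value, and anything else falls back to ordering by labels. Illustrative inputs and the plans sortPlanForQuery returned for them:

//   topk(5, up)                    -> mergeOnly        (preserve shard order)
//   sort(sum by (job) (up))        -> sortByValuesAsc
//   sort_desc(sum(up)) / vector(1) -> sortByValuesDesc (sort detected on the LHS)
//   sum by (job) (up)              -> sortByLabels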
-func NewEmptyPrometheusInstantQueryResponse() *PrometheusInstantQueryResponse { - return &PrometheusInstantQueryResponse{ - Status: queryrange.StatusSuccess, - Data: PrometheusInstantQueryData{ - ResultType: model.ValVector.String(), - Result: PrometheusInstantQueryResult{ - Result: &PrometheusInstantQueryResult_Vector{}, - }, - }, - } -} - -func statsMerge(resps []*PrometheusInstantQueryResponse) *tripperware.PrometheusResponseStats { - output := map[int64]*tripperware.PrometheusResponseQueryableSamplesStatsPerStep{} - hasStats := false - for _, resp := range resps { - if resp.Data.Stats == nil { - continue - } - - hasStats = true - if resp.Data.Stats.Samples == nil { - continue - } - - for _, s := range resp.Data.Stats.Samples.TotalQueryableSamplesPerStep { - if stats, ok := output[s.GetTimestampMs()]; ok { - stats.Value += s.Value - } else { - output[s.GetTimestampMs()] = s - } - } - } - - if !hasStats { - return nil - } - - return tripperware.StatsMerge(output) + return tripperware.MergeResponse(ctx, true, req, responses...) } func decorateWithParamName(err error, field string) error { @@ -574,145 +209,3 @@ func decorateWithParamName(err error, field string) error { } return fmt.Errorf(errTmpl, field, err) } - -func init() { - jsoniter.RegisterTypeEncoderFunc("instantquery.Sample", encodeSample, marshalJSONIsEmpty) - jsoniter.RegisterTypeDecoderFunc("instantquery.Sample", decodeSample) -} - -func marshalJSONIsEmpty(ptr unsafe.Pointer) bool { - return false -} - -func decodeSample(ptr unsafe.Pointer, iter *jsoniter.Iterator) { - ss := (*Sample)(ptr) - for field := iter.ReadObject(); field != ""; field = iter.ReadObject() { - switch field { - case "metric": - metricString := iter.ReadAny().ToString() - lbls := labels.Labels{} - if err := json.UnmarshalFromString(metricString, &lbls); err != nil { - iter.ReportError("unmarshal Sample", err.Error()) - return - } - ss.Labels = cortexpb.FromLabelsToLabelAdapters(lbls) - case "value": - ss.Sample = &cortexpb.Sample{} - cortexpb.SampleJsoniterDecode(unsafe.Pointer(ss.Sample), iter) - case "histogram": - ss.Histogram = &tripperware.SampleHistogramPair{} - tripperware.UnmarshalSampleHistogramPairJSON(unsafe.Pointer(ss.Histogram), iter) - default: - iter.ReportError("unmarshal Sample", fmt.Sprint("unexpected key:", field)) - return - } - } -} - -func encodeSample(ptr unsafe.Pointer, stream *jsoniter.Stream) { - ss := (*Sample)(ptr) - stream.WriteObjectStart() - - stream.WriteObjectField(`metric`) - lbls, err := cortexpb.FromLabelAdaptersToLabels(ss.Labels).MarshalJSON() - if err != nil { - stream.Error = err - return - } - stream.SetBuffer(append(stream.Buffer(), lbls...)) - - if ss.Sample != nil { - stream.WriteMore() - stream.WriteObjectField(`value`) - cortexpb.SampleJsoniterEncode(unsafe.Pointer(ss.Sample), stream) - } - - if ss.Histogram != nil { - stream.WriteMore() - stream.WriteObjectField(`histogram`) - tripperware.MarshalSampleHistogramPairJSON(unsafe.Pointer(ss.Histogram), stream) - } - - stream.WriteObjectEnd() -} - -// UnmarshalJSON implements json.Unmarshaler. 
-func (s *PrometheusInstantQueryData) UnmarshalJSON(data []byte) error { - var queryData struct { - ResultType string `json:"resultType"` - Stats *tripperware.PrometheusResponseStats `json:"stats,omitempty"` - } - - if err := json.Unmarshal(data, &queryData); err != nil { - return err - } - s.ResultType = queryData.ResultType - s.Stats = queryData.Stats - switch s.ResultType { - case model.ValVector.String(): - var result struct { - Samples []*Sample `json:"result"` - } - if err := json.Unmarshal(data, &result); err != nil { - return err - } - s.Result = PrometheusInstantQueryResult{ - Result: &PrometheusInstantQueryResult_Vector{Vector: &Vector{ - Samples: result.Samples, - }}, - } - case model.ValMatrix.String(): - var result struct { - SampleStreams []tripperware.SampleStream `json:"result"` - } - if err := json.Unmarshal(data, &result); err != nil { - return err - } - s.Result = PrometheusInstantQueryResult{ - Result: &PrometheusInstantQueryResult_Matrix{Matrix: &Matrix{ - SampleStreams: result.SampleStreams, - }}, - } - default: - s.Result = PrometheusInstantQueryResult{ - Result: &PrometheusInstantQueryResult_RawBytes{data}, - } - } - return nil -} - -// MarshalJSON implements json.Marshaler. -func (s *PrometheusInstantQueryData) MarshalJSON() ([]byte, error) { - switch s.ResultType { - case model.ValVector.String(): - res := struct { - ResultType string `json:"resultType"` - Data []*Sample `json:"result"` - Stats *tripperware.PrometheusResponseStats `json:"stats,omitempty"` - }{ - ResultType: s.ResultType, - Data: s.Result.GetVector().Samples, - Stats: s.Stats, - } - if res.Data == nil { - res.Data = []*Sample{} - } - return json.Marshal(res) - case model.ValMatrix.String(): - res := struct { - ResultType string `json:"resultType"` - Data []tripperware.SampleStream `json:"result"` - Stats *tripperware.PrometheusResponseStats `json:"stats,omitempty"` - }{ - ResultType: s.ResultType, - Data: s.Result.GetMatrix().SampleStreams, - Stats: s.Stats, - } - if res.Data == nil { - res.Data = []tripperware.SampleStream{} - } - return json.Marshal(res) - default: - return s.Result.GetRawBytes(), nil - } -} diff --git a/pkg/querier/tripperware/instantquery/instant_query_test.go b/pkg/querier/tripperware/instantquery/instant_query_test.go index 36b5456a18..fb7da2b3d5 100644 --- a/pkg/querier/tripperware/instantquery/instant_query_test.go +++ b/pkg/querier/tripperware/instantquery/instant_query_test.go @@ -4,6 +4,7 @@ import ( "bytes" "compress/gzip" "context" + "errors" "fmt" "io" "net/http" @@ -33,13 +34,13 @@ func TestRequest(t *testing.T) { for _, tc := range []struct { url string expectedURL string - expected *PrometheusRequest + expected *tripperware.PrometheusRequest expectedErr error }{ { url: "/api/v1/query?query=sum%28container_memory_rss%29+by+%28namespace%29&stats=all&time=1536673680", expectedURL: "/api/v1/query?query=sum%28container_memory_rss%29+by+%28namespace%29&stats=all&time=1536673680", - expected: &PrometheusRequest{ + expected: &tripperware.PrometheusRequest{ Path: "/api/v1/query", Time: 1536673680 * 1e3, Query: "sum(container_memory_rss) by (namespace)", @@ -52,7 +53,7 @@ func TestRequest(t *testing.T) { { url: "/api/v1/query?query=sum%28container_memory_rss%29+by+%28namespace%29&time=1536673680", expectedURL: "/api/v1/query?query=sum%28container_memory_rss%29+by+%28namespace%29&time=1536673680", - expected: &PrometheusRequest{ + expected: &tripperware.PrometheusRequest{ Path: "/api/v1/query", Time: 1536673680 * 1e3, Query: "sum(container_memory_rss) by (namespace)", @@ 
-65,7 +66,7 @@ func TestRequest(t *testing.T) { { url: "/api/v1/query?query=sum%28container_memory_rss%29+by+%28namespace%29", expectedURL: "/api/v1/query?query=sum%28container_memory_rss%29+by+%28namespace%29&time=", - expected: &PrometheusRequest{ + expected: &tripperware.PrometheusRequest{ Path: "/api/v1/query", Time: 0, Query: "sum(container_memory_rss) by (namespace)", @@ -401,7 +402,7 @@ func TestResponse(t *testing.T) { func TestMergeResponse(t *testing.T) { t.Parallel() - defaultReq := &PrometheusRequest{ + defaultReq := &tripperware.PrometheusRequest{ Query: "sum(up)", } for _, tc := range []struct { @@ -482,7 +483,7 @@ func TestMergeResponse(t *testing.T) { }, { name: "merge two responses with sort", - req: &PrometheusRequest{Query: "sort(sum by (job) (up))"}, + req: &tripperware.PrometheusRequest{Query: "sort(sum by (job) (up))"}, resps: []string{ `{"status":"success","data":{"resultType":"vector","result":[{"metric":{"__name__":"up","job":"foo"},"value":[1,"1"]}]}}`, `{"status":"success","data":{"resultType":"vector","result":[{"metric":{"__name__":"up","job":"bar"},"value":[1,"2"]}]}}`, @@ -491,7 +492,7 @@ func TestMergeResponse(t *testing.T) { }, { name: "merge two histogram responses with sort", - req: &PrometheusRequest{Query: "sort(sum by (job) (up))"}, + req: &tripperware.PrometheusRequest{Query: "sort(sum by (job) (up))"}, resps: []string{ `{"status":"success","data":{"resultType":"vector","result":[{"metric":{"__name__":"up","job":"foo"},"histogram":[1719528871.898,{"count":"6342","sum":"43.31319875499995","buckets":[[0,"0.0013810679320049755","0.0015060652591874421","1"]]}]}]}}`, `{"status":"success","data":{"resultType":"vector","result":[{"metric":{"__name__":"up","job":"bar"},"histogram":[1719528880,{"count":"1","sum":"0","buckets":[[0,"0.0013810679320049755","0.0015060652591874421","1"]]}]}]}}`, @@ -500,7 +501,7 @@ func TestMergeResponse(t *testing.T) { }, { name: "merge two responses with sort_desc", - req: &PrometheusRequest{Query: "sort_desc(sum by (job) (up))"}, + req: &tripperware.PrometheusRequest{Query: "sort_desc(sum by (job) (up))"}, resps: []string{ `{"status":"success","data":{"resultType":"vector","result":[{"metric":{"__name__":"up","job":"foo"},"value":[1,"1"]}]}}`, `{"status":"success","data":{"resultType":"vector","result":[{"metric":{"__name__":"up","job":"bar"},"value":[1,"2"]}]}}`, @@ -509,7 +510,7 @@ func TestMergeResponse(t *testing.T) { }, { name: "merge two histogram responses with sort_desc", - req: &PrometheusRequest{Query: "sort_desc(sum by (job) (up))"}, + req: &tripperware.PrometheusRequest{Query: "sort_desc(sum by (job) (up))"}, resps: []string{ `{"status":"success","data":{"resultType":"vector","result":[{"metric":{"__name__":"up","job":"foo"},"histogram":[1719528871.898,{"count":"6342","sum":"43.31319875499995","buckets":[[0,"0.0013810679320049755","0.0015060652591874421","1"]]}]}]}}`, `{"status":"success","data":{"resultType":"vector","result":[{"metric":{"__name__":"up","job":"bar"},"histogram":[1719528880,{"count":"1","sum":"0","buckets":[[0,"0.0013810679320049755","0.0015060652591874421","1"]]}]}]}}`, @@ -518,7 +519,7 @@ func TestMergeResponse(t *testing.T) { }, { name: "merge two responses with topk", - req: &PrometheusRequest{Query: "topk(10, up) by(job)"}, + req: &tripperware.PrometheusRequest{Query: "topk(10, up) by(job)"}, resps: []string{ `{"status":"success","data":{"resultType":"vector","result":[{"metric":{"__name__":"up","job":"foo"},"value":[1,"1"]}]}}`, 
`{"status":"success","data":{"resultType":"vector","result":[{"metric":{"__name__":"up","job":"bar"},"value":[1,"2"]}]}}`, @@ -527,7 +528,7 @@ func TestMergeResponse(t *testing.T) { }, { name: "merge two histogram responses with topk", - req: &PrometheusRequest{Query: "topk(10, up) by(job)"}, + req: &tripperware.PrometheusRequest{Query: "topk(10, up) by(job)"}, resps: []string{ `{"status":"success","data":{"resultType":"vector","result":[{"metric":{"__name__":"up","job":"foo"},"histogram":[1719528871.898,{"count":"6342","sum":"43.31319875499995","buckets":[[0,"0.0013810679320049755","0.0015060652591874421","1"]]}]}]}}`, `{"status":"success","data":{"resultType":"vector","result":[{"metric":{"__name__":"up","job":"bar"},"histogram":[1719528880,{"count":"1","sum":"0","buckets":[[0,"0.0013810679320049755","0.0015060652591874421","1"]]}]}]}}`, @@ -536,7 +537,7 @@ func TestMergeResponse(t *testing.T) { }, { name: "merge with warnings.", - req: &PrometheusRequest{Query: "topk(10, up) by(job)"}, + req: &tripperware.PrometheusRequest{Query: "topk(10, up) by(job)"}, resps: []string{ `{"status":"success","warnings":["warning1","warning2"],"data":{"resultType":"vector","result":[{"metric":{"__name__":"up","job":"foo"},"value":[1,"1"]}]}}`, `{"status":"success","warnings":["warning1","warning3"],"data":{"resultType":"vector","result":[{"metric":{"__name__":"up","job":"bar"},"value":[1,"2"]}]}}`, @@ -559,7 +560,7 @@ func TestMergeResponse(t *testing.T) { `{"status":"success","data":{"resultType":"string","result":[1662682521.409,"foo"]}}`, `{"status":"success","data":{"resultType":"string","result":[1662682521.409,"foo"]}}`, }, - expectedErr: fmt.Errorf("unexpected result type on instant query: %s", "string"), + expectedErr: errors.New("unexpected result type: string"), }, { name: "single matrix response", @@ -655,7 +656,7 @@ func TestMergeResponse(t *testing.T) { assert.Equal(t, tc.expectedErr, err) contents, err := io.ReadAll(dr.Body) assert.Equal(t, tc.expectedErr, err) - assert.Equal(t, string(contents), tc.expectedResp) + assert.Equal(t, tc.expectedResp, string(contents)) cancelCtx() }) } @@ -1766,12 +1767,12 @@ func Benchmark_Decode(b *testing.B) { }, } { b.Run(name, func(b *testing.B) { - r := PrometheusInstantQueryResponse{ - Data: PrometheusInstantQueryData{ + r := tripperware.PrometheusResponse{ + Data: tripperware.PrometheusData{ ResultType: model.ValMatrix.String(), - Result: PrometheusInstantQueryResult{ - Result: &PrometheusInstantQueryResult_Matrix{ - Matrix: &Matrix{ + Result: tripperware.PrometheusQueryResult{ + Result: &tripperware.PrometheusQueryResult_Matrix{ + Matrix: &tripperware.Matrix{ SampleStreams: tc.sampleStream, }, }, diff --git a/pkg/querier/tripperware/instantquery/instantquery.pb.go b/pkg/querier/tripperware/instantquery/instantquery.pb.go deleted file mode 100644 index 6324739eb3..0000000000 --- a/pkg/querier/tripperware/instantquery/instantquery.pb.go +++ /dev/null @@ -1,2508 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. 
-// source: instantquery.proto - -package instantquery - -import ( - bytes "bytes" - fmt "fmt" - cortexpb "github.com/cortexproject/cortex/pkg/cortexpb" - github_com_cortexproject_cortex_pkg_cortexpb "github.com/cortexproject/cortex/pkg/cortexpb" - tripperware "github.com/cortexproject/cortex/pkg/querier/tripperware" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -type PrometheusInstantQueryResponse struct { - Status string `protobuf:"bytes,1,opt,name=Status,proto3" json:"status"` - Data PrometheusInstantQueryData `protobuf:"bytes,2,opt,name=Data,proto3" json:"data,omitempty"` - ErrorType string `protobuf:"bytes,3,opt,name=ErrorType,proto3" json:"errorType,omitempty"` - Error string `protobuf:"bytes,4,opt,name=Error,proto3" json:"error,omitempty"` - Headers []*tripperware.PrometheusResponseHeader `protobuf:"bytes,5,rep,name=Headers,proto3" json:"-"` - Warnings []string `protobuf:"bytes,6,rep,name=Warnings,proto3" json:"warnings,omitempty"` -} - -func (m *PrometheusInstantQueryResponse) Reset() { *m = PrometheusInstantQueryResponse{} } -func (*PrometheusInstantQueryResponse) ProtoMessage() {} -func (*PrometheusInstantQueryResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_d2ce36475a368033, []int{0} -} -func (m *PrometheusInstantQueryResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PrometheusInstantQueryResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_PrometheusInstantQueryResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *PrometheusInstantQueryResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_PrometheusInstantQueryResponse.Merge(m, src) -} -func (m *PrometheusInstantQueryResponse) XXX_Size() int { - return m.Size() -} -func (m *PrometheusInstantQueryResponse) XXX_DiscardUnknown() { - xxx_messageInfo_PrometheusInstantQueryResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_PrometheusInstantQueryResponse proto.InternalMessageInfo - -func (m *PrometheusInstantQueryResponse) GetStatus() string { - if m != nil { - return m.Status - } - return "" -} - -func (m *PrometheusInstantQueryResponse) GetData() PrometheusInstantQueryData { - if m != nil { - return m.Data - } - return PrometheusInstantQueryData{} -} - -func (m *PrometheusInstantQueryResponse) GetErrorType() string { - if m != nil { - return m.ErrorType - } - return "" -} - -func (m *PrometheusInstantQueryResponse) GetError() string { - if m != nil { - return m.Error - } - return "" -} - -func (m *PrometheusInstantQueryResponse) GetHeaders() []*tripperware.PrometheusResponseHeader { - if m != nil { - return m.Headers - } - return nil -} - -func (m *PrometheusInstantQueryResponse) GetWarnings() []string { - if m != nil { - return m.Warnings - } - return nil -} - -type PrometheusInstantQueryData struct { - 
ResultType string `protobuf:"bytes,1,opt,name=ResultType,proto3" json:"resultType"` - Result PrometheusInstantQueryResult `protobuf:"bytes,2,opt,name=Result,proto3" json:"result"` - Stats *tripperware.PrometheusResponseStats `protobuf:"bytes,3,opt,name=stats,proto3" json:"stats,omitempty"` -} - -func (m *PrometheusInstantQueryData) Reset() { *m = PrometheusInstantQueryData{} } -func (*PrometheusInstantQueryData) ProtoMessage() {} -func (*PrometheusInstantQueryData) Descriptor() ([]byte, []int) { - return fileDescriptor_d2ce36475a368033, []int{1} -} -func (m *PrometheusInstantQueryData) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PrometheusInstantQueryData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_PrometheusInstantQueryData.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *PrometheusInstantQueryData) XXX_Merge(src proto.Message) { - xxx_messageInfo_PrometheusInstantQueryData.Merge(m, src) -} -func (m *PrometheusInstantQueryData) XXX_Size() int { - return m.Size() -} -func (m *PrometheusInstantQueryData) XXX_DiscardUnknown() { - xxx_messageInfo_PrometheusInstantQueryData.DiscardUnknown(m) -} - -var xxx_messageInfo_PrometheusInstantQueryData proto.InternalMessageInfo - -func (m *PrometheusInstantQueryData) GetResultType() string { - if m != nil { - return m.ResultType - } - return "" -} - -func (m *PrometheusInstantQueryData) GetResult() PrometheusInstantQueryResult { - if m != nil { - return m.Result - } - return PrometheusInstantQueryResult{} -} - -func (m *PrometheusInstantQueryData) GetStats() *tripperware.PrometheusResponseStats { - if m != nil { - return m.Stats - } - return nil -} - -type PrometheusInstantQueryResult struct { - // Types that are valid to be assigned to Result: - // *PrometheusInstantQueryResult_Vector - // *PrometheusInstantQueryResult_RawBytes - // *PrometheusInstantQueryResult_Matrix - Result isPrometheusInstantQueryResult_Result `protobuf_oneof:"result"` -} - -func (m *PrometheusInstantQueryResult) Reset() { *m = PrometheusInstantQueryResult{} } -func (*PrometheusInstantQueryResult) ProtoMessage() {} -func (*PrometheusInstantQueryResult) Descriptor() ([]byte, []int) { - return fileDescriptor_d2ce36475a368033, []int{2} -} -func (m *PrometheusInstantQueryResult) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PrometheusInstantQueryResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_PrometheusInstantQueryResult.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *PrometheusInstantQueryResult) XXX_Merge(src proto.Message) { - xxx_messageInfo_PrometheusInstantQueryResult.Merge(m, src) -} -func (m *PrometheusInstantQueryResult) XXX_Size() int { - return m.Size() -} -func (m *PrometheusInstantQueryResult) XXX_DiscardUnknown() { - xxx_messageInfo_PrometheusInstantQueryResult.DiscardUnknown(m) -} - -var xxx_messageInfo_PrometheusInstantQueryResult proto.InternalMessageInfo - -type isPrometheusInstantQueryResult_Result interface { - isPrometheusInstantQueryResult_Result() - Equal(interface{}) bool - MarshalTo([]byte) (int, error) - Size() int -} - -type PrometheusInstantQueryResult_Vector struct { - Vector *Vector `protobuf:"bytes,1,opt,name=vector,proto3,oneof"` -} 
-type PrometheusInstantQueryResult_RawBytes struct { - RawBytes []byte `protobuf:"bytes,2,opt,name=rawBytes,proto3,oneof"` -} -type PrometheusInstantQueryResult_Matrix struct { - Matrix *Matrix `protobuf:"bytes,3,opt,name=matrix,proto3,oneof"` -} - -func (*PrometheusInstantQueryResult_Vector) isPrometheusInstantQueryResult_Result() {} -func (*PrometheusInstantQueryResult_RawBytes) isPrometheusInstantQueryResult_Result() {} -func (*PrometheusInstantQueryResult_Matrix) isPrometheusInstantQueryResult_Result() {} - -func (m *PrometheusInstantQueryResult) GetResult() isPrometheusInstantQueryResult_Result { - if m != nil { - return m.Result - } - return nil -} - -func (m *PrometheusInstantQueryResult) GetVector() *Vector { - if x, ok := m.GetResult().(*PrometheusInstantQueryResult_Vector); ok { - return x.Vector - } - return nil -} - -func (m *PrometheusInstantQueryResult) GetRawBytes() []byte { - if x, ok := m.GetResult().(*PrometheusInstantQueryResult_RawBytes); ok { - return x.RawBytes - } - return nil -} - -func (m *PrometheusInstantQueryResult) GetMatrix() *Matrix { - if x, ok := m.GetResult().(*PrometheusInstantQueryResult_Matrix); ok { - return x.Matrix - } - return nil -} - -// XXX_OneofWrappers is for the internal use of the proto package. -func (*PrometheusInstantQueryResult) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*PrometheusInstantQueryResult_Vector)(nil), - (*PrometheusInstantQueryResult_RawBytes)(nil), - (*PrometheusInstantQueryResult_Matrix)(nil), - } -} - -type Vector struct { - Samples []*Sample `protobuf:"bytes,1,rep,name=samples,proto3" json:"samples,omitempty"` -} - -func (m *Vector) Reset() { *m = Vector{} } -func (*Vector) ProtoMessage() {} -func (*Vector) Descriptor() ([]byte, []int) { - return fileDescriptor_d2ce36475a368033, []int{3} -} -func (m *Vector) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Vector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Vector.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Vector) XXX_Merge(src proto.Message) { - xxx_messageInfo_Vector.Merge(m, src) -} -func (m *Vector) XXX_Size() int { - return m.Size() -} -func (m *Vector) XXX_DiscardUnknown() { - xxx_messageInfo_Vector.DiscardUnknown(m) -} - -var xxx_messageInfo_Vector proto.InternalMessageInfo - -func (m *Vector) GetSamples() []*Sample { - if m != nil { - return m.Samples - } - return nil -} - -type Sample struct { - Labels []github_com_cortexproject_cortex_pkg_cortexpb.LabelAdapter `protobuf:"bytes,1,rep,name=labels,proto3,customtype=github.com/cortexproject/cortex/pkg/cortexpb.LabelAdapter" json:"metric"` - Sample *cortexpb.Sample `protobuf:"bytes,2,opt,name=sample,proto3" json:"value"` - Histogram *tripperware.SampleHistogramPair `protobuf:"bytes,3,opt,name=histogram,proto3" json:"histogram"` -} - -func (m *Sample) Reset() { *m = Sample{} } -func (*Sample) ProtoMessage() {} -func (*Sample) Descriptor() ([]byte, []int) { - return fileDescriptor_d2ce36475a368033, []int{4} -} -func (m *Sample) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Sample) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Sample.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Sample) 
XXX_Merge(src proto.Message) { - xxx_messageInfo_Sample.Merge(m, src) -} -func (m *Sample) XXX_Size() int { - return m.Size() -} -func (m *Sample) XXX_DiscardUnknown() { - xxx_messageInfo_Sample.DiscardUnknown(m) -} - -var xxx_messageInfo_Sample proto.InternalMessageInfo - -func (m *Sample) GetSample() *cortexpb.Sample { - if m != nil { - return m.Sample - } - return nil -} - -func (m *Sample) GetHistogram() *tripperware.SampleHistogramPair { - if m != nil { - return m.Histogram - } - return nil -} - -type Matrix struct { - SampleStreams []tripperware.SampleStream `protobuf:"bytes,1,rep,name=sampleStreams,proto3" json:"sampleStreams"` -} - -func (m *Matrix) Reset() { *m = Matrix{} } -func (*Matrix) ProtoMessage() {} -func (*Matrix) Descriptor() ([]byte, []int) { - return fileDescriptor_d2ce36475a368033, []int{5} -} -func (m *Matrix) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Matrix) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Matrix.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Matrix) XXX_Merge(src proto.Message) { - xxx_messageInfo_Matrix.Merge(m, src) -} -func (m *Matrix) XXX_Size() int { - return m.Size() -} -func (m *Matrix) XXX_DiscardUnknown() { - xxx_messageInfo_Matrix.DiscardUnknown(m) -} - -var xxx_messageInfo_Matrix proto.InternalMessageInfo - -func (m *Matrix) GetSampleStreams() []tripperware.SampleStream { - if m != nil { - return m.SampleStreams - } - return nil -} - -func init() { - proto.RegisterType((*PrometheusInstantQueryResponse)(nil), "instantquery.PrometheusInstantQueryResponse") - proto.RegisterType((*PrometheusInstantQueryData)(nil), "instantquery.PrometheusInstantQueryData") - proto.RegisterType((*PrometheusInstantQueryResult)(nil), "instantquery.PrometheusInstantQueryResult") - proto.RegisterType((*Vector)(nil), "instantquery.Vector") - proto.RegisterType((*Sample)(nil), "instantquery.Sample") - proto.RegisterType((*Matrix)(nil), "instantquery.Matrix") -} - -func init() { proto.RegisterFile("instantquery.proto", fileDescriptor_d2ce36475a368033) } - -var fileDescriptor_d2ce36475a368033 = []byte{ - // 713 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0xc1, 0x6e, 0xd3, 0x4a, - 0x14, 0xf5, 0x34, 0x8d, 0xdb, 0x4c, 0xda, 0xbe, 0xf7, 0xa6, 0x55, 0x5f, 0x5e, 0x55, 0x8d, 0xa3, - 0xe8, 0x21, 0x05, 0x04, 0x8e, 0x14, 0x84, 0x80, 0x25, 0x86, 0xa2, 0x20, 0x40, 0x6d, 0xa7, 0x15, - 0x48, 0xec, 0x26, 0xe9, 0x28, 0x35, 0xc4, 0xb1, 0x99, 0x99, 0xb4, 0xcd, 0x8e, 0x2f, 0x40, 0x7c, - 0x05, 0xe2, 0x53, 0xba, 0xec, 0xb2, 0x62, 0x61, 0xd1, 0x74, 0x83, 0xbc, 0x2a, 0x3b, 0x96, 0xc8, - 0x33, 0xe3, 0xc6, 0x81, 0x02, 0x65, 0xe7, 0xb9, 0xf7, 0x9c, 0x73, 0xef, 0x3d, 0x77, 0x3c, 0x10, - 0xf9, 0x7d, 0x21, 0x69, 0x5f, 0xbe, 0x1e, 0x30, 0x3e, 0x74, 0x23, 0x1e, 0xca, 0x10, 0xcd, 0xe5, - 0x63, 0x2b, 0x4b, 0xdd, 0xb0, 0x1b, 0xaa, 0x44, 0x23, 0xfd, 0xd2, 0x98, 0x95, 0xbb, 0x5d, 0x5f, - 0xee, 0x0e, 0xda, 0x6e, 0x27, 0x0c, 0x1a, 0x9d, 0x90, 0x4b, 0x76, 0x10, 0xf1, 0xf0, 0x25, 0xeb, - 0x48, 0x73, 0x6a, 0x44, 0xaf, 0xba, 0x59, 0xa2, 0x6d, 0x3e, 0x0c, 0xf5, 0xfe, 0x65, 0xa8, 0x69, - 0x6d, 0x9f, 0xf1, 0x86, 0xe4, 0x7e, 0x14, 0x31, 0xbe, 0x4f, 0x39, 0x6b, 0xe4, 0x7a, 0xac, 0x7d, - 0x9d, 0x82, 0x78, 0x83, 0x87, 0x01, 0x93, 0xbb, 0x6c, 0x20, 0x1e, 0xe9, 0x86, 0x37, 0x53, 0x00, - 0x61, 0x22, 0x0a, 0xfb, 0x82, 0xa1, 0x1a, 0xb4, 0xb7, 
0x24, 0x95, 0x03, 0x51, 0x01, 0x55, 0x50, - 0x2f, 0x79, 0x30, 0x89, 0x1d, 0x5b, 0xa8, 0x08, 0x31, 0x19, 0xb4, 0x0d, 0xa7, 0x1f, 0x50, 0x49, - 0x2b, 0x53, 0x55, 0x50, 0x2f, 0x37, 0xeb, 0xee, 0x84, 0x1b, 0x17, 0xeb, 0xa7, 0x78, 0x6f, 0xf9, - 0x30, 0x76, 0xac, 0x24, 0x76, 0x16, 0x76, 0xa8, 0xa4, 0xd7, 0xc3, 0xc0, 0x97, 0x2c, 0x88, 0xe4, - 0x90, 0x28, 0x35, 0x74, 0x0b, 0x96, 0xd6, 0x38, 0x0f, 0xf9, 0xf6, 0x30, 0x62, 0x95, 0x82, 0x2a, - 0xfe, 0x6f, 0x12, 0x3b, 0x8b, 0x2c, 0x0b, 0xe6, 0x18, 0x63, 0x24, 0xba, 0x0a, 0x8b, 0xea, 0x50, - 0x99, 0x56, 0x94, 0xc5, 0x24, 0x76, 0xfe, 0x52, 0x94, 0x1c, 0x5c, 0x23, 0xd0, 0x43, 0x38, 0xd3, - 0x62, 0x74, 0x87, 0x71, 0x51, 0x29, 0x56, 0x0b, 0xf5, 0x72, 0xf3, 0x8a, 0x9b, 0x73, 0x2a, 0xd7, - 0x79, 0xe6, 0x86, 0x46, 0x7b, 0xc5, 0x24, 0x76, 0xc0, 0x0d, 0x92, 0x91, 0x51, 0x13, 0xce, 0x3e, - 0xa7, 0xbc, 0xef, 0xf7, 0xbb, 0xa2, 0x62, 0x57, 0x0b, 0xf5, 0x92, 0xb7, 0x9c, 0xc4, 0x0e, 0xda, - 0x37, 0xb1, 0x5c, 0xe1, 0x73, 0x5c, 0xed, 0x0b, 0x80, 0x2b, 0x3f, 0xb7, 0x06, 0xb9, 0x10, 0x12, - 0x26, 0x06, 0x3d, 0xa9, 0xa6, 0xd7, 0xd6, 0x2f, 0x24, 0xb1, 0x03, 0xf9, 0x79, 0x94, 0xe4, 0x10, - 0x88, 0x40, 0x5b, 0x9f, 0xcc, 0x12, 0xae, 0x5d, 0x66, 0x09, 0x9a, 0xe1, 0x2d, 0x98, 0x35, 0xd8, - 0x5a, 0x9b, 0x18, 0x25, 0xb4, 0x0e, 0x8b, 0xe9, 0xa2, 0x85, 0x32, 0xbf, 0xdc, 0xfc, 0xff, 0x37, - 0xe6, 0xa4, 0x97, 0x41, 0x68, 0xbf, 0x15, 0x2d, 0xef, 0xb7, 0x0a, 0xd4, 0xde, 0x03, 0xb8, 0xfa, - 0xab, 0x4e, 0x90, 0x0b, 0xed, 0x3d, 0xd6, 0x91, 0x21, 0x57, 0x13, 0x97, 0x9b, 0x4b, 0x93, 0x53, - 0x3c, 0x53, 0xb9, 0x96, 0x45, 0x0c, 0x0a, 0xad, 0xc2, 0x59, 0x4e, 0xf7, 0xbd, 0xa1, 0x64, 0x42, - 0xcd, 0x3d, 0xd7, 0xb2, 0xc8, 0x79, 0x24, 0x55, 0x0b, 0xa8, 0xe4, 0xfe, 0x81, 0x19, 0xe0, 0x3b, - 0xb5, 0xa7, 0x2a, 0x97, 0xaa, 0x69, 0x94, 0x37, 0x0b, 0x8d, 0x03, 0xb5, 0x3b, 0xd0, 0xd6, 0xb5, - 0x90, 0x0b, 0x67, 0x04, 0x0d, 0xa2, 0x1e, 0x4b, 0xef, 0x7f, 0xe1, 0x47, 0x91, 0x2d, 0x95, 0x24, - 0x19, 0xa8, 0xf6, 0x76, 0x0a, 0xda, 0x3a, 0x86, 0x0e, 0xa0, 0xdd, 0xa3, 0x6d, 0xd6, 0xcb, 0x98, - 0x8b, 0x6e, 0xf6, 0x27, 0xbb, 0x4f, 0xd2, 0xf8, 0x06, 0xf5, 0xb9, 0xf7, 0x38, 0xf5, 0xfe, 0x63, - 0xec, 0xfc, 0xd1, 0x4b, 0xa0, 0xf9, 0xf7, 0x76, 0x68, 0x24, 0x19, 0x4f, 0x17, 0x17, 0x30, 0xc9, - 0xfd, 0x0e, 0x31, 0xf5, 0xd0, 0x6d, 0x68, 0xeb, 0x7e, 0xcc, 0x65, 0xf8, 0x7b, 0x5c, 0x59, 0xf7, - 0xe6, 0xcd, 0x1f, 0xc6, 0x0e, 0x48, 0x62, 0xa7, 0xb8, 0x47, 0x7b, 0x03, 0x46, 0x0c, 0x1c, 0x6d, - 0xc2, 0xd2, 0xae, 0x2f, 0x64, 0xd8, 0xe5, 0x34, 0x30, 0xa6, 0x55, 0x27, 0xb6, 0xae, 0xe9, 0xad, - 0x0c, 0xa3, 0x46, 0xf8, 0xc7, 0x68, 0x8d, 0xa9, 0x64, 0xfc, 0x59, 0x5b, 0x87, 0xb6, 0x36, 0x1a, - 0xad, 0xc1, 0x79, 0x5d, 0x66, 0x4b, 0x72, 0x46, 0x83, 0xcc, 0x96, 0xff, 0x2e, 0x28, 0xa0, 0x11, - 0xde, 0x74, 0x6a, 0x0e, 0x99, 0x64, 0x79, 0xde, 0xd1, 0x09, 0xb6, 0x8e, 0x4f, 0xb0, 0x75, 0x76, - 0x82, 0xc1, 0x9b, 0x11, 0x06, 0x1f, 0x46, 0x18, 0x1c, 0x8e, 0x30, 0x38, 0x1a, 0x61, 0xf0, 0x69, - 0x84, 0xc1, 0xe7, 0x11, 0xb6, 0xce, 0x46, 0x18, 0xbc, 0x3b, 0xc5, 0xd6, 0xd1, 0x29, 0xb6, 0x8e, - 0x4f, 0xb1, 0xf5, 0x62, 0xe2, 0x35, 0x6e, 0xdb, 0xea, 0xf9, 0xbb, 0xf9, 0x2d, 0x00, 0x00, 0xff, - 0xff, 0x3f, 0x4a, 0x4a, 0x48, 0xb8, 0x05, 0x00, 0x00, -} - -func (this *PrometheusInstantQueryResponse) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*PrometheusInstantQueryResponse) - if !ok { - that2, ok := that.(PrometheusInstantQueryResponse) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.Status != 
that1.Status { - return false - } - if !this.Data.Equal(&that1.Data) { - return false - } - if this.ErrorType != that1.ErrorType { - return false - } - if this.Error != that1.Error { - return false - } - if len(this.Headers) != len(that1.Headers) { - return false - } - for i := range this.Headers { - if !this.Headers[i].Equal(that1.Headers[i]) { - return false - } - } - if len(this.Warnings) != len(that1.Warnings) { - return false - } - for i := range this.Warnings { - if this.Warnings[i] != that1.Warnings[i] { - return false - } - } - return true -} -func (this *PrometheusInstantQueryData) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*PrometheusInstantQueryData) - if !ok { - that2, ok := that.(PrometheusInstantQueryData) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.ResultType != that1.ResultType { - return false - } - if !this.Result.Equal(&that1.Result) { - return false - } - if !this.Stats.Equal(that1.Stats) { - return false - } - return true -} -func (this *PrometheusInstantQueryResult) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*PrometheusInstantQueryResult) - if !ok { - that2, ok := that.(PrometheusInstantQueryResult) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if that1.Result == nil { - if this.Result != nil { - return false - } - } else if this.Result == nil { - return false - } else if !this.Result.Equal(that1.Result) { - return false - } - return true -} -func (this *PrometheusInstantQueryResult_Vector) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*PrometheusInstantQueryResult_Vector) - if !ok { - that2, ok := that.(PrometheusInstantQueryResult_Vector) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !this.Vector.Equal(that1.Vector) { - return false - } - return true -} -func (this *PrometheusInstantQueryResult_RawBytes) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*PrometheusInstantQueryResult_RawBytes) - if !ok { - that2, ok := that.(PrometheusInstantQueryResult_RawBytes) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !bytes.Equal(this.RawBytes, that1.RawBytes) { - return false - } - return true -} -func (this *PrometheusInstantQueryResult_Matrix) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*PrometheusInstantQueryResult_Matrix) - if !ok { - that2, ok := that.(PrometheusInstantQueryResult_Matrix) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !this.Matrix.Equal(that1.Matrix) { - return false - } - return true -} -func (this *Vector) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*Vector) - if !ok { - that2, ok := that.(Vector) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if len(this.Samples) != len(that1.Samples) { - return false - } - for i := 
range this.Samples { - if !this.Samples[i].Equal(that1.Samples[i]) { - return false - } - } - return true -} -func (this *Sample) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*Sample) - if !ok { - that2, ok := that.(Sample) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if len(this.Labels) != len(that1.Labels) { - return false - } - for i := range this.Labels { - if !this.Labels[i].Equal(that1.Labels[i]) { - return false - } - } - if !this.Sample.Equal(that1.Sample) { - return false - } - if !this.Histogram.Equal(that1.Histogram) { - return false - } - return true -} -func (this *Matrix) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*Matrix) - if !ok { - that2, ok := that.(Matrix) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if len(this.SampleStreams) != len(that1.SampleStreams) { - return false - } - for i := range this.SampleStreams { - if !this.SampleStreams[i].Equal(&that1.SampleStreams[i]) { - return false - } - } - return true -} -func (this *PrometheusInstantQueryResponse) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 10) - s = append(s, "&instantquery.PrometheusInstantQueryResponse{") - s = append(s, "Status: "+fmt.Sprintf("%#v", this.Status)+",\n") - s = append(s, "Data: "+strings.Replace(this.Data.GoString(), `&`, ``, 1)+",\n") - s = append(s, "ErrorType: "+fmt.Sprintf("%#v", this.ErrorType)+",\n") - s = append(s, "Error: "+fmt.Sprintf("%#v", this.Error)+",\n") - if this.Headers != nil { - s = append(s, "Headers: "+fmt.Sprintf("%#v", this.Headers)+",\n") - } - s = append(s, "Warnings: "+fmt.Sprintf("%#v", this.Warnings)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *PrometheusInstantQueryData) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 7) - s = append(s, "&instantquery.PrometheusInstantQueryData{") - s = append(s, "ResultType: "+fmt.Sprintf("%#v", this.ResultType)+",\n") - s = append(s, "Result: "+strings.Replace(this.Result.GoString(), `&`, ``, 1)+",\n") - if this.Stats != nil { - s = append(s, "Stats: "+fmt.Sprintf("%#v", this.Stats)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *PrometheusInstantQueryResult) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 7) - s = append(s, "&instantquery.PrometheusInstantQueryResult{") - if this.Result != nil { - s = append(s, "Result: "+fmt.Sprintf("%#v", this.Result)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *PrometheusInstantQueryResult_Vector) GoString() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&instantquery.PrometheusInstantQueryResult_Vector{` + - `Vector:` + fmt.Sprintf("%#v", this.Vector) + `}`}, ", ") - return s -} -func (this *PrometheusInstantQueryResult_RawBytes) GoString() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&instantquery.PrometheusInstantQueryResult_RawBytes{` + - `RawBytes:` + fmt.Sprintf("%#v", this.RawBytes) + `}`}, ", ") - return s -} -func (this *PrometheusInstantQueryResult_Matrix) GoString() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&instantquery.PrometheusInstantQueryResult_Matrix{` + - `Matrix:` + 
fmt.Sprintf("%#v", this.Matrix) + `}`}, ", ") - return s -} -func (this *Vector) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&instantquery.Vector{") - if this.Samples != nil { - s = append(s, "Samples: "+fmt.Sprintf("%#v", this.Samples)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *Sample) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 7) - s = append(s, "&instantquery.Sample{") - s = append(s, "Labels: "+fmt.Sprintf("%#v", this.Labels)+",\n") - if this.Sample != nil { - s = append(s, "Sample: "+fmt.Sprintf("%#v", this.Sample)+",\n") - } - if this.Histogram != nil { - s = append(s, "Histogram: "+fmt.Sprintf("%#v", this.Histogram)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *Matrix) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&instantquery.Matrix{") - if this.SampleStreams != nil { - vs := make([]*tripperware.SampleStream, len(this.SampleStreams)) - for i := range vs { - vs[i] = &this.SampleStreams[i] - } - s = append(s, "SampleStreams: "+fmt.Sprintf("%#v", vs)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func valueToGoStringInstantquery(v interface{}, typ string) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) -} -func (m *PrometheusInstantQueryResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PrometheusInstantQueryResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PrometheusInstantQueryResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Warnings) > 0 { - for iNdEx := len(m.Warnings) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Warnings[iNdEx]) - copy(dAtA[i:], m.Warnings[iNdEx]) - i = encodeVarintInstantquery(dAtA, i, uint64(len(m.Warnings[iNdEx]))) - i-- - dAtA[i] = 0x32 - } - } - if len(m.Headers) > 0 { - for iNdEx := len(m.Headers) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Headers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintInstantquery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - } - if len(m.Error) > 0 { - i -= len(m.Error) - copy(dAtA[i:], m.Error) - i = encodeVarintInstantquery(dAtA, i, uint64(len(m.Error))) - i-- - dAtA[i] = 0x22 - } - if len(m.ErrorType) > 0 { - i -= len(m.ErrorType) - copy(dAtA[i:], m.ErrorType) - i = encodeVarintInstantquery(dAtA, i, uint64(len(m.ErrorType))) - i-- - dAtA[i] = 0x1a - } - { - size, err := m.Data.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintInstantquery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - if len(m.Status) > 0 { - i -= len(m.Status) - copy(dAtA[i:], m.Status) - i = encodeVarintInstantquery(dAtA, i, uint64(len(m.Status))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *PrometheusInstantQueryData) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m 
*PrometheusInstantQueryData) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PrometheusInstantQueryData) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Stats != nil { - { - size, err := m.Stats.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintInstantquery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - { - size, err := m.Result.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintInstantquery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - if len(m.ResultType) > 0 { - i -= len(m.ResultType) - copy(dAtA[i:], m.ResultType) - i = encodeVarintInstantquery(dAtA, i, uint64(len(m.ResultType))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *PrometheusInstantQueryResult) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PrometheusInstantQueryResult) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PrometheusInstantQueryResult) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Result != nil { - { - size := m.Result.Size() - i -= size - if _, err := m.Result.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - } - } - return len(dAtA) - i, nil -} - -func (m *PrometheusInstantQueryResult_Vector) MarshalTo(dAtA []byte) (int, error) { - return m.MarshalToSizedBuffer(dAtA[:m.Size()]) -} - -func (m *PrometheusInstantQueryResult_Vector) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.Vector != nil { - { - size, err := m.Vector.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintInstantquery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} -func (m *PrometheusInstantQueryResult_RawBytes) MarshalTo(dAtA []byte) (int, error) { - return m.MarshalToSizedBuffer(dAtA[:m.Size()]) -} - -func (m *PrometheusInstantQueryResult_RawBytes) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.RawBytes != nil { - i -= len(m.RawBytes) - copy(dAtA[i:], m.RawBytes) - i = encodeVarintInstantquery(dAtA, i, uint64(len(m.RawBytes))) - i-- - dAtA[i] = 0x12 - } - return len(dAtA) - i, nil -} -func (m *PrometheusInstantQueryResult_Matrix) MarshalTo(dAtA []byte) (int, error) { - return m.MarshalToSizedBuffer(dAtA[:m.Size()]) -} - -func (m *PrometheusInstantQueryResult_Matrix) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.Matrix != nil { - { - size, err := m.Matrix.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintInstantquery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - return len(dAtA) - i, nil -} -func (m *Vector) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Vector) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Vector) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Samples) > 0 { - for iNdEx := len(m.Samples) 
- 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Samples[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintInstantquery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *Sample) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Sample) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Sample) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Histogram != nil { - { - size, err := m.Histogram.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintInstantquery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - if m.Sample != nil { - { - size, err := m.Sample.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintInstantquery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if len(m.Labels) > 0 { - for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- { - { - size := m.Labels[iNdEx].Size() - i -= size - if _, err := m.Labels[iNdEx].MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - i = encodeVarintInstantquery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *Matrix) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Matrix) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Matrix) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.SampleStreams) > 0 { - for iNdEx := len(m.SampleStreams) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.SampleStreams[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintInstantquery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func encodeVarintInstantquery(dAtA []byte, offset int, v uint64) int { - offset -= sovInstantquery(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *PrometheusInstantQueryResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Status) - if l > 0 { - n += 1 + l + sovInstantquery(uint64(l)) - } - l = m.Data.Size() - n += 1 + l + sovInstantquery(uint64(l)) - l = len(m.ErrorType) - if l > 0 { - n += 1 + l + sovInstantquery(uint64(l)) - } - l = len(m.Error) - if l > 0 { - n += 1 + l + sovInstantquery(uint64(l)) - } - if len(m.Headers) > 0 { - for _, e := range m.Headers { - l = e.Size() - n += 1 + l + sovInstantquery(uint64(l)) - } - } - if len(m.Warnings) > 0 { - for _, s := range m.Warnings { - l = len(s) - n += 1 + l + sovInstantquery(uint64(l)) - } - } - return n -} - -func (m *PrometheusInstantQueryData) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ResultType) - if l > 0 { - n += 1 + l + sovInstantquery(uint64(l)) - } - l = m.Result.Size() - n += 1 + l + sovInstantquery(uint64(l)) - if m.Stats != nil { - l = m.Stats.Size() - n += 1 + l + sovInstantquery(uint64(l)) - } - 
return n -} - -func (m *PrometheusInstantQueryResult) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Result != nil { - n += m.Result.Size() - } - return n -} - -func (m *PrometheusInstantQueryResult_Vector) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Vector != nil { - l = m.Vector.Size() - n += 1 + l + sovInstantquery(uint64(l)) - } - return n -} -func (m *PrometheusInstantQueryResult_RawBytes) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.RawBytes != nil { - l = len(m.RawBytes) - n += 1 + l + sovInstantquery(uint64(l)) - } - return n -} -func (m *PrometheusInstantQueryResult_Matrix) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Matrix != nil { - l = m.Matrix.Size() - n += 1 + l + sovInstantquery(uint64(l)) - } - return n -} -func (m *Vector) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Samples) > 0 { - for _, e := range m.Samples { - l = e.Size() - n += 1 + l + sovInstantquery(uint64(l)) - } - } - return n -} - -func (m *Sample) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Labels) > 0 { - for _, e := range m.Labels { - l = e.Size() - n += 1 + l + sovInstantquery(uint64(l)) - } - } - if m.Sample != nil { - l = m.Sample.Size() - n += 1 + l + sovInstantquery(uint64(l)) - } - if m.Histogram != nil { - l = m.Histogram.Size() - n += 1 + l + sovInstantquery(uint64(l)) - } - return n -} - -func (m *Matrix) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.SampleStreams) > 0 { - for _, e := range m.SampleStreams { - l = e.Size() - n += 1 + l + sovInstantquery(uint64(l)) - } - } - return n -} - -func sovInstantquery(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozInstantquery(x uint64) (n int) { - return sovInstantquery(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *PrometheusInstantQueryResponse) String() string { - if this == nil { - return "nil" - } - repeatedStringForHeaders := "[]*PrometheusResponseHeader{" - for _, f := range this.Headers { - repeatedStringForHeaders += strings.Replace(fmt.Sprintf("%v", f), "PrometheusResponseHeader", "tripperware.PrometheusResponseHeader", 1) + "," - } - repeatedStringForHeaders += "}" - s := strings.Join([]string{`&PrometheusInstantQueryResponse{`, - `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `Data:` + strings.Replace(strings.Replace(this.Data.String(), "PrometheusInstantQueryData", "PrometheusInstantQueryData", 1), `&`, ``, 1) + `,`, - `ErrorType:` + fmt.Sprintf("%v", this.ErrorType) + `,`, - `Error:` + fmt.Sprintf("%v", this.Error) + `,`, - `Headers:` + repeatedStringForHeaders + `,`, - `Warnings:` + fmt.Sprintf("%v", this.Warnings) + `,`, - `}`, - }, "") - return s -} -func (this *PrometheusInstantQueryData) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PrometheusInstantQueryData{`, - `ResultType:` + fmt.Sprintf("%v", this.ResultType) + `,`, - `Result:` + strings.Replace(strings.Replace(this.Result.String(), "PrometheusInstantQueryResult", "PrometheusInstantQueryResult", 1), `&`, ``, 1) + `,`, - `Stats:` + strings.Replace(fmt.Sprintf("%v", this.Stats), "PrometheusResponseStats", "tripperware.PrometheusResponseStats", 1) + `,`, - `}`, - }, "") - return s -} -func (this *PrometheusInstantQueryResult) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PrometheusInstantQueryResult{`, - `Result:` + fmt.Sprintf("%v", 
this.Result) + `,`, - `}`, - }, "") - return s -} -func (this *PrometheusInstantQueryResult_Vector) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PrometheusInstantQueryResult_Vector{`, - `Vector:` + strings.Replace(fmt.Sprintf("%v", this.Vector), "Vector", "Vector", 1) + `,`, - `}`, - }, "") - return s -} -func (this *PrometheusInstantQueryResult_RawBytes) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PrometheusInstantQueryResult_RawBytes{`, - `RawBytes:` + fmt.Sprintf("%v", this.RawBytes) + `,`, - `}`, - }, "") - return s -} -func (this *PrometheusInstantQueryResult_Matrix) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PrometheusInstantQueryResult_Matrix{`, - `Matrix:` + strings.Replace(fmt.Sprintf("%v", this.Matrix), "Matrix", "Matrix", 1) + `,`, - `}`, - }, "") - return s -} -func (this *Vector) String() string { - if this == nil { - return "nil" - } - repeatedStringForSamples := "[]*Sample{" - for _, f := range this.Samples { - repeatedStringForSamples += strings.Replace(f.String(), "Sample", "Sample", 1) + "," - } - repeatedStringForSamples += "}" - s := strings.Join([]string{`&Vector{`, - `Samples:` + repeatedStringForSamples + `,`, - `}`, - }, "") - return s -} -func (this *Sample) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Sample{`, - `Labels:` + fmt.Sprintf("%v", this.Labels) + `,`, - `Sample:` + strings.Replace(fmt.Sprintf("%v", this.Sample), "Sample", "cortexpb.Sample", 1) + `,`, - `Histogram:` + strings.Replace(fmt.Sprintf("%v", this.Histogram), "SampleHistogramPair", "tripperware.SampleHistogramPair", 1) + `,`, - `}`, - }, "") - return s -} -func (this *Matrix) String() string { - if this == nil { - return "nil" - } - repeatedStringForSampleStreams := "[]SampleStream{" - for _, f := range this.SampleStreams { - repeatedStringForSampleStreams += fmt.Sprintf("%v", f) + "," - } - repeatedStringForSampleStreams += "}" - s := strings.Join([]string{`&Matrix{`, - `SampleStreams:` + repeatedStringForSampleStreams + `,`, - `}`, - }, "") - return s -} -func valueToStringInstantquery(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *PrometheusInstantQueryResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowInstantquery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PrometheusInstantQueryResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PrometheusInstantQueryResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowInstantquery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return 
ErrInvalidLengthInstantquery - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthInstantquery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Status = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowInstantquery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthInstantquery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthInstantquery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Data.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ErrorType", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowInstantquery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthInstantquery - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthInstantquery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ErrorType = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowInstantquery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthInstantquery - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthInstantquery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Error = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Headers", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowInstantquery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthInstantquery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthInstantquery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Headers = append(m.Headers, &tripperware.PrometheusResponseHeader{}) - if err := m.Headers[len(m.Headers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Warnings", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowInstantquery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - 
return ErrInvalidLengthInstantquery - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthInstantquery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Warnings = append(m.Warnings, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipInstantquery(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthInstantquery - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthInstantquery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PrometheusInstantQueryData) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowInstantquery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PrometheusInstantQueryData: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PrometheusInstantQueryData: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResultType", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowInstantquery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthInstantquery - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthInstantquery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ResultType = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowInstantquery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthInstantquery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthInstantquery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Result.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Stats", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowInstantquery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthInstantquery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthInstantquery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Stats == nil { - m.Stats = &tripperware.PrometheusResponseStats{} - } - if err := m.Stats.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - 
default: - iNdEx = preIndex - skippy, err := skipInstantquery(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthInstantquery - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthInstantquery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PrometheusInstantQueryResult) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowInstantquery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PrometheusInstantQueryResult: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PrometheusInstantQueryResult: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Vector", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowInstantquery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthInstantquery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthInstantquery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &Vector{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Result = &PrometheusInstantQueryResult_Vector{v} - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RawBytes", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowInstantquery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthInstantquery - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthInstantquery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := make([]byte, postIndex-iNdEx) - copy(v, dAtA[iNdEx:postIndex]) - m.Result = &PrometheusInstantQueryResult_RawBytes{v} - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Matrix", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowInstantquery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthInstantquery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthInstantquery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &Matrix{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Result = &PrometheusInstantQueryResult_Matrix{v} - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipInstantquery(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthInstantquery - } - if (iNdEx + skippy) < 0 { - 
return ErrInvalidLengthInstantquery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Vector) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowInstantquery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Vector: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Vector: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Samples", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowInstantquery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthInstantquery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthInstantquery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Samples = append(m.Samples, &Sample{}) - if err := m.Samples[len(m.Samples)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipInstantquery(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthInstantquery - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthInstantquery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Sample) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowInstantquery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Sample: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Sample: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowInstantquery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthInstantquery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthInstantquery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Labels = append(m.Labels, github_com_cortexproject_cortex_pkg_cortexpb.LabelAdapter{}) - if err := m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field 
Sample", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowInstantquery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthInstantquery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthInstantquery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Sample == nil { - m.Sample = &cortexpb.Sample{} - } - if err := m.Sample.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Histogram", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowInstantquery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthInstantquery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthInstantquery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Histogram == nil { - m.Histogram = &tripperware.SampleHistogramPair{} - } - if err := m.Histogram.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipInstantquery(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthInstantquery - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthInstantquery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Matrix) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowInstantquery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Matrix: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Matrix: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SampleStreams", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowInstantquery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthInstantquery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthInstantquery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SampleStreams = append(m.SampleStreams, tripperware.SampleStream{}) - if err := m.SampleStreams[len(m.SampleStreams)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipInstantquery(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthInstantquery - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthInstantquery - } - if (iNdEx + skippy) > l { - 
return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipInstantquery(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowInstantquery - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowInstantquery - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowInstantquery - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthInstantquery - } - iNdEx += length - if iNdEx < 0 { - return 0, ErrInvalidLengthInstantquery - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowInstantquery - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipInstantquery(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - if iNdEx < 0 { - return 0, ErrInvalidLengthInstantquery - } - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthInstantquery = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowInstantquery = fmt.Errorf("proto: integer overflow") -) diff --git a/pkg/querier/tripperware/instantquery/instantquery.proto b/pkg/querier/tripperware/instantquery/instantquery.proto deleted file mode 100644 index d67b1ad061..0000000000 --- a/pkg/querier/tripperware/instantquery/instantquery.proto +++ /dev/null @@ -1,50 +0,0 @@ -syntax = "proto3"; - -package instantquery; - -option go_package = "instantquery"; - -import "gogoproto/gogo.proto"; -import "github.com/cortexproject/cortex/pkg/cortexpb/cortex.proto"; -import "github.com/cortexproject/cortex/pkg/querier/tripperware/query.proto"; - -option (gogoproto.marshaler_all) = true; -option (gogoproto.unmarshaler_all) = true; - - -message PrometheusInstantQueryResponse { - string Status = 1 [(gogoproto.jsontag) = "status"]; - PrometheusInstantQueryData Data = 2 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "data,omitempty"]; - string ErrorType = 3 [(gogoproto.jsontag) = "errorType,omitempty"]; - string Error = 4 [(gogoproto.jsontag) = "error,omitempty"]; - repeated tripperware.PrometheusResponseHeader Headers = 5 [(gogoproto.jsontag) = "-"]; - repeated string Warnings = 6 [(gogoproto.jsontag) = "warnings,omitempty"]; -} - -message PrometheusInstantQueryData { - string ResultType = 1 [(gogoproto.jsontag) = "resultType"]; - PrometheusInstantQueryResult Result = 2 [(gogoproto.nullable) = false, 
(gogoproto.jsontag) = "result"];
-  tripperware.PrometheusResponseStats stats = 3 [(gogoproto.jsontag) = "stats,omitempty"];
-}
-
-message PrometheusInstantQueryResult {
-  oneof result {
-    Vector vector = 1;
-    bytes rawBytes = 2;
-    Matrix matrix = 3;
-  }
-}
-
-message Vector {
-  repeated Sample samples = 1;
-}
-
-message Sample {
-  repeated cortexpb.LabelPair labels = 1 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "metric", (gogoproto.customtype) = "github.com/cortexproject/cortex/pkg/cortexpb.LabelAdapter"];
-  cortexpb.Sample sample = 2 [(gogoproto.nullable) = true, (gogoproto.jsontag) = "value"];
-  tripperware.SampleHistogramPair histogram = 3 [(gogoproto.nullable) = true, (gogoproto.jsontag) = "histogram"];
-}
-
-message Matrix {
-  repeated tripperware.SampleStream sampleStreams = 1 [(gogoproto.nullable) = false];
-}
diff --git a/pkg/querier/tripperware/instantquery/limits_test.go b/pkg/querier/tripperware/instantquery/limits_test.go
index 4bf1cfac3c..e209d366f3 100644
--- a/pkg/querier/tripperware/instantquery/limits_test.go
+++ b/pkg/querier/tripperware/instantquery/limits_test.go
@@ -71,12 +71,12 @@ func TestLimitsMiddleware_MaxQueryLength(t *testing.T) {
 		testData := testData
 		t.Run(testName, func(t *testing.T) {
 			t.Parallel()
-			req := &PrometheusRequest{Query: testData.query}
+			req := &tripperware.PrometheusRequest{Query: testData.query}
 
 			limits := &mockLimits{maxQueryLength: testData.maxQueryLength}
 			middleware := NewLimitsMiddleware(limits, 5*time.Minute)
 
-			innerRes := NewEmptyPrometheusInstantQueryResponse()
+			innerRes := tripperware.NewEmptyPrometheusResponse(true)
 			inner := &mockHandler{}
 			inner.On("Do", mock.Anything, mock.Anything).Return(innerRes, nil)
diff --git a/pkg/querier/tripperware/merge.go b/pkg/querier/tripperware/merge.go
index 2ae5a9793c..6266946b4a 100644
--- a/pkg/querier/tripperware/merge.go
+++ b/pkg/querier/tripperware/merge.go
@@ -1,13 +1,335 @@
 package tripperware
 
 import (
+	"context"
+	"fmt"
 	"sort"
 
+	"github.com/prometheus/common/model"
+	promqlparser "github.com/prometheus/prometheus/promql/parser"
+	"github.com/thanos-io/thanos/pkg/strutil"
+
 	"github.com/cortexproject/cortex/pkg/cortexpb"
 )
 
-// MergeSampleStreams deduplicates sample streams using a map.
-func MergeSampleStreams(output map[string]SampleStream, sampleStreams []SampleStream) {
+const StatusSuccess = "success"
+
+type byFirstTime []*PrometheusResponse
+
+func (a byFirstTime) Len() int           { return len(a) }
+func (a byFirstTime) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
+func (a byFirstTime) Less(i, j int) bool { return a[i].minTime() < a[j].minTime() }
+
+func (resp *PrometheusResponse) minTime() int64 {
+	data := resp.GetData()
+	res := data.GetResult()
+	// minTime should only be called when the response is from a range query.
+	matrix := res.GetMatrix()
+	sampleStreams := matrix.GetSampleStreams()
+	if len(sampleStreams) == 0 {
+		return -1
+	}
+	if len(sampleStreams[0].Samples) == 0 {
+		return -1
+	}
+	return sampleStreams[0].Samples[0].TimestampMs
+}
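
The ordering contract above is easiest to see in isolation. The following is a self-contained sketch, not part of the patch: a hypothetical resp type stands in for *PrometheusResponse, and firstTs plays the role of minTime(), with -1 standing for an empty result.

package main

import (
	"fmt"
	"sort"
)

// resp is a hypothetical stand-in for *PrometheusResponse: only the
// timestamp of the first sample of the first stream matters for ordering.
type resp struct{ firstTs int64 } // -1 mimics minTime() on an empty result

type byFirstTs []resp

func (a byFirstTs) Len() int           { return len(a) }
func (a byFirstTs) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
func (a byFirstTs) Less(i, j int) bool { return a[i].firstTs < a[j].firstTs }

func main() {
	rs := byFirstTs{{200}, {-1}, {100}}
	sort.Sort(rs)
	fmt.Println(rs) // [{-1} {100} {200}]: empty responses sort first
}
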
+
+// MergeResponse merges multiple Responses into one.
+func MergeResponse(ctx context.Context, sumStats bool, req Request, responses ...Response) (Response, error) {
+	if len(responses) == 1 {
+		return responses[0], nil
+	}
+	promResponses := make([]*PrometheusResponse, 0, len(responses))
+	warnings := make([][]string, 0, len(responses))
+	for _, resp := range responses {
+		promResponses = append(promResponses, resp.(*PrometheusResponse))
+		if w := resp.(*PrometheusResponse).Warnings; w != nil {
+			warnings = append(warnings, w)
+		}
+	}
+
+	// Check if it is a range query. Range queries pass req as nil, since
+	// we only use the request when the result type is a vector.
+	if req == nil {
+		sort.Sort(byFirstTime(promResponses))
+	}
+	var data PrometheusData
+	// For now, we only shard queries that return a vector.
+	switch promResponses[0].Data.ResultType {
+	case model.ValVector.String():
+		v, err := vectorMerge(ctx, req, promResponses)
+		if err != nil {
+			return nil, err
+		}
+		data = PrometheusData{
+			ResultType: model.ValVector.String(),
+			Result: PrometheusQueryResult{
+				Result: &PrometheusQueryResult_Vector{
+					Vector: v,
+				},
+			},
+			Stats: statsMerge(sumStats, promResponses),
+		}
+	case model.ValMatrix.String():
+		sampleStreams, err := matrixMerge(ctx, promResponses)
+		if err != nil {
+			return nil, err
+		}
+
+		data = PrometheusData{
+			ResultType: model.ValMatrix.String(),
+			Result: PrometheusQueryResult{
+				Result: &PrometheusQueryResult_Matrix{
+					Matrix: &Matrix{
+						SampleStreams: sampleStreams,
+					},
+				},
+			},
+			Stats: statsMerge(sumStats, promResponses),
+		}
+	default:
+		return nil, fmt.Errorf("unexpected result type: %s", promResponses[0].Data.ResultType)
+	}
+
+	res := &PrometheusResponse{
+		Status:   StatusSuccess,
+		Data:     data,
+		Warnings: strutil.MergeUnsortedSlices(warnings...),
+	}
+	return res, nil
+}
+
+func matrixMerge(ctx context.Context, resps []*PrometheusResponse) ([]SampleStream, error) {
+	output := make(map[string]SampleStream)
+	for _, resp := range resps {
+		if err := ctx.Err(); err != nil {
+			return nil, err
+		}
+		if resp == nil {
+			continue
+		}
+		if resp.Data.Result.GetMatrix() == nil {
+			continue
+		}
+		mergeSampleStreams(output, resp.Data.Result.GetMatrix().GetSampleStreams())
+	}
+
+	keys := make([]string, 0, len(output))
+	for key := range output {
+		keys = append(keys, key)
+	}
+	sort.Strings(keys)
+
+	result := make([]SampleStream, 0, len(output))
+	for _, key := range keys {
+		result = append(result, output[key])
+	}
+
+	return result, nil
+}
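
One design point worth noting before vectorMerge: matrixMerge gathers series into a map keyed by the serialized label set, then emits them in sorted-key order, so the merged matrix is deterministic no matter the order in which partial responses arrive. A minimal stand-in for that pattern follows; it is illustrative only, with placeholder timestamps instead of real SampleStreams.

package main

import (
	"fmt"
	"sort"
)

func main() {
	// Keyed by serialized label set, as mergeSampleStreams does; the values
	// here are placeholder sample timestamps rather than real SampleStreams.
	output := map[string][]int64{
		`{job="b"}`: {1000, 2000},
		`{job="a"}`: {1000, 2000},
	}
	keys := make([]string, 0, len(output))
	for k := range output {
		keys = append(keys, k)
	}
	sort.Strings(keys) // deterministic order, independent of map iteration
	fmt.Println(keys)  // [{job="a"} {job="b"}]
}
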
+ output[metric] = s + } + } + } + + result := &Vector{ + Samples: make([]*Sample, 0, len(output)), + } + + if len(output) == 0 { + return result, nil + } + + if sortPlan == mergeOnly { + for _, k := range metrics { + result.Samples = append(result.Samples, output[k]) + } + return result, nil + } + + samples := make([]*pair, 0, len(output)) + for k, v := range output { + samples = append(samples, &pair{ + metric: k, + s: v, + }) + } + + // TODO: What if we have mixed float and histogram samples in the response? + // Then the sorting behavior is undefined. Prometheus doesn't handle it. + sort.Slice(samples, func(i, j int) bool { + // Order is determined by the sort plan. + switch sortPlan { + case sortByValuesAsc: + return getSortValueFromPair(samples, i) < getSortValueFromPair(samples, j) + case sortByValuesDesc: + return getSortValueFromPair(samples, i) > getSortValueFromPair(samples, j) + } + return samples[i].metric < samples[j].metric + }) + + for _, p := range samples { + result.Samples = append(result.Samples, p.s) + } + return result, nil +} + +// statsMerge merges the stats from multiple responses. This function is similar to matrixMerge. +func statsMerge(shouldSumStats bool, resps []*PrometheusResponse) *PrometheusResponseStats { + output := map[int64]*PrometheusResponseQueryableSamplesStatsPerStep{} + hasStats := false + for _, resp := range resps { + if resp.Data.Stats == nil { + continue + } + + hasStats = true + if resp.Data.Stats.Samples == nil { + continue + } + + for _, s := range resp.Data.Stats.Samples.TotalQueryableSamplesPerStep { + if shouldSumStats { + if stats, ok := output[s.GetTimestampMs()]; ok { + stats.Value += s.Value + } else { + output[s.GetTimestampMs()] = s + } + } else { + output[s.GetTimestampMs()] = s + } + } + } + + if !hasStats { + return nil + } + keys := make([]int64, 0, len(output)) + for key := range output { + keys = append(keys, key) + } + + sort.Slice(keys, func(i, j int) bool { return keys[i] < keys[j] }) + + result := &PrometheusResponseStats{Samples: &PrometheusResponseSamplesStats{}} + for _, key := range keys { + result.Samples.TotalQueryableSamplesPerStep = append(result.Samples.TotalQueryableSamplesPerStep, output[key]) + result.Samples.TotalQueryableSamples += output[key].Value + } + + return result +} + +type sortPlan int + +const ( + mergeOnly sortPlan = 0 + sortByValuesAsc sortPlan = 1 + sortByValuesDesc sortPlan = 2 + sortByLabels sortPlan = 3 +) + +type pair struct { + metric string + s *Sample +} + +// getSortValueFromPair gets the float value used for sorting from samples. +// If float sample, use sample value. If histogram sample, use histogram sum. +// This is the same behavior as Prometheus https://github.com/prometheus/prometheus/blob/v2.53.0/promql/functions.go#L1595. +func getSortValueFromPair(samples []*pair, i int) float64 { + if samples[i].s.Histogram != nil { + return samples[i].s.Histogram.Histogram.Sum + } + // Impossible to have both histogram and sample nil.
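+ // Float path: Sample is guaranteed non-nil here, so its value is the sort key
+ // (e.g. a hypothetical sample {t: 100, v: 1.5} sorts by 1.5).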
+ return samples[i].s.Sample.Value +} + +func sortPlanForQuery(q string) (sortPlan, error) { + expr, err := promqlparser.ParseExpr(q) + if err != nil { + return 0, err + } + // Check if the root expression is topk or bottomk + if aggr, ok := expr.(*promqlparser.AggregateExpr); ok { + if aggr.Op == promqlparser.TOPK || aggr.Op == promqlparser.BOTTOMK { + return mergeOnly, nil + } + } + checkForSort := func(expr promqlparser.Expr) (sortAsc, sortDesc bool) { + if n, ok := expr.(*promqlparser.Call); ok { + if n.Func != nil { + if n.Func.Name == "sort" { + sortAsc = true + } + if n.Func.Name == "sort_desc" { + sortDesc = true + } + } + } + return sortAsc, sortDesc + } + // Check the root expression for sort + if sortAsc, sortDesc := checkForSort(expr); sortAsc || sortDesc { + if sortAsc { + return sortByValuesAsc, nil + } + return sortByValuesDesc, nil + } + + // If the root expression is a binary expression, check the LHS and RHS for sort + if bin, ok := expr.(*promqlparser.BinaryExpr); ok { + if sortAsc, sortDesc := checkForSort(bin.LHS); sortAsc || sortDesc { + if sortAsc { + return sortByValuesAsc, nil + } + return sortByValuesDesc, nil + } + if sortAsc, sortDesc := checkForSort(bin.RHS); sortAsc || sortDesc { + if sortAsc { + return sortByValuesAsc, nil + } + return sortByValuesDesc, nil + } + } + return sortByLabels, nil +} + +// mergeSampleStreams deduplicates sample streams using a map. +func mergeSampleStreams(output map[string]SampleStream, sampleStreams []SampleStream) { buf := make([]byte, 0, 1024) for _, stream := range sampleStreams { metric := string(cortexpb.FromLabelAdaptersToLabels(stream.Labels).Bytes(buf)) diff --git a/pkg/querier/tripperware/merge_test.go b/pkg/querier/tripperware/merge_test.go index 124cfd08b5..6a51ca7072 100644 --- a/pkg/querier/tripperware/merge_test.go +++ b/pkg/querier/tripperware/merge_test.go @@ -363,7 +363,7 @@ func TestMergeSampleStreams(t *testing.T) { t.Run(tc.name, func(t *testing.T) { t.Parallel() output := make(map[string]SampleStream) - MergeSampleStreams(output, tc.sampleStreams) + mergeSampleStreams(output, tc.sampleStreams) assert.Equal(t, tc.expectedOutput, output) }) } @@ -596,3 +596,69 @@ func TestSliceHistograms(t *testing.T) { }) } } + +func Test_sortPlanForQuery(t *testing.T) { + tc := []struct { + query string + expectedPlan sortPlan + err bool + }{ + { + query: "invalid(10, up)", + expectedPlan: mergeOnly, + err: true, + }, + { + query: "topk(10, up)", + expectedPlan: mergeOnly, + err: false, + }, + { + query: "bottomk(10, up)", + expectedPlan: mergeOnly, + err: false, + }, + { + query: "1 + topk(10, up)", + expectedPlan: sortByLabels, + err: false, + }, + { + query: "1 + sort_desc(sum by (job) (up) )", + expectedPlan: sortByValuesDesc, + err: false, + }, + { + query: "sort(topk by (job) (10, up))", + expectedPlan: sortByValuesAsc, + err: false, + }, + { + query: "topk(5, up) by (job) + sort_desc(up)", + expectedPlan: sortByValuesDesc, + err: false, + }, + { + query: "sort(up) + topk(5, up) by (job)", + expectedPlan: sortByValuesAsc, + err: false, + }, + { + query: "sum(up) by (job)", + expectedPlan: sortByLabels, + err: false, + }, + } + + for _, tc := range tc { + t.Run(tc.query, func(t *testing.T) { + p, err := sortPlanForQuery(tc.query) + if tc.err { + assert.Error(t, err) + } else { + assert.NoError(t, err) + assert.Equal(t, tc.expectedPlan, p) + } + }) + } +} diff --git a/pkg/querier/tripperware/query.go b/pkg/querier/tripperware/query.go index 784de900b5..b985f6b3dd 100644 --- a/pkg/querier/tripperware/query.go +++ 
b/pkg/querier/tripperware/query.go @@ -7,7 +7,6 @@ import ( "fmt" "io" "net/http" - "sort" "strconv" "strings" "time" @@ -19,9 +18,11 @@ import ( "github.com/gogo/protobuf/proto" jsoniter "github.com/json-iterator/go" "github.com/opentracing/opentracing-go" + otlog "github.com/opentracing/opentracing-go/log" "github.com/pkg/errors" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/timestamp" "github.com/prometheus/prometheus/util/jsonutil" "github.com/weaveworks/common/httpgrpc" @@ -119,6 +120,164 @@ func decodeSampleStream(ptr unsafe.Pointer, iter *jsoniter.Iterator) { } } +type CachingOptions struct { + Disabled bool +} + +type PrometheusRequest struct { + Request + Time int64 + Start int64 + End int64 + Step int64 + Timeout time.Duration + Query string + Path string + Headers http.Header + Stats string + CachingOptions CachingOptions +} + +func (m *PrometheusRequest) GetPath() string { + if m != nil { + return m.Path + } + return "" +} + +func (m *PrometheusRequest) GetStart() int64 { + if m != nil { + return m.Start + } + return 0 +} + +func (m *PrometheusRequest) GetEnd() int64 { + if m != nil { + return m.End + } + return 0 +} + +func (m *PrometheusRequest) GetStep() int64 { + if m != nil { + return m.Step + } + return 0 +} + +func (m *PrometheusRequest) GetTimeout() time.Duration { + if m != nil { + return m.Timeout + } + return 0 +} + +func (m *PrometheusRequest) GetQuery() string { + if m != nil { + return m.Query + } + return "" +} + +func (m *PrometheusRequest) GetCachingOptions() CachingOptions { + if m != nil { + return m.CachingOptions + } + return CachingOptions{} +} + +func (m *PrometheusRequest) GetHeaders() http.Header { + if m != nil { + return m.Headers + } + return nil +} + +func (m *PrometheusRequest) GetStats() string { + if m != nil { + return m.Stats + } + return "" +} + +// WithStartEnd clones the current `PrometheusRequest` with new `start` and `end` timestamps. +func (m *PrometheusRequest) WithStartEnd(start int64, end int64) Request { + new := *m + new.Start = start + new.End = end + return &new +} + +// WithQuery clones the current `PrometheusRequest` with a new query. +func (m *PrometheusRequest) WithQuery(query string) Request { + new := *m + new.Query = query + return &new +} + +// WithStats clones the current `PrometheusRequest` with new stats. +func (m *PrometheusRequest) WithStats(stats string) Request { + new := *m + new.Stats = stats + return &new +} + +// LogToSpan logs the current `PrometheusRequest` parameters to the specified span. +func (m *PrometheusRequest) LogToSpan(sp opentracing.Span) { + if m.GetStep() > 0 { + sp.LogFields( + otlog.String("query", m.GetQuery()), + otlog.String("start", timestamp.Time(m.GetStart()).String()), + otlog.String("end", timestamp.Time(m.GetEnd()).String()), + otlog.Int64("step (ms)", m.GetStep()), + ) + } else if m != nil { + sp.LogFields( + otlog.String("query", m.GetQuery()), + otlog.String("time", timestamp.Time(m.Time).String()), + ) + } +} + +func (resp *PrometheusResponse) HTTPHeaders() map[string][]string { + if resp != nil && resp.GetHeaders() != nil { + r := map[string][]string{} + for _, header := range resp.GetHeaders() { + if header != nil { + r[header.Name] = header.Values + } + } + + return r + } + return nil +} + +// NewEmptyPrometheusResponse returns an empty successful Prometheus query response: an instant (vector) response when `instant` is true, otherwise a range (matrix) response.
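+// A short sketch of the two shapes (illustrative; matches the limits_test.go
+// change above):
+//
+//	instantRes := NewEmptyPrometheusResponse(true)  // instant query: empty vector result
+//	rangeRes := NewEmptyPrometheusResponse(false)   // range query: empty matrix result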
+func NewEmptyPrometheusResponse(instant bool) *PrometheusResponse { + if instant { + return &PrometheusResponse{ + Status: StatusSuccess, + Data: PrometheusData{ + ResultType: model.ValVector.String(), + Result: PrometheusQueryResult{ + Result: &PrometheusQueryResult_Vector{}, + }, + }, + } + } + return &PrometheusResponse{ + Status: StatusSuccess, + Data: PrometheusData{ + ResultType: model.ValMatrix.String(), + Result: PrometheusQueryResult{ + Result: &PrometheusQueryResult_Matrix{}, + }, + }, + } +} + func encodeSampleStream(ptr unsafe.Pointer, stream *jsoniter.Stream) { ss := (*SampleStream)(ptr) stream.WriteObjectStart() @@ -160,6 +319,58 @@ func encodeSampleStream(ptr unsafe.Pointer, stream *jsoniter.Stream) { stream.WriteObjectEnd() } +func decodeSample(ptr unsafe.Pointer, iter *jsoniter.Iterator) { + ss := (*Sample)(ptr) + for field := iter.ReadObject(); field != ""; field = iter.ReadObject() { + switch field { + case "metric": + metricString := iter.ReadAny().ToString() + lbls := labels.Labels{} + if err := json.UnmarshalFromString(metricString, &lbls); err != nil { + iter.ReportError("unmarshal Sample", err.Error()) + return + } + ss.Labels = cortexpb.FromLabelsToLabelAdapters(lbls) + case "value": + ss.Sample = &cortexpb.Sample{} + cortexpb.SampleJsoniterDecode(unsafe.Pointer(ss.Sample), iter) + case "histogram": + ss.Histogram = &SampleHistogramPair{} + UnmarshalSampleHistogramPairJSON(unsafe.Pointer(ss.Histogram), iter) + default: + iter.ReportError("unmarshal Sample", fmt.Sprint("unexpected key:", field)) + return + } + } +} + +func encodeSample(ptr unsafe.Pointer, stream *jsoniter.Stream) { + ss := (*Sample)(ptr) + stream.WriteObjectStart() + + stream.WriteObjectField(`metric`) + lbls, err := cortexpb.FromLabelAdaptersToLabels(ss.Labels).MarshalJSON() + if err != nil { + stream.Error = err + return + } + stream.SetBuffer(append(stream.Buffer(), lbls...)) + + if ss.Sample != nil { + stream.WriteMore() + stream.WriteObjectField(`value`) + cortexpb.SampleJsoniterEncode(unsafe.Pointer(ss.Sample), stream) + } + + if ss.Histogram != nil { + stream.WriteMore() + stream.WriteObjectField(`histogram`) + MarshalSampleHistogramPairJSON(unsafe.Pointer(ss.Histogram), stream) + } + + stream.WriteObjectEnd() +} + func PrometheusResponseQueryableSamplesStatsPerStepJsoniterDecode(ptr unsafe.Pointer, iter *jsoniter.Iterator) { if !iter.ReadArray() { iter.ReportError("tripperware.PrometheusResponseQueryableSamplesStatsPerStep", "expected [") @@ -198,6 +409,8 @@ func init() { jsoniter.RegisterTypeDecoderFunc("tripperware.PrometheusResponseQueryableSamplesStatsPerStep", PrometheusResponseQueryableSamplesStatsPerStepJsoniterDecode) jsoniter.RegisterTypeEncoderFunc("tripperware.SampleStream", encodeSampleStream, marshalJSONIsEmpty) jsoniter.RegisterTypeDecoderFunc("tripperware.SampleStream", decodeSampleStream) + jsoniter.RegisterTypeEncoderFunc("tripperware.Sample", encodeSample, marshalJSONIsEmpty) + jsoniter.RegisterTypeDecoderFunc("tripperware.Sample", decodeSample) jsoniter.RegisterTypeEncoderFunc("tripperware.SampleHistogramPair", MarshalSampleHistogramPairJSON, marshalJSONIsEmpty) jsoniter.RegisterTypeDecoderFunc("tripperware.SampleHistogramPair", UnmarshalSampleHistogramPairJSON) } @@ -271,21 +484,79 @@ func BodyBufferFromHTTPGRPCResponse(res *httpgrpc.HTTPResponse, logger log.Logge return res.Body, nil } -func StatsMerge(stats map[int64]*PrometheusResponseQueryableSamplesStatsPerStep) *PrometheusResponseStats { - keys := make([]int64, 0, len(stats)) - for key := range stats { - keys = 
append(keys, key) +// UnmarshalJSON implements json.Unmarshaler. +func (s *PrometheusData) UnmarshalJSON(data []byte) error { + var queryData struct { + ResultType string `json:"resultType"` + Stats *PrometheusResponseStats `json:"stats,omitempty"` } - sort.Slice(keys, func(i, j int) bool { return keys[i] < keys[j] }) - - result := &PrometheusResponseStats{Samples: &PrometheusResponseSamplesStats{}} - for _, key := range keys { - result.Samples.TotalQueryableSamplesPerStep = append(result.Samples.TotalQueryableSamplesPerStep, stats[key]) - result.Samples.TotalQueryableSamples += stats[key].Value + if err := json.Unmarshal(data, &queryData); err != nil { + return err } + s.ResultType = queryData.ResultType + s.Stats = queryData.Stats + switch s.ResultType { + case model.ValVector.String(): + var result struct { + Samples []*Sample `json:"result"` + } + if err := json.Unmarshal(data, &result); err != nil { + return err + } + s.Result = PrometheusQueryResult{ + Result: &PrometheusQueryResult_Vector{Vector: &Vector{ + Samples: result.Samples, + }}, + } + case model.ValMatrix.String(): + var result struct { + SampleStreams []SampleStream `json:"result"` + } + if err := json.Unmarshal(data, &result); err != nil { + return err + } + s.Result = PrometheusQueryResult{ + Result: &PrometheusQueryResult_Matrix{Matrix: &Matrix{ + SampleStreams: result.SampleStreams, + }}, + } + default: + s.Result = PrometheusQueryResult{ + Result: &PrometheusQueryResult_RawBytes{data}, + } + } + return nil +} - return result +// MarshalJSON implements json.Marshaler. +func (s *PrometheusData) MarshalJSON() ([]byte, error) { + switch s.ResultType { + case model.ValVector.String(): + res := struct { + ResultType string `json:"resultType"` + Data []*Sample `json:"result"` + Stats *PrometheusResponseStats `json:"stats,omitempty"` + }{ + ResultType: s.ResultType, + Data: s.Result.GetVector().Samples, + Stats: s.Stats, + } + return json.Marshal(res) + case model.ValMatrix.String(): + res := struct { + ResultType string `json:"resultType"` + Data []SampleStream `json:"result"` + Stats *PrometheusResponseStats `json:"stats,omitempty"` + }{ + ResultType: s.ResultType, + Data: s.Result.GetMatrix().SampleStreams, + Stats: s.Stats, + } + return json.Marshal(res) + default: + return s.Result.GetRawBytes(), nil + } } // Adapted from https://github.com/prometheus/client_golang/blob/4b158abea9470f75b6f07460cdc2189b91914562/api/prometheus/v1/api.go#L84. diff --git a/pkg/querier/tripperware/query.pb.go b/pkg/querier/tripperware/query.pb.go index 5c46214404..84c1c406bd 100644 --- a/pkg/querier/tripperware/query.pb.go +++ b/pkg/querier/tripperware/query.pb.go @@ -4,12 +4,15 @@ package tripperware import ( + bytes "bytes" encoding_binary "encoding/binary" fmt "fmt" cortexpb "github.com/cortexproject/cortex/pkg/cortexpb" github_com_cortexproject_cortex_pkg_cortexpb "github.com/cortexproject/cortex/pkg/cortexpb" _ "github.com/gogo/protobuf/gogoproto" proto "github.com/gogo/protobuf/proto" + types "github.com/gogo/protobuf/types" + _ "github.com/golang/protobuf/ptypes/duration" io "io" math "math" math_bits "math/bits" @@ -28,6 +31,267 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +type PrometheusResponse struct { + Status string `protobuf:"bytes,1,opt,name=Status,proto3" json:"status"` + Data PrometheusData `protobuf:"bytes,2,opt,name=Data,proto3" json:"data,omitempty"` + ErrorType string `protobuf:"bytes,3,opt,name=ErrorType,proto3" json:"errorType,omitempty"` + Error string `protobuf:"bytes,4,opt,name=Error,proto3" json:"error,omitempty"` + Headers []*PrometheusResponseHeader `protobuf:"bytes,5,rep,name=Headers,proto3" json:"-"` + Warnings []string `protobuf:"bytes,6,rep,name=Warnings,proto3" json:"warnings,omitempty"` +} + +func (m *PrometheusResponse) Reset() { *m = PrometheusResponse{} } +func (*PrometheusResponse) ProtoMessage() {} +func (*PrometheusResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_5c6ac9b241082464, []int{0} +} +func (m *PrometheusResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PrometheusResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PrometheusResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PrometheusResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_PrometheusResponse.Merge(m, src) +} +func (m *PrometheusResponse) XXX_Size() int { + return m.Size() +} +func (m *PrometheusResponse) XXX_DiscardUnknown() { + xxx_messageInfo_PrometheusResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_PrometheusResponse proto.InternalMessageInfo + +func (m *PrometheusResponse) GetStatus() string { + if m != nil { + return m.Status + } + return "" +} + +func (m *PrometheusResponse) GetData() PrometheusData { + if m != nil { + return m.Data + } + return PrometheusData{} +} + +func (m *PrometheusResponse) GetErrorType() string { + if m != nil { + return m.ErrorType + } + return "" +} + +func (m *PrometheusResponse) GetError() string { + if m != nil { + return m.Error + } + return "" +} + +func (m *PrometheusResponse) GetHeaders() []*PrometheusResponseHeader { + if m != nil { + return m.Headers + } + return nil +} + +func (m *PrometheusResponse) GetWarnings() []string { + if m != nil { + return m.Warnings + } + return nil +} + +type PrometheusData struct { + ResultType string `protobuf:"bytes,1,opt,name=ResultType,proto3" json:"resultType"` + Result PrometheusQueryResult `protobuf:"bytes,2,opt,name=Result,proto3" json:"result"` + Stats *PrometheusResponseStats `protobuf:"bytes,3,opt,name=stats,proto3" json:"stats,omitempty"` +} + +func (m *PrometheusData) Reset() { *m = PrometheusData{} } +func (*PrometheusData) ProtoMessage() {} +func (*PrometheusData) Descriptor() ([]byte, []int) { + return fileDescriptor_5c6ac9b241082464, []int{1} +} +func (m *PrometheusData) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PrometheusData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PrometheusData.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PrometheusData) XXX_Merge(src proto.Message) { + xxx_messageInfo_PrometheusData.Merge(m, src) +} +func (m *PrometheusData) XXX_Size() int { + return m.Size() +} +func (m *PrometheusData) XXX_DiscardUnknown() { + xxx_messageInfo_PrometheusData.DiscardUnknown(m) +} + +var xxx_messageInfo_PrometheusData 
proto.InternalMessageInfo + +func (m *PrometheusData) GetResultType() string { + if m != nil { + return m.ResultType + } + return "" +} + +func (m *PrometheusData) GetResult() PrometheusQueryResult { + if m != nil { + return m.Result + } + return PrometheusQueryResult{} +} + +func (m *PrometheusData) GetStats() *PrometheusResponseStats { + if m != nil { + return m.Stats + } + return nil +} + +type CachedResponse struct { + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key"` + // List of cached responses; non-overlapping and in order. + Extents []Extent `protobuf:"bytes,2,rep,name=extents,proto3" json:"extents"` +} + +func (m *CachedResponse) Reset() { *m = CachedResponse{} } +func (*CachedResponse) ProtoMessage() {} +func (*CachedResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_5c6ac9b241082464, []int{2} +} +func (m *CachedResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CachedResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CachedResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CachedResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_CachedResponse.Merge(m, src) +} +func (m *CachedResponse) XXX_Size() int { + return m.Size() +} +func (m *CachedResponse) XXX_DiscardUnknown() { + xxx_messageInfo_CachedResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_CachedResponse proto.InternalMessageInfo + +func (m *CachedResponse) GetKey() string { + if m != nil { + return m.Key + } + return "" +} + +func (m *CachedResponse) GetExtents() []Extent { + if m != nil { + return m.Extents + } + return nil +} + +type Extent struct { + Start int64 `protobuf:"varint,1,opt,name=start,proto3" json:"start"` + End int64 `protobuf:"varint,2,opt,name=end,proto3" json:"end"` + TraceId string `protobuf:"bytes,4,opt,name=trace_id,json=traceId,proto3" json:"-"` + Response *types.Any `protobuf:"bytes,5,opt,name=response,proto3" json:"response"` +} + +func (m *Extent) Reset() { *m = Extent{} } +func (*Extent) ProtoMessage() {} +func (*Extent) Descriptor() ([]byte, []int) { + return fileDescriptor_5c6ac9b241082464, []int{3} +} +func (m *Extent) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Extent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Extent.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Extent) XXX_Merge(src proto.Message) { + xxx_messageInfo_Extent.Merge(m, src) +} +func (m *Extent) XXX_Size() int { + return m.Size() +} +func (m *Extent) XXX_DiscardUnknown() { + xxx_messageInfo_Extent.DiscardUnknown(m) +} + +var xxx_messageInfo_Extent proto.InternalMessageInfo + +func (m *Extent) GetStart() int64 { + if m != nil { + return m.Start + } + return 0 +} + +func (m *Extent) GetEnd() int64 { + if m != nil { + return m.End + } + return 0 +} + +func (m *Extent) GetTraceId() string { + if m != nil { + return m.TraceId + } + return "" +} + +func (m *Extent) GetResponse() *types.Any { + if m != nil { + return m.Response + } + return nil +} + type SampleStream struct { Labels []github_com_cortexproject_cortex_pkg_cortexpb.LabelAdapter `protobuf:"bytes,1,rep,name=labels,proto3,customtype=github.com/cortexproject/cortex/pkg/cortexpb.LabelAdapter" json:"metric"` 
Samples []cortexpb.Sample `protobuf:"bytes,2,rep,name=samples,proto3" json:"values"` @@ -37,7 +301,7 @@ type SampleStream struct { func (m *SampleStream) Reset() { *m = SampleStream{} } func (*SampleStream) ProtoMessage() {} func (*SampleStream) Descriptor() ([]byte, []int) { - return fileDescriptor_5c6ac9b241082464, []int{0} + return fileDescriptor_5c6ac9b241082464, []int{4} } func (m *SampleStream) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -88,7 +352,7 @@ type SampleHistogramPair struct { func (m *SampleHistogramPair) Reset() { *m = SampleHistogramPair{} } func (*SampleHistogramPair) ProtoMessage() {} func (*SampleHistogramPair) Descriptor() ([]byte, []int) { - return fileDescriptor_5c6ac9b241082464, []int{1} + return fileDescriptor_5c6ac9b241082464, []int{5} } func (m *SampleHistogramPair) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -140,7 +404,7 @@ type SampleHistogram struct { func (m *SampleHistogram) Reset() { *m = SampleHistogram{} } func (*SampleHistogram) ProtoMessage() {} func (*SampleHistogram) Descriptor() ([]byte, []int) { - return fileDescriptor_5c6ac9b241082464, []int{2} + return fileDescriptor_5c6ac9b241082464, []int{6} } func (m *SampleHistogram) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -200,7 +464,7 @@ type HistogramBucket struct { func (m *HistogramBucket) Reset() { *m = HistogramBucket{} } func (*HistogramBucket) ProtoMessage() {} func (*HistogramBucket) Descriptor() ([]byte, []int) { - return fileDescriptor_5c6ac9b241082464, []int{3} + return fileDescriptor_5c6ac9b241082464, []int{7} } func (m *HistogramBucket) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -264,7 +528,7 @@ type PrometheusResponseStats struct { func (m *PrometheusResponseStats) Reset() { *m = PrometheusResponseStats{} } func (*PrometheusResponseStats) ProtoMessage() {} func (*PrometheusResponseStats) Descriptor() ([]byte, []int) { - return fileDescriptor_5c6ac9b241082464, []int{4} + return fileDescriptor_5c6ac9b241082464, []int{8} } func (m *PrometheusResponseStats) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -308,7 +572,7 @@ type PrometheusResponseSamplesStats struct { func (m *PrometheusResponseSamplesStats) Reset() { *m = PrometheusResponseSamplesStats{} } func (*PrometheusResponseSamplesStats) ProtoMessage() {} func (*PrometheusResponseSamplesStats) Descriptor() ([]byte, []int) { - return fileDescriptor_5c6ac9b241082464, []int{5} + return fileDescriptor_5c6ac9b241082464, []int{9} } func (m *PrometheusResponseSamplesStats) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -361,7 +625,7 @@ func (m *PrometheusResponseQueryableSamplesStatsPerStep) Reset() { } func (*PrometheusResponseQueryableSamplesStatsPerStep) ProtoMessage() {} func (*PrometheusResponseQueryableSamplesStatsPerStep) Descriptor() ([]byte, []int) { - return fileDescriptor_5c6ac9b241082464, []int{6} + return fileDescriptor_5c6ac9b241082464, []int{10} } func (m *PrometheusResponseQueryableSamplesStatsPerStep) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -412,7 +676,7 @@ type PrometheusResponseHeader struct { func (m *PrometheusResponseHeader) Reset() { *m = PrometheusResponseHeader{} } func (*PrometheusResponseHeader) ProtoMessage() {} func (*PrometheusResponseHeader) Descriptor() ([]byte, []int) { - return fileDescriptor_5c6ac9b241082464, []int{7} + return fileDescriptor_5c6ac9b241082464, []int{11} } func (m *PrometheusResponseHeader) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -455,22 +719,26 @@ func (m *PrometheusResponseHeader) 
GetValues() []string { return nil } -type PrometheusRequestHeader struct { - Name string `protobuf:"bytes,1,opt,name=Name,proto3" json:"-"` - Values []string `protobuf:"bytes,2,rep,name=Values,proto3" json:"-"` +type PrometheusQueryResult struct { + // Types that are valid to be assigned to Result: + // + // *PrometheusQueryResult_Vector + // *PrometheusQueryResult_RawBytes + // *PrometheusQueryResult_Matrix + Result isPrometheusQueryResult_Result `protobuf_oneof:"result"` } -func (m *PrometheusRequestHeader) Reset() { *m = PrometheusRequestHeader{} } -func (*PrometheusRequestHeader) ProtoMessage() {} -func (*PrometheusRequestHeader) Descriptor() ([]byte, []int) { - return fileDescriptor_5c6ac9b241082464, []int{8} +func (m *PrometheusQueryResult) Reset() { *m = PrometheusQueryResult{} } +func (*PrometheusQueryResult) ProtoMessage() {} +func (*PrometheusQueryResult) Descriptor() ([]byte, []int) { + return fileDescriptor_5c6ac9b241082464, []int{12} } -func (m *PrometheusRequestHeader) XXX_Unmarshal(b []byte) error { +func (m *PrometheusQueryResult) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *PrometheusRequestHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *PrometheusQueryResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_PrometheusRequestHeader.Marshal(b, m, deterministic) + return xxx_messageInfo_PrometheusQueryResult.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -480,125 +748,491 @@ func (m *PrometheusRequestHeader) XXX_Marshal(b []byte, deterministic bool) ([]b return b[:n], nil } } -func (m *PrometheusRequestHeader) XXX_Merge(src proto.Message) { - xxx_messageInfo_PrometheusRequestHeader.Merge(m, src) +func (m *PrometheusQueryResult) XXX_Merge(src proto.Message) { + xxx_messageInfo_PrometheusQueryResult.Merge(m, src) } -func (m *PrometheusRequestHeader) XXX_Size() int { +func (m *PrometheusQueryResult) XXX_Size() int { return m.Size() } -func (m *PrometheusRequestHeader) XXX_DiscardUnknown() { - xxx_messageInfo_PrometheusRequestHeader.DiscardUnknown(m) +func (m *PrometheusQueryResult) XXX_DiscardUnknown() { + xxx_messageInfo_PrometheusQueryResult.DiscardUnknown(m) } -var xxx_messageInfo_PrometheusRequestHeader proto.InternalMessageInfo +var xxx_messageInfo_PrometheusQueryResult proto.InternalMessageInfo -func (m *PrometheusRequestHeader) GetName() string { - if m != nil { - return m.Name - } - return "" +type isPrometheusQueryResult_Result interface { + isPrometheusQueryResult_Result() + Equal(interface{}) bool + MarshalTo([]byte) (int, error) + Size() int +} + +type PrometheusQueryResult_Vector struct { + Vector *Vector `protobuf:"bytes,1,opt,name=vector,proto3,oneof"` +} +type PrometheusQueryResult_RawBytes struct { + RawBytes []byte `protobuf:"bytes,2,opt,name=rawBytes,proto3,oneof"` } +type PrometheusQueryResult_Matrix struct { + Matrix *Matrix `protobuf:"bytes,3,opt,name=matrix,proto3,oneof"` +} + +func (*PrometheusQueryResult_Vector) isPrometheusQueryResult_Result() {} +func (*PrometheusQueryResult_RawBytes) isPrometheusQueryResult_Result() {} +func (*PrometheusQueryResult_Matrix) isPrometheusQueryResult_Result() {} -func (m *PrometheusRequestHeader) GetValues() []string { +func (m *PrometheusQueryResult) GetResult() isPrometheusQueryResult_Result { if m != nil { - return m.Values + return m.Result } return nil } -func init() { - proto.RegisterType((*SampleStream)(nil), "tripperware.SampleStream") - 
proto.RegisterType((*SampleHistogramPair)(nil), "tripperware.SampleHistogramPair") - proto.RegisterType((*SampleHistogram)(nil), "tripperware.SampleHistogram") - proto.RegisterType((*HistogramBucket)(nil), "tripperware.HistogramBucket") - proto.RegisterType((*PrometheusResponseStats)(nil), "tripperware.PrometheusResponseStats") - proto.RegisterType((*PrometheusResponseSamplesStats)(nil), "tripperware.PrometheusResponseSamplesStats") - proto.RegisterType((*PrometheusResponseQueryableSamplesStatsPerStep)(nil), "tripperware.PrometheusResponseQueryableSamplesStatsPerStep") - proto.RegisterType((*PrometheusResponseHeader)(nil), "tripperware.PrometheusResponseHeader") - proto.RegisterType((*PrometheusRequestHeader)(nil), "tripperware.PrometheusRequestHeader") +func (m *PrometheusQueryResult) GetVector() *Vector { + if x, ok := m.GetResult().(*PrometheusQueryResult_Vector); ok { + return x.Vector + } + return nil } -func init() { proto.RegisterFile("query.proto", fileDescriptor_5c6ac9b241082464) } - -var fileDescriptor_5c6ac9b241082464 = []byte{ - // 646 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x54, 0x4f, 0x4f, 0x13, 0x41, - 0x14, 0xdf, 0x69, 0xa1, 0x84, 0x29, 0x11, 0x32, 0x60, 0x2c, 0x04, 0x67, 0xeb, 0x9e, 0x48, 0x8c, - 0x25, 0xc1, 0xc4, 0x44, 0xbd, 0xc8, 0x9e, 0x48, 0xfc, 0x87, 0x53, 0xe2, 0xc1, 0x8b, 0x99, 0x2d, - 0x93, 0xb2, 0xb2, 0xcb, 0x2c, 0x33, 0xb3, 0x82, 0x9e, 0xfc, 0x08, 0x5e, 0xbc, 0x78, 0xf3, 0xe6, - 0x47, 0xe1, 0xc8, 0x91, 0x78, 0xd8, 0xc8, 0x72, 0x31, 0x3d, 0xf1, 0x11, 0xcc, 0xcc, 0xec, 0xb6, - 0x85, 0x36, 0x35, 0xc4, 0xdb, 0xbc, 0xdf, 0x7b, 0xbf, 0xf7, 0x7b, 0xef, 0xed, 0x7b, 0x0b, 0xeb, - 0x87, 0x29, 0x13, 0x9f, 0x5a, 0x89, 0xe0, 0x8a, 0xa3, 0xba, 0x12, 0x61, 0x92, 0x30, 0x71, 0x44, - 0x05, 0x5b, 0x59, 0xea, 0xf2, 0x2e, 0x37, 0xf8, 0xba, 0x7e, 0xd9, 0x90, 0x95, 0xc7, 0xdd, 0x50, - 0xed, 0xa5, 0x41, 0xab, 0xc3, 0xe3, 0xf5, 0x0e, 0x17, 0x8a, 0x1d, 0x27, 0x82, 0x7f, 0x60, 0x1d, - 0x55, 0x58, 0xeb, 0xc9, 0x7e, 0xb7, 0x74, 0x04, 0xc5, 0xc3, 0x52, 0xbd, 0xef, 0x15, 0x38, 0xd7, - 0xa6, 0x71, 0x12, 0xb1, 0xb6, 0x12, 0x8c, 0xc6, 0xe8, 0x18, 0xd6, 0x22, 0x1a, 0xb0, 0x48, 0x36, - 0x40, 0xb3, 0xba, 0x56, 0xdf, 0x58, 0x6c, 0x95, 0xc4, 0xd6, 0x0b, 0x8d, 0x6f, 0xd3, 0x50, 0xf8, - 0xcf, 0x4f, 0x32, 0xd7, 0xf9, 0x95, 0xb9, 0x37, 0x12, 0xb6, 0xfc, 0xcd, 0x5d, 0x9a, 0x28, 0x26, - 0x7a, 0x99, 0x5b, 0x8b, 0x99, 0x12, 0x61, 0x87, 0x14, 0x7a, 0xe8, 0x09, 0x9c, 0x91, 0xa6, 0x12, - 0xd9, 0xa8, 0x18, 0xe9, 0x85, 0x81, 0xb4, 0x2d, 0xd1, 0xbf, 0xa5, 0x75, 0x35, 0xf5, 0x23, 0x8d, - 0x52, 0x26, 0x49, 0x49, 0x40, 0x3b, 0x10, 0xee, 0x85, 0x52, 0xf1, 0xae, 0xa0, 0xb1, 0x6c, 0x54, - 0x0d, 0xbd, 0xd9, 0x1a, 0x9a, 0x5c, 0x91, 0x61, 0xab, 0x0c, 0x32, 0x6d, 0xa0, 0x22, 0xdd, 0x10, - 0x97, 0x0c, 0xbd, 0xbd, 0xcf, 0x70, 0x71, 0x0c, 0x0d, 0xdd, 0x83, 0x73, 0x2a, 0x8c, 0x99, 0x54, - 0x34, 0x4e, 0xde, 0xc7, 0x7a, 0x50, 0x60, 0xad, 0x4a, 0xea, 0x7d, 0xec, 0xa5, 0x44, 0xcf, 0xe0, - 0x6c, 0x3f, 0x4f, 0xa3, 0xd2, 0x04, 0x6b, 0xf5, 0x8d, 0xd5, 0x49, 0xe5, 0xf8, 0x53, 0xba, 0x14, - 0x32, 0x20, 0x79, 0x87, 0x70, 0xfe, 0x5a, 0x0c, 0x5a, 0x82, 0xd3, 0x1d, 0x9e, 0x1e, 0x28, 0x23, - 0x08, 0x88, 0x35, 0xd0, 0x02, 0xac, 0xca, 0xd4, 0x8a, 0x00, 0xa2, 0x9f, 0xe8, 0x11, 0x9c, 0x09, - 0xd2, 0xce, 0x3e, 0x53, 0xe5, 0x24, 0xae, 0x4a, 0x0f, 0x44, 0x4d, 0x10, 0x29, 0x83, 0x3d, 0x09, - 0xe7, 0xaf, 0xf9, 0x10, 0x86, 0x30, 0xe0, 0xe9, 0xc1, 0x2e, 0x15, 0x21, 0xb3, 0x8d, 0x4e, 0x93, - 0x21, 0x44, 0x97, 0x14, 0xf1, 0x23, 0x26, 0x0a, 0x79, 0x6b, 0x68, 0x34, 0xd5, 0x72, 0x8d, 0xaa, - 
0x45, 0x8d, 0x31, 0x28, 0x7f, 0x6a, 0xa8, 0x7c, 0x2f, 0x86, 0x77, 0xb6, 0x05, 0x8f, 0x99, 0xda, - 0x63, 0xa9, 0x24, 0x4c, 0x26, 0xfc, 0x40, 0xb2, 0xb6, 0xa2, 0x4a, 0x22, 0x32, 0x58, 0x08, 0x60, - 0x46, 0x78, 0xff, 0x4a, 0x1f, 0x63, 0x68, 0x36, 0xda, 0xb0, 0xfd, 0x7a, 0x2f, 0x73, 0x4b, 0x7e, - 0x7f, 0x51, 0xbc, 0x6f, 0x15, 0x88, 0x27, 0x13, 0xd1, 0x6b, 0x78, 0x5b, 0x71, 0x45, 0xa3, 0x37, - 0xfa, 0x08, 0x69, 0x10, 0x95, 0x5e, 0xfb, 0x9d, 0xfd, 0xe5, 0x5e, 0xe6, 0x8e, 0x0f, 0x20, 0xe3, - 0x61, 0xf4, 0x03, 0xc0, 0xd5, 0xb1, 0x9e, 0x6d, 0x26, 0xda, 0x8a, 0x25, 0xc5, 0xba, 0x3f, 0xfd, - 0x47, 0x77, 0xd7, 0xd9, 0xa6, 0xda, 0x22, 0x85, 0xdf, 0xec, 0x65, 0xee, 0x44, 0x11, 0x32, 0xd1, - 0xeb, 0x85, 0xf0, 0x86, 0x8a, 0xfa, 0x73, 0x9a, 0x2b, 0x2c, 0xd6, 0xdf, 0x1a, 0x23, 0xb7, 0x51, - 0x19, 0xb9, 0x0d, 0x6f, 0x07, 0x36, 0x46, 0xa5, 0xb6, 0x18, 0xdd, 0x65, 0x02, 0x2d, 0xc3, 0xa9, - 0x57, 0x34, 0xb6, 0x39, 0x67, 0xfd, 0xe9, 0x5e, 0xe6, 0x82, 0x07, 0xc4, 0x40, 0xe8, 0x2e, 0xac, - 0xbd, 0x35, 0x57, 0x6f, 0xc6, 0xd5, 0x77, 0x16, 0xa0, 0xd7, 0xbe, 0xba, 0x47, 0x87, 0x29, 0x93, - 0xea, 0x7f, 0x93, 0xfa, 0x9b, 0xa7, 0xe7, 0xd8, 0x39, 0x3b, 0xc7, 0xce, 0xe5, 0x39, 0x06, 0x5f, - 0x72, 0x0c, 0x7e, 0xe6, 0x18, 0x9c, 0xe4, 0x18, 0x9c, 0xe6, 0x18, 0xfc, 0xce, 0x31, 0xf8, 0x93, - 0x63, 0xe7, 0x32, 0xc7, 0xe0, 0xeb, 0x05, 0x76, 0x4e, 0x2f, 0xb0, 0x73, 0x76, 0x81, 0x9d, 0x77, - 0xc3, 0x7f, 0xec, 0xa0, 0x66, 0xfe, 0xb3, 0x0f, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x33, 0x14, - 0x72, 0x0f, 0xd4, 0x05, 0x00, 0x00, +func (m *PrometheusQueryResult) GetRawBytes() []byte { + if x, ok := m.GetResult().(*PrometheusQueryResult_RawBytes); ok { + return x.RawBytes + } + return nil } -func (this *SampleStream) Equal(that interface{}) bool { - if that == nil { - return this == nil +func (m *PrometheusQueryResult) GetMatrix() *Matrix { + if x, ok := m.GetResult().(*PrometheusQueryResult_Matrix); ok { + return x.Matrix } + return nil +} - that1, ok := that.(*SampleStream) - if !ok { - that2, ok := that.(SampleStream) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if len(this.Labels) != len(that1.Labels) { - return false +// XXX_OneofWrappers is for the internal use of the proto package. 
+func (*PrometheusQueryResult) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*PrometheusQueryResult_Vector)(nil), + (*PrometheusQueryResult_RawBytes)(nil), + (*PrometheusQueryResult_Matrix)(nil), } - for i := range this.Labels { - if !this.Labels[i].Equal(that1.Labels[i]) { - return false +} + +type Vector struct { + Samples []*Sample `protobuf:"bytes,1,rep,name=samples,proto3" json:"samples,omitempty"` +} + +func (m *Vector) Reset() { *m = Vector{} } +func (*Vector) ProtoMessage() {} +func (*Vector) Descriptor() ([]byte, []int) { + return fileDescriptor_5c6ac9b241082464, []int{13} +} +func (m *Vector) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Vector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Vector.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } + return b[:n], nil } - if len(this.Samples) != len(that1.Samples) { - return false +} +func (m *Vector) XXX_Merge(src proto.Message) { + xxx_messageInfo_Vector.Merge(m, src) +} +func (m *Vector) XXX_Size() int { + return m.Size() +} +func (m *Vector) XXX_DiscardUnknown() { + xxx_messageInfo_Vector.DiscardUnknown(m) +} + +var xxx_messageInfo_Vector proto.InternalMessageInfo + +func (m *Vector) GetSamples() []*Sample { + if m != nil { + return m.Samples } - for i := range this.Samples { - if !this.Samples[i].Equal(&that1.Samples[i]) { - return false - } + return nil +} + +type Sample struct { + Labels []github_com_cortexproject_cortex_pkg_cortexpb.LabelAdapter `protobuf:"bytes,1,rep,name=labels,proto3,customtype=github.com/cortexproject/cortex/pkg/cortexpb.LabelAdapter" json:"metric"` + Sample *cortexpb.Sample `protobuf:"bytes,2,opt,name=sample,proto3" json:"value"` + Histogram *SampleHistogramPair `protobuf:"bytes,3,opt,name=histogram,proto3" json:"histogram"` +} + +func (m *Sample) Reset() { *m = Sample{} } +func (*Sample) ProtoMessage() {} +func (*Sample) Descriptor() ([]byte, []int) { + return fileDescriptor_5c6ac9b241082464, []int{14} +} +func (m *Sample) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Sample) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Sample.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Sample) XXX_Merge(src proto.Message) { + xxx_messageInfo_Sample.Merge(m, src) +} +func (m *Sample) XXX_Size() int { + return m.Size() +} +func (m *Sample) XXX_DiscardUnknown() { + xxx_messageInfo_Sample.DiscardUnknown(m) +} + +var xxx_messageInfo_Sample proto.InternalMessageInfo + +func (m *Sample) GetSample() *cortexpb.Sample { + if m != nil { + return m.Sample + } + return nil +} + +func (m *Sample) GetHistogram() *SampleHistogramPair { + if m != nil { + return m.Histogram + } + return nil +} + +type Matrix struct { + SampleStreams []SampleStream `protobuf:"bytes,1,rep,name=sampleStreams,proto3" json:"sampleStreams"` +} + +func (m *Matrix) Reset() { *m = Matrix{} } +func (*Matrix) ProtoMessage() {} +func (*Matrix) Descriptor() ([]byte, []int) { + return fileDescriptor_5c6ac9b241082464, []int{15} +} +func (m *Matrix) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Matrix) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Matrix.Marshal(b, m, deterministic) + } else { + b 
= b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Matrix) XXX_Merge(src proto.Message) { + xxx_messageInfo_Matrix.Merge(m, src) +} +func (m *Matrix) XXX_Size() int { + return m.Size() +} +func (m *Matrix) XXX_DiscardUnknown() { + xxx_messageInfo_Matrix.DiscardUnknown(m) +} + +var xxx_messageInfo_Matrix proto.InternalMessageInfo + +func (m *Matrix) GetSampleStreams() []SampleStream { + if m != nil { + return m.SampleStreams + } + return nil +} + +func init() { + proto.RegisterType((*PrometheusResponse)(nil), "tripperware.PrometheusResponse") + proto.RegisterType((*PrometheusData)(nil), "tripperware.PrometheusData") + proto.RegisterType((*CachedResponse)(nil), "tripperware.CachedResponse") + proto.RegisterType((*Extent)(nil), "tripperware.Extent") + proto.RegisterType((*SampleStream)(nil), "tripperware.SampleStream") + proto.RegisterType((*SampleHistogramPair)(nil), "tripperware.SampleHistogramPair") + proto.RegisterType((*SampleHistogram)(nil), "tripperware.SampleHistogram") + proto.RegisterType((*HistogramBucket)(nil), "tripperware.HistogramBucket") + proto.RegisterType((*PrometheusResponseStats)(nil), "tripperware.PrometheusResponseStats") + proto.RegisterType((*PrometheusResponseSamplesStats)(nil), "tripperware.PrometheusResponseSamplesStats") + proto.RegisterType((*PrometheusResponseQueryableSamplesStatsPerStep)(nil), "tripperware.PrometheusResponseQueryableSamplesStatsPerStep") + proto.RegisterType((*PrometheusResponseHeader)(nil), "tripperware.PrometheusResponseHeader") + proto.RegisterType((*PrometheusQueryResult)(nil), "tripperware.PrometheusQueryResult") + proto.RegisterType((*Vector)(nil), "tripperware.Vector") + proto.RegisterType((*Sample)(nil), "tripperware.Sample") + proto.RegisterType((*Matrix)(nil), "tripperware.Matrix") +} + +func init() { proto.RegisterFile("query.proto", fileDescriptor_5c6ac9b241082464) } + +var fileDescriptor_5c6ac9b241082464 = []byte{ + // 1173 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x56, 0x4b, 0x6f, 0x1b, 0xd5, + 0x17, 0xf7, 0xf8, 0x31, 0x71, 0x8e, 0xd3, 0xa4, 0xff, 0x9b, 0x3e, 0x9c, 0xfe, 0xcb, 0x8c, 0x19, + 0x81, 0x14, 0x04, 0x71, 0xa4, 0x20, 0xa8, 0x00, 0xa9, 0x22, 0x03, 0x81, 0x50, 0x28, 0x49, 0x6f, + 0xa2, 0x22, 0xb1, 0xa9, 0xae, 0xed, 0x8b, 0x33, 0xc4, 0xe3, 0x99, 0xde, 0xb9, 0xd3, 0xc4, 0xac, + 0xf8, 0x04, 0x88, 0x0d, 0x1b, 0x24, 0x16, 0xec, 0x58, 0xf0, 0x41, 0xb2, 0xcc, 0xb2, 0x42, 0x62, + 0x20, 0xce, 0x06, 0xcd, 0xaa, 0x1f, 0x01, 0xdd, 0xc7, 0xd8, 0xe3, 0xc4, 0x49, 0xd4, 0x15, 0x1b, + 0x67, 0xee, 0x39, 0xbf, 0xf3, 0xfa, 0xdd, 0x73, 0xce, 0x0d, 0xd4, 0x9e, 0xc6, 0x94, 0x0d, 0x9a, + 0x21, 0x0b, 0x78, 0x80, 0x6a, 0x9c, 0x79, 0x61, 0x48, 0xd9, 0x01, 0x61, 0xf4, 0xce, 0x8d, 0x6e, + 0xd0, 0x0d, 0xa4, 0x7c, 0x55, 0x7c, 0x29, 0xc8, 0x1d, 0xab, 0x1b, 0x04, 0xdd, 0x1e, 0x5d, 0x95, + 0xa7, 0x56, 0xfc, 0xcd, 0x6a, 0x27, 0x66, 0x84, 0x7b, 0x41, 0x5f, 0xeb, 0x97, 0xce, 0xea, 0x49, + 0x5f, 0x7b, 0xbf, 0xf3, 0x5e, 0xd7, 0xe3, 0x7b, 0x71, 0xab, 0xd9, 0x0e, 0xfc, 0xd5, 0x76, 0xc0, + 0x38, 0x3d, 0x0c, 0x59, 0xf0, 0x2d, 0x6d, 0x73, 0x7d, 0x5a, 0x0d, 0xf7, 0xbb, 0x99, 0xa2, 0xa5, + 0x3f, 0x94, 0xa9, 0xf3, 0x57, 0x11, 0xd0, 0x36, 0x0b, 0x7c, 0xca, 0xf7, 0x68, 0x1c, 0x61, 0x1a, + 0x85, 0x41, 0x3f, 0xa2, 0xc8, 0x01, 0x73, 0x87, 0x13, 0x1e, 0x47, 0x75, 0xa3, 0x61, 0x2c, 0xcf, + 0xba, 0x90, 0x26, 0xb6, 0x19, 0x49, 0x09, 0xd6, 0x1a, 0xf4, 0x29, 0x94, 0x3f, 0x26, 0x9c, 0xd4, + 0x8b, 0x0d, 0x63, 0xb9, 0xb6, 0xf6, 0xff, 0x66, 
0xae, 0xc4, 0xe6, 0xd8, 0xa5, 0x80, 0xb8, 0xb7, + 0x8e, 0x12, 0xbb, 0x90, 0x26, 0xf6, 0x7c, 0x87, 0x70, 0xf2, 0x56, 0xe0, 0x7b, 0x9c, 0xfa, 0x21, + 0x1f, 0x60, 0xe9, 0x00, 0xbd, 0x03, 0xb3, 0x1b, 0x8c, 0x05, 0x6c, 0x77, 0x10, 0xd2, 0x7a, 0x49, + 0xc6, 0xbb, 0x9d, 0x26, 0xf6, 0x22, 0xcd, 0x84, 0x39, 0x8b, 0x31, 0x12, 0xbd, 0x01, 0x15, 0x79, + 0xa8, 0x97, 0xa5, 0xc9, 0x62, 0x9a, 0xd8, 0x0b, 0xd2, 0x24, 0x07, 0x57, 0x08, 0xf4, 0x09, 0xcc, + 0x6c, 0x52, 0xd2, 0xa1, 0x2c, 0xaa, 0x57, 0x1a, 0xa5, 0xe5, 0xda, 0xda, 0xeb, 0x17, 0x64, 0x9b, + 0x11, 0xa0, 0xd0, 0x6e, 0x25, 0x4d, 0x6c, 0x63, 0x05, 0x67, 0xc6, 0x68, 0x0d, 0xaa, 0x5f, 0x11, + 0xd6, 0xf7, 0xfa, 0xdd, 0xa8, 0x6e, 0x36, 0x4a, 0xcb, 0xb3, 0xee, 0xad, 0x34, 0xb1, 0xd1, 0x81, + 0x96, 0xe5, 0x02, 0x8f, 0x70, 0xce, 0x9f, 0x06, 0xcc, 0x4f, 0xd2, 0x81, 0x9a, 0x00, 0x98, 0x46, + 0x71, 0x8f, 0xcb, 0x8a, 0x15, 0xc3, 0xf3, 0x69, 0x62, 0x03, 0x1b, 0x49, 0x71, 0x0e, 0x81, 0x1e, + 0x80, 0xa9, 0x4e, 0x9a, 0x6b, 0xe7, 0x82, 0xec, 0x1f, 0x89, 0x8e, 0x53, 0x48, 0x77, 0x5e, 0x53, + 0x6e, 0x2a, 0x9f, 0x58, 0x7b, 0x40, 0x5b, 0x50, 0x11, 0xf7, 0x18, 0x49, 0xa2, 0x6b, 0x6b, 0xaf, + 0x5d, 0x41, 0x84, 0xb8, 0xeb, 0x48, 0x71, 0x2b, 0xcd, 0xf2, 0xdc, 0x4a, 0x81, 0xb3, 0x0f, 0xf3, + 0x1f, 0x91, 0xf6, 0x1e, 0xed, 0x8c, 0x9a, 0x67, 0x09, 0x4a, 0xfb, 0x74, 0xa0, 0xeb, 0x9a, 0x49, + 0x13, 0x5b, 0x1c, 0xb1, 0xf8, 0x41, 0xf7, 0x61, 0x86, 0x1e, 0x72, 0xda, 0xe7, 0x51, 0xbd, 0x28, + 0x2f, 0x62, 0x71, 0x22, 0xfe, 0x86, 0xd4, 0xb9, 0x0b, 0x3a, 0xf7, 0x0c, 0x8b, 0xb3, 0x0f, 0xe7, + 0x77, 0x03, 0x4c, 0x05, 0x42, 0xb6, 0x2c, 0x84, 0x71, 0x19, 0xa7, 0xe4, 0xce, 0xa6, 0x89, 0xad, + 0x04, 0x58, 0xfd, 0x11, 0x69, 0xd0, 0x7e, 0x47, 0x52, 0x56, 0x52, 0x69, 0xd0, 0x7e, 0x07, 0x8b, + 0x1f, 0xd4, 0x80, 0x2a, 0x67, 0xa4, 0x4d, 0x9f, 0x78, 0x1d, 0xdd, 0x3d, 0xd9, 0x4d, 0x4b, 0xf1, + 0x67, 0x1d, 0x74, 0x1f, 0xaa, 0x4c, 0xd7, 0x53, 0xaf, 0x48, 0xa6, 0x6e, 0x34, 0xd5, 0x00, 0x36, + 0xb3, 0x01, 0x6c, 0xae, 0xf7, 0x07, 0xee, 0x5c, 0x9a, 0xd8, 0x23, 0x24, 0x1e, 0x7d, 0x3d, 0x28, + 0x57, 0x4b, 0xd7, 0xcb, 0xce, 0xcf, 0x45, 0x98, 0xdb, 0x21, 0x7e, 0xd8, 0xa3, 0x3b, 0x9c, 0x51, + 0xe2, 0xa3, 0x43, 0x30, 0x7b, 0xa4, 0x45, 0x7b, 0x62, 0xae, 0x54, 0xf9, 0xd9, 0x58, 0x36, 0xbf, + 0x10, 0xf2, 0x6d, 0xe2, 0x31, 0xf7, 0x73, 0x51, 0xfe, 0x1f, 0x89, 0xfd, 0x52, 0x63, 0xad, 0xec, + 0xd7, 0x3b, 0x24, 0xe4, 0x94, 0x89, 0x7b, 0xf7, 0x29, 0x67, 0x5e, 0x1b, 0xeb, 0x78, 0xe8, 0x7d, + 0x98, 0x89, 0x64, 0x26, 0x19, 0xf3, 0xd7, 0xc7, 0xa1, 0x55, 0x8a, 0xe3, 0x96, 0x79, 0x46, 0x7a, + 0x31, 0x8d, 0x70, 0x66, 0x80, 0x76, 0x01, 0xf6, 0xbc, 0x88, 0x07, 0x5d, 0x46, 0x7c, 0xd1, 0x38, + 0xc2, 0xbc, 0x31, 0x71, 0x71, 0xca, 0xc3, 0x66, 0x06, 0x92, 0x65, 0x20, 0xed, 0x2e, 0x67, 0x8b, + 0x73, 0xdf, 0xce, 0x77, 0xb0, 0x38, 0xc5, 0x0c, 0xbd, 0x0a, 0x73, 0xdc, 0xf3, 0x69, 0xc4, 0x89, + 0x1f, 0x3e, 0xf1, 0xd5, 0x02, 0x2a, 0xe1, 0xda, 0x48, 0xf6, 0x30, 0x42, 0x1f, 0xc2, 0xec, 0xc8, + 0x8f, 0x1e, 0x89, 0xbb, 0x97, 0xa5, 0xe3, 0x96, 0x45, 0x2a, 0x78, 0x6c, 0xe4, 0x3c, 0x85, 0x85, + 0x33, 0x18, 0x74, 0x03, 0x2a, 0xed, 0x20, 0xee, 0xab, 0x7e, 0x32, 0xb0, 0x3a, 0xa0, 0xeb, 0x50, + 0x8a, 0x62, 0x15, 0xc4, 0xc0, 0xe2, 0x13, 0xbd, 0x0b, 0x33, 0xad, 0xb8, 0xbd, 0x4f, 0x79, 0xc6, + 0xc4, 0x64, 0xe8, 0x71, 0x50, 0x09, 0xc2, 0x19, 0xd8, 0x89, 0x60, 0xe1, 0x8c, 0x0e, 0x59, 0x00, + 0xad, 0x20, 0xee, 0x77, 0x08, 0xf3, 0xa8, 0x2a, 0xb4, 0x82, 0x73, 0x12, 0x91, 0x52, 0x2f, 0x38, + 0xa0, 0x4c, 0x87, 0x57, 0x07, 0x21, 0x8d, 0x45, 0x38, 0x39, 0xc1, 0x06, 0x56, 0x87, 0x71, 0xfa, + 0xe5, 0x5c, 0xfa, 0x8e, 0x0f, 0xb7, 0x2f, 0x98, 0x69, 0x84, 0xc7, 0x0d, 
0x61, 0x48, 0x0a, 0xdf, + 0xbc, 0x6a, 0x15, 0x28, 0xb4, 0xda, 0x08, 0x35, 0x31, 0x9e, 0xda, 0x7e, 0xd4, 0x28, 0xce, 0x4f, + 0x45, 0xb0, 0x2e, 0x37, 0x44, 0x5b, 0x70, 0x93, 0x07, 0x9c, 0xf4, 0xe4, 0xae, 0x22, 0xad, 0x5e, + 0xa6, 0xd5, 0x63, 0xbc, 0x94, 0x26, 0xf6, 0x74, 0x00, 0x9e, 0x2e, 0x46, 0xbf, 0x1a, 0x70, 0x77, + 0xaa, 0x66, 0x9b, 0xb2, 0x1d, 0x4e, 0x43, 0xdd, 0xee, 0x1f, 0x5c, 0x51, 0xdd, 0x59, 0x6b, 0x99, + 0xad, 0x76, 0xe1, 0x36, 0xd2, 0xc4, 0xbe, 0x34, 0x08, 0xbe, 0x54, 0xeb, 0x78, 0xf0, 0x92, 0x11, + 0xc5, 0x75, 0xca, 0x29, 0xd4, 0xed, 0xaf, 0x0e, 0xe7, 0x66, 0xa3, 0x78, 0x6e, 0x36, 0x9c, 0x5d, + 0xa8, 0x5f, 0xf4, 0x9c, 0xa1, 0x25, 0x28, 0x7f, 0x49, 0xfc, 0xec, 0xc5, 0xd1, 0x2b, 0x4f, 0x8a, + 0xd0, 0x2b, 0x60, 0x3e, 0x96, 0x53, 0x2f, 0xe9, 0x1a, 0x29, 0xb5, 0xd0, 0xf9, 0xc5, 0x80, 0x9b, + 0x53, 0xdf, 0x19, 0xb4, 0x02, 0xe6, 0x33, 0xda, 0xe6, 0x01, 0xd3, 0x5d, 0x34, 0xb9, 0xd0, 0x1f, + 0x4b, 0xd5, 0x66, 0x01, 0x6b, 0x10, 0xba, 0x0b, 0x55, 0x46, 0x0e, 0xdc, 0x01, 0xa7, 0x2a, 0xfb, + 0xb9, 0xcd, 0x02, 0x1e, 0x49, 0x84, 0x33, 0x9f, 0x70, 0xe6, 0x1d, 0xea, 0xd7, 0x69, 0xd2, 0xd9, + 0x43, 0xa9, 0x12, 0xce, 0x14, 0xc8, 0xad, 0x82, 0x7e, 0xdd, 0x9c, 0x7b, 0x60, 0xaa, 0x50, 0x68, + 0x25, 0xdf, 0xd6, 0xe7, 0x5f, 0x18, 0xc5, 0xf5, 0xb8, 0x63, 0x7f, 0x28, 0x82, 0xa9, 0x64, 0xff, + 0xe1, 0x6e, 0xbe, 0x07, 0xa6, 0xca, 0x47, 0x2f, 0xb3, 0xf3, 0xab, 0xf9, 0xda, 0x51, 0x62, 0x1b, + 0xe2, 0x85, 0x93, 0x7d, 0x80, 0x35, 0x1c, 0x3d, 0xca, 0x2f, 0x42, 0x45, 0xd9, 0xd5, 0x7b, 0xf9, + 0x7f, 0xda, 0xd7, 0xd8, 0x34, 0xbf, 0x19, 0xb7, 0xc0, 0x54, 0x3c, 0xa3, 0x0d, 0xb8, 0x16, 0xe5, + 0xde, 0xae, 0x8c, 0x96, 0xa5, 0x29, 0x01, 0x14, 0x42, 0xaf, 0xd9, 0x49, 0x2b, 0x77, 0xfd, 0xf8, + 0xc4, 0x2a, 0x3c, 0x3f, 0xb1, 0x0a, 0x2f, 0x4e, 0x2c, 0xe3, 0xfb, 0xa1, 0x65, 0xfc, 0x36, 0xb4, + 0x8c, 0xa3, 0xa1, 0x65, 0x1c, 0x0f, 0x2d, 0xe3, 0xef, 0xa1, 0x65, 0xfc, 0x33, 0xb4, 0x0a, 0x2f, + 0x86, 0x96, 0xf1, 0xe3, 0xa9, 0x55, 0x38, 0x3e, 0xb5, 0x0a, 0xcf, 0x4f, 0xad, 0xc2, 0xd7, 0xf9, + 0x7f, 0x98, 0x5b, 0xa6, 0x7c, 0x72, 0xdf, 0xfe, 0x37, 0x00, 0x00, 0xff, 0xff, 0xaa, 0xc5, 0x91, + 0xf0, 0x53, 0x0b, 0x00, 0x00, +} + +func (this *PrometheusResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*PrometheusResponse) + if !ok { + that2, ok := that.(PrometheusResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Status != that1.Status { + return false + } + if !this.Data.Equal(&that1.Data) { + return false + } + if this.ErrorType != that1.ErrorType { + return false + } + if this.Error != that1.Error { + return false + } + if len(this.Headers) != len(that1.Headers) { + return false + } + for i := range this.Headers { + if !this.Headers[i].Equal(that1.Headers[i]) { + return false + } + } + if len(this.Warnings) != len(that1.Warnings) { + return false + } + for i := range this.Warnings { + if this.Warnings[i] != that1.Warnings[i] { + return false + } + } + return true +} +func (this *PrometheusData) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*PrometheusData) + if !ok { + that2, ok := that.(PrometheusData) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.ResultType != that1.ResultType { + return false + } + if !this.Result.Equal(&that1.Result) { + return false + } + if !this.Stats.Equal(that1.Stats) { + return 
false + } + return true +} +func (this *CachedResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*CachedResponse) + if !ok { + that2, ok := that.(CachedResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Key != that1.Key { + return false + } + if len(this.Extents) != len(that1.Extents) { + return false + } + for i := range this.Extents { + if !this.Extents[i].Equal(&that1.Extents[i]) { + return false + } + } + return true +} +func (this *Extent) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Extent) + if !ok { + that2, ok := that.(Extent) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Start != that1.Start { + return false + } + if this.End != that1.End { + return false + } + if this.TraceId != that1.TraceId { + return false + } + if !this.Response.Equal(that1.Response) { + return false + } + return true +} +func (this *SampleStream) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*SampleStream) + if !ok { + that2, ok := that.(SampleStream) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Labels) != len(that1.Labels) { + return false + } + for i := range this.Labels { + if !this.Labels[i].Equal(that1.Labels[i]) { + return false + } + } + if len(this.Samples) != len(that1.Samples) { + return false + } + for i := range this.Samples { + if !this.Samples[i].Equal(&that1.Samples[i]) { + return false + } } if len(this.Histograms) != len(that1.Histograms) { return false @@ -820,14 +1454,14 @@ func (this *PrometheusResponseHeader) Equal(that interface{}) bool { } return true } -func (this *PrometheusRequestHeader) Equal(that interface{}) bool { +func (this *PrometheusQueryResult) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*PrometheusRequestHeader) + that1, ok := that.(*PrometheusQueryResult) if !ok { - that2, ok := that.(PrometheusRequestHeader) + that2, ok := that.(PrometheusQueryResult) if ok { that1 = &that2 } else { @@ -839,28 +1473,254 @@ func (this *PrometheusRequestHeader) Equal(that interface{}) bool { } else if this == nil { return false } - if this.Name != that1.Name { + if that1.Result == nil { + if this.Result != nil { + return false + } + } else if this.Result == nil { return false - } - if len(this.Values) != len(that1.Values) { + } else if !this.Result.Equal(that1.Result) { return false } - for i := range this.Values { - if this.Values[i] != that1.Values[i] { + return true +} +func (this *PrometheusQueryResult_Vector) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*PrometheusQueryResult_Vector) + if !ok { + that2, ok := that.(PrometheusQueryResult_Vector) + if ok { + that1 = &that2 + } else { return false } } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Vector.Equal(that1.Vector) { + return false + } return true } -func (this *SampleStream) GoString() string { - if this == nil { - return "nil" +func (this *PrometheusQueryResult_RawBytes) Equal(that interface{}) bool { + if that == nil { + return this == nil } - s := make([]string, 0, 7) - s = 
append(s, "&tripperware.SampleStream{") - s = append(s, "Labels: "+fmt.Sprintf("%#v", this.Labels)+",\n") - if this.Samples != nil { - vs := make([]*cortexpb.Sample, len(this.Samples)) + + that1, ok := that.(*PrometheusQueryResult_RawBytes) + if !ok { + that2, ok := that.(PrometheusQueryResult_RawBytes) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !bytes.Equal(this.RawBytes, that1.RawBytes) { + return false + } + return true +} +func (this *PrometheusQueryResult_Matrix) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*PrometheusQueryResult_Matrix) + if !ok { + that2, ok := that.(PrometheusQueryResult_Matrix) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Matrix.Equal(that1.Matrix) { + return false + } + return true +} +func (this *Vector) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Vector) + if !ok { + that2, ok := that.(Vector) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Samples) != len(that1.Samples) { + return false + } + for i := range this.Samples { + if !this.Samples[i].Equal(that1.Samples[i]) { + return false + } + } + return true +} +func (this *Sample) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Sample) + if !ok { + that2, ok := that.(Sample) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Labels) != len(that1.Labels) { + return false + } + for i := range this.Labels { + if !this.Labels[i].Equal(that1.Labels[i]) { + return false + } + } + if !this.Sample.Equal(that1.Sample) { + return false + } + if !this.Histogram.Equal(that1.Histogram) { + return false + } + return true +} +func (this *Matrix) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Matrix) + if !ok { + that2, ok := that.(Matrix) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.SampleStreams) != len(that1.SampleStreams) { + return false + } + for i := range this.SampleStreams { + if !this.SampleStreams[i].Equal(&that1.SampleStreams[i]) { + return false + } + } + return true +} +func (this *PrometheusResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 10) + s = append(s, "&tripperware.PrometheusResponse{") + s = append(s, "Status: "+fmt.Sprintf("%#v", this.Status)+",\n") + s = append(s, "Data: "+strings.Replace(this.Data.GoString(), `&`, ``, 1)+",\n") + s = append(s, "ErrorType: "+fmt.Sprintf("%#v", this.ErrorType)+",\n") + s = append(s, "Error: "+fmt.Sprintf("%#v", this.Error)+",\n") + if this.Headers != nil { + s = append(s, "Headers: "+fmt.Sprintf("%#v", this.Headers)+",\n") + } + s = append(s, "Warnings: "+fmt.Sprintf("%#v", this.Warnings)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *PrometheusData) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&tripperware.PrometheusData{") + s = append(s, "ResultType: "+fmt.Sprintf("%#v", 
this.ResultType)+",\n") + s = append(s, "Result: "+strings.Replace(this.Result.GoString(), `&`, ``, 1)+",\n") + if this.Stats != nil { + s = append(s, "Stats: "+fmt.Sprintf("%#v", this.Stats)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *CachedResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&tripperware.CachedResponse{") + s = append(s, "Key: "+fmt.Sprintf("%#v", this.Key)+",\n") + if this.Extents != nil { + vs := make([]*Extent, len(this.Extents)) + for i := range vs { + vs[i] = &this.Extents[i] + } + s = append(s, "Extents: "+fmt.Sprintf("%#v", vs)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Extent) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 8) + s = append(s, "&tripperware.Extent{") + s = append(s, "Start: "+fmt.Sprintf("%#v", this.Start)+",\n") + s = append(s, "End: "+fmt.Sprintf("%#v", this.End)+",\n") + s = append(s, "TraceId: "+fmt.Sprintf("%#v", this.TraceId)+",\n") + if this.Response != nil { + s = append(s, "Response: "+fmt.Sprintf("%#v", this.Response)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *SampleStream) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&tripperware.SampleStream{") + s = append(s, "Labels: "+fmt.Sprintf("%#v", this.Labels)+",\n") + if this.Samples != nil { + vs := make([]*cortexpb.Sample, len(this.Samples)) for i := range vs { vs[i] = &this.Samples[i] } @@ -961,14 +1821,83 @@ func (this *PrometheusResponseHeader) GoString() string { s = append(s, "}") return strings.Join(s, "") } -func (this *PrometheusRequestHeader) GoString() string { +func (this *PrometheusQueryResult) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 6) - s = append(s, "&tripperware.PrometheusRequestHeader{") - s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") - s = append(s, "Values: "+fmt.Sprintf("%#v", this.Values)+",\n") + s := make([]string, 0, 7) + s = append(s, "&tripperware.PrometheusQueryResult{") + if this.Result != nil { + s = append(s, "Result: "+fmt.Sprintf("%#v", this.Result)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *PrometheusQueryResult_Vector) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&tripperware.PrometheusQueryResult_Vector{` + + `Vector:` + fmt.Sprintf("%#v", this.Vector) + `}`}, ", ") + return s +} +func (this *PrometheusQueryResult_RawBytes) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&tripperware.PrometheusQueryResult_RawBytes{` + + `RawBytes:` + fmt.Sprintf("%#v", this.RawBytes) + `}`}, ", ") + return s +} +func (this *PrometheusQueryResult_Matrix) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&tripperware.PrometheusQueryResult_Matrix{` + + `Matrix:` + fmt.Sprintf("%#v", this.Matrix) + `}`}, ", ") + return s +} +func (this *Vector) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&tripperware.Vector{") + if this.Samples != nil { + s = append(s, "Samples: "+fmt.Sprintf("%#v", this.Samples)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Sample) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&tripperware.Sample{") + s = append(s, "Labels: "+fmt.Sprintf("%#v", this.Labels)+",\n") 
+ if this.Sample != nil { + s = append(s, "Sample: "+fmt.Sprintf("%#v", this.Sample)+",\n") + } + if this.Histogram != nil { + s = append(s, "Histogram: "+fmt.Sprintf("%#v", this.Histogram)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Matrix) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&tripperware.Matrix{") + if this.SampleStreams != nil { + vs := make([]*SampleStream, len(this.SampleStreams)) + for i := range vs { + vs[i] = &this.SampleStreams[i] + } + s = append(s, "SampleStreams: "+fmt.Sprintf("%#v", vs)+",\n") + } s = append(s, "}") return strings.Join(s, "") } @@ -980,7 +1909,7 @@ func valueToGoStringQuery(v interface{}, typ string) string { pv := reflect.Indirect(rv).Interface() return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) } -func (m *SampleStream) Marshal() (dAtA []byte, err error) { +func (m *PrometheusResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -990,34 +1919,29 @@ func (m *SampleStream) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *SampleStream) MarshalTo(dAtA []byte) (int, error) { +func (m *PrometheusResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *SampleStream) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *PrometheusResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.Histograms) > 0 { - for iNdEx := len(m.Histograms) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Histograms[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } + if len(m.Warnings) > 0 { + for iNdEx := len(m.Warnings) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Warnings[iNdEx]) + copy(dAtA[i:], m.Warnings[iNdEx]) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Warnings[iNdEx]))) i-- - dAtA[i] = 0x1a + dAtA[i] = 0x32 } } - if len(m.Samples) > 0 { - for iNdEx := len(m.Samples) - 1; iNdEx >= 0; iNdEx-- { + if len(m.Headers) > 0 { + for iNdEx := len(m.Headers) - 1; iNdEx >= 0; iNdEx-- { { - size, err := m.Samples[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Headers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -1025,27 +1949,44 @@ func (m *SampleStream) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintQuery(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x12 + dAtA[i] = 0x2a } } - if len(m.Labels) > 0 { - for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- { - { - size := m.Labels[iNdEx].Size() - i -= size - if _, err := m.Labels[iNdEx].MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa + if len(m.Error) > 0 { + i -= len(m.Error) + copy(dAtA[i:], m.Error) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Error))) + i-- + dAtA[i] = 0x22 + } + if len(m.ErrorType) > 0 { + i -= len(m.ErrorType) + copy(dAtA[i:], m.ErrorType) + i = encodeVarintQuery(dAtA, i, uint64(len(m.ErrorType))) + i-- + dAtA[i] = 0x1a + } + { + size, err := m.Data.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if len(m.Status) > 0 { + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0xa } return len(dAtA) - 
i, nil } -func (m *SampleHistogramPair) Marshal() (dAtA []byte, err error) { +func (m *PrometheusData) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -1055,18 +1996,231 @@ func (m *SampleHistogramPair) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *SampleHistogramPair) MarshalTo(dAtA []byte) (int, error) { +func (m *PrometheusData) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *SampleHistogramPair) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *PrometheusData) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - { - size, err := m.Histogram.MarshalToSizedBuffer(dAtA[:i]) + if m.Stats != nil { + { + size, err := m.Stats.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + { + size, err := m.Result.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if len(m.ResultType) > 0 { + i -= len(m.ResultType) + copy(dAtA[i:], m.ResultType) + i = encodeVarintQuery(dAtA, i, uint64(len(m.ResultType))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CachedResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CachedResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CachedResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Extents) > 0 { + for iNdEx := len(m.Extents) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Extents[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Extent) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Extent) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Extent) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Response != nil { + { + size, err := m.Response.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if len(m.TraceId) > 0 { + i -= len(m.TraceId) + copy(dAtA[i:], m.TraceId) + i = encodeVarintQuery(dAtA, i, uint64(len(m.TraceId))) + i-- + dAtA[i] = 0x22 + } + if m.End != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.End)) + i-- + dAtA[i] = 0x10 + } + if m.Start != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.Start)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *SampleStream) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + 
return nil, err + } + return dAtA[:n], nil +} + +func (m *SampleStream) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SampleStream) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Histograms) > 0 { + for iNdEx := len(m.Histograms) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Histograms[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.Samples) > 0 { + for iNdEx := len(m.Samples) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Samples[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Labels) > 0 { + for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- { + { + size := m.Labels[iNdEx].Size() + i -= size + if _, err := m.Labels[iNdEx].MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *SampleHistogramPair) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SampleHistogramPair) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SampleHistogramPair) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Histogram.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -1327,7 +2481,7 @@ func (m *PrometheusResponseHeader) MarshalToSizedBuffer(dAtA []byte) (int, error return len(dAtA) - i, nil } -func (m *PrometheusRequestHeader) Marshal() (dAtA []byte, err error) { +func (m *PrometheusQueryResult) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -1337,65 +2491,344 @@ func (m *PrometheusRequestHeader) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *PrometheusRequestHeader) MarshalTo(dAtA []byte) (int, error) { +func (m *PrometheusQueryResult) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *PrometheusRequestHeader) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *PrometheusQueryResult) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.Values) > 0 { - for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Values[iNdEx]) - copy(dAtA[i:], m.Values[iNdEx]) - i = encodeVarintQuery(dAtA, i, uint64(len(m.Values[iNdEx]))) - i-- - dAtA[i] = 0x12 + if m.Result != nil { + { + size := m.Result.Size() + i -= size + if _, err := m.Result.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } } } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintQuery(dAtA, i, uint64(len(m.Name))) + return len(dAtA) - i, nil +} + +func (m *PrometheusQueryResult_Vector) MarshalTo(dAtA []byte) (int, error) { + return m.MarshalToSizedBuffer(dAtA[:m.Size()]) +} + +func (m *PrometheusQueryResult_Vector) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Vector != nil { + { + size, err := m.Vector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 
0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } +func (m *PrometheusQueryResult_RawBytes) MarshalTo(dAtA []byte) (int, error) { + return m.MarshalToSizedBuffer(dAtA[:m.Size()]) +} -func encodeVarintQuery(dAtA []byte, offset int, v uint64) int { - offset -= sovQuery(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ +func (m *PrometheusQueryResult_RawBytes) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.RawBytes != nil { + i -= len(m.RawBytes) + copy(dAtA[i:], m.RawBytes) + i = encodeVarintQuery(dAtA, i, uint64(len(m.RawBytes))) + i-- + dAtA[i] = 0x12 } - dAtA[offset] = uint8(v) - return base + return len(dAtA) - i, nil } -func (m *SampleStream) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Labels) > 0 { - for _, e := range m.Labels { - l = e.Size() - n += 1 + l + sovQuery(uint64(l)) +func (m *PrometheusQueryResult_Matrix) MarshalTo(dAtA []byte) (int, error) { + return m.MarshalToSizedBuffer(dAtA[:m.Size()]) +} + +func (m *PrometheusQueryResult_Matrix) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Matrix != nil { + { + size, err := m.Matrix.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x1a } - if len(m.Samples) > 0 { - for _, e := range m.Samples { - l = e.Size() - n += 1 + l + sovQuery(uint64(l)) - } + return len(dAtA) - i, nil +} +func (m *Vector) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - if len(m.Histograms) > 0 { + return dAtA[:n], nil +} + +func (m *Vector) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Vector) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Samples) > 0 { + for iNdEx := len(m.Samples) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Samples[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Sample) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Sample) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Sample) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Histogram != nil { + { + size, err := m.Histogram.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Sample != nil { + { + size, err := m.Sample.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Labels) > 0 { + for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- { + { + size := m.Labels[iNdEx].Size() + i -= size + if _, err := m.Labels[iNdEx].MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - 
i, nil +} + +func (m *Matrix) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Matrix) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Matrix) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.SampleStreams) > 0 { + for iNdEx := len(m.SampleStreams) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.SampleStreams[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintQuery(dAtA []byte, offset int, v uint64) int { + offset -= sovQuery(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *PrometheusResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Status) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + l = m.Data.Size() + n += 1 + l + sovQuery(uint64(l)) + l = len(m.ErrorType) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + l = len(m.Error) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + if len(m.Headers) > 0 { + for _, e := range m.Headers { + l = e.Size() + n += 1 + l + sovQuery(uint64(l)) + } + } + if len(m.Warnings) > 0 { + for _, s := range m.Warnings { + l = len(s) + n += 1 + l + sovQuery(uint64(l)) + } + } + return n +} + +func (m *PrometheusData) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ResultType) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + l = m.Result.Size() + n += 1 + l + sovQuery(uint64(l)) + if m.Stats != nil { + l = m.Stats.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *CachedResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + if len(m.Extents) > 0 { + for _, e := range m.Extents { + l = e.Size() + n += 1 + l + sovQuery(uint64(l)) + } + } + return n +} + +func (m *Extent) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Start != 0 { + n += 1 + sovQuery(uint64(m.Start)) + } + if m.End != 0 { + n += 1 + sovQuery(uint64(m.End)) + } + l = len(m.TraceId) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + if m.Response != nil { + l = m.Response.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *SampleStream) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Labels) > 0 { + for _, e := range m.Labels { + l = e.Size() + n += 1 + l + sovQuery(uint64(l)) + } + } + if len(m.Samples) > 0 { + for _, e := range m.Samples { + l = e.Size() + n += 1 + l + sovQuery(uint64(l)) + } + } + if len(m.Histograms) > 0 { for _, e := range m.Histograms { l = e.Size() n += 1 + l + sovQuery(uint64(l)) @@ -1525,56 +2958,199 @@ func (m *PrometheusResponseHeader) Size() (n int) { return n } -func (m *PrometheusRequestHeader) Size() (n int) { +func (m *PrometheusQueryResult) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovQuery(uint64(l)) - } - if len(m.Values) > 0 { - for _, s := range m.Values { - l = len(s) - n += 1 + l + sovQuery(uint64(l)) - } + if m.Result != nil { + n += m.Result.Size() } return n } -func 
sovQuery(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozQuery(x uint64) (n int) { - return sovQuery(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *SampleStream) String() string { - if this == nil { - return "nil" - } - repeatedStringForSamples := "[]Sample{" - for _, f := range this.Samples { - repeatedStringForSamples += fmt.Sprintf("%v", f) + "," +func (m *PrometheusQueryResult_Vector) Size() (n int) { + if m == nil { + return 0 } - repeatedStringForSamples += "}" - repeatedStringForHistograms := "[]SampleHistogramPair{" - for _, f := range this.Histograms { - repeatedStringForHistograms += strings.Replace(strings.Replace(f.String(), "SampleHistogramPair", "SampleHistogramPair", 1), `&`, ``, 1) + "," + var l int + _ = l + if m.Vector != nil { + l = m.Vector.Size() + n += 1 + l + sovQuery(uint64(l)) } - repeatedStringForHistograms += "}" - s := strings.Join([]string{`&SampleStream{`, - `Labels:` + fmt.Sprintf("%v", this.Labels) + `,`, - `Samples:` + repeatedStringForSamples + `,`, - `Histograms:` + repeatedStringForHistograms + `,`, - `}`, - }, "") - return s + return n } -func (this *SampleHistogramPair) String() string { - if this == nil { - return "nil" +func (m *PrometheusQueryResult_RawBytes) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RawBytes != nil { + l = len(m.RawBytes) + n += 1 + l + sovQuery(uint64(l)) + } + return n +} +func (m *PrometheusQueryResult_Matrix) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Matrix != nil { + l = m.Matrix.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} +func (m *Vector) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Samples) > 0 { + for _, e := range m.Samples { + l = e.Size() + n += 1 + l + sovQuery(uint64(l)) + } + } + return n +} + +func (m *Sample) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Labels) > 0 { + for _, e := range m.Labels { + l = e.Size() + n += 1 + l + sovQuery(uint64(l)) + } + } + if m.Sample != nil { + l = m.Sample.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.Histogram != nil { + l = m.Histogram.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *Matrix) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.SampleStreams) > 0 { + for _, e := range m.SampleStreams { + l = e.Size() + n += 1 + l + sovQuery(uint64(l)) + } + } + return n +} + +func sovQuery(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozQuery(x uint64) (n int) { + return sovQuery(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *PrometheusResponse) String() string { + if this == nil { + return "nil" + } + repeatedStringForHeaders := "[]*PrometheusResponseHeader{" + for _, f := range this.Headers { + repeatedStringForHeaders += strings.Replace(f.String(), "PrometheusResponseHeader", "PrometheusResponseHeader", 1) + "," + } + repeatedStringForHeaders += "}" + s := strings.Join([]string{`&PrometheusResponse{`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `Data:` + strings.Replace(strings.Replace(this.Data.String(), "PrometheusData", "PrometheusData", 1), `&`, ``, 1) + `,`, + `ErrorType:` + fmt.Sprintf("%v", this.ErrorType) + `,`, + `Error:` + fmt.Sprintf("%v", this.Error) + `,`, + `Headers:` + repeatedStringForHeaders + `,`, + `Warnings:` + fmt.Sprintf("%v", this.Warnings) + `,`, + `}`, + }, "") + return s +} +func (this *PrometheusData) String() string { + if this == nil { + return "nil" + } + s := 
strings.Join([]string{`&PrometheusData{`, + `ResultType:` + fmt.Sprintf("%v", this.ResultType) + `,`, + `Result:` + strings.Replace(strings.Replace(this.Result.String(), "PrometheusQueryResult", "PrometheusQueryResult", 1), `&`, ``, 1) + `,`, + `Stats:` + strings.Replace(this.Stats.String(), "PrometheusResponseStats", "PrometheusResponseStats", 1) + `,`, + `}`, + }, "") + return s +} +func (this *CachedResponse) String() string { + if this == nil { + return "nil" + } + repeatedStringForExtents := "[]Extent{" + for _, f := range this.Extents { + repeatedStringForExtents += strings.Replace(strings.Replace(f.String(), "Extent", "Extent", 1), `&`, ``, 1) + "," + } + repeatedStringForExtents += "}" + s := strings.Join([]string{`&CachedResponse{`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Extents:` + repeatedStringForExtents + `,`, + `}`, + }, "") + return s +} +func (this *Extent) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Extent{`, + `Start:` + fmt.Sprintf("%v", this.Start) + `,`, + `End:` + fmt.Sprintf("%v", this.End) + `,`, + `TraceId:` + fmt.Sprintf("%v", this.TraceId) + `,`, + `Response:` + strings.Replace(fmt.Sprintf("%v", this.Response), "Any", "types.Any", 1) + `,`, + `}`, + }, "") + return s +} +func (this *SampleStream) String() string { + if this == nil { + return "nil" + } + repeatedStringForSamples := "[]Sample{" + for _, f := range this.Samples { + repeatedStringForSamples += fmt.Sprintf("%v", f) + "," + } + repeatedStringForSamples += "}" + repeatedStringForHistograms := "[]SampleHistogramPair{" + for _, f := range this.Histograms { + repeatedStringForHistograms += strings.Replace(strings.Replace(f.String(), "SampleHistogramPair", "SampleHistogramPair", 1), `&`, ``, 1) + "," + } + repeatedStringForHistograms += "}" + s := strings.Join([]string{`&SampleStream{`, + `Labels:` + fmt.Sprintf("%v", this.Labels) + `,`, + `Samples:` + repeatedStringForSamples + `,`, + `Histograms:` + repeatedStringForHistograms + `,`, + `}`, + }, "") + return s +} +func (this *SampleHistogramPair) String() string { + if this == nil { + return "nil" } s := strings.Join([]string{`&SampleHistogramPair{`, `TimestampMs:` + fmt.Sprintf("%v", this.TimestampMs) + `,`, @@ -1661,13 +3237,84 @@ func (this *PrometheusResponseHeader) String() string { }, "") return s } -func (this *PrometheusRequestHeader) String() string { +func (this *PrometheusQueryResult) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&PrometheusRequestHeader{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Values:` + fmt.Sprintf("%v", this.Values) + `,`, + s := strings.Join([]string{`&PrometheusQueryResult{`, + `Result:` + fmt.Sprintf("%v", this.Result) + `,`, + `}`, + }, "") + return s +} +func (this *PrometheusQueryResult_Vector) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PrometheusQueryResult_Vector{`, + `Vector:` + strings.Replace(fmt.Sprintf("%v", this.Vector), "Vector", "Vector", 1) + `,`, + `}`, + }, "") + return s +} +func (this *PrometheusQueryResult_RawBytes) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PrometheusQueryResult_RawBytes{`, + `RawBytes:` + fmt.Sprintf("%v", this.RawBytes) + `,`, + `}`, + }, "") + return s +} +func (this *PrometheusQueryResult_Matrix) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PrometheusQueryResult_Matrix{`, + `Matrix:` + strings.Replace(fmt.Sprintf("%v", this.Matrix), "Matrix", "Matrix", 1) 
+ `,`, + `}`, + }, "") + return s +} +func (this *Vector) String() string { + if this == nil { + return "nil" + } + repeatedStringForSamples := "[]*Sample{" + for _, f := range this.Samples { + repeatedStringForSamples += strings.Replace(f.String(), "Sample", "Sample", 1) + "," + } + repeatedStringForSamples += "}" + s := strings.Join([]string{`&Vector{`, + `Samples:` + repeatedStringForSamples + `,`, + `}`, + }, "") + return s +} +func (this *Sample) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Sample{`, + `Labels:` + fmt.Sprintf("%v", this.Labels) + `,`, + `Sample:` + strings.Replace(fmt.Sprintf("%v", this.Sample), "Sample", "cortexpb.Sample", 1) + `,`, + `Histogram:` + strings.Replace(this.Histogram.String(), "SampleHistogramPair", "SampleHistogramPair", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Matrix) String() string { + if this == nil { + return "nil" + } + repeatedStringForSampleStreams := "[]SampleStream{" + for _, f := range this.SampleStreams { + repeatedStringForSampleStreams += strings.Replace(strings.Replace(f.String(), "SampleStream", "SampleStream", 1), `&`, ``, 1) + "," + } + repeatedStringForSampleStreams += "}" + s := strings.Join([]string{`&Matrix{`, + `SampleStreams:` + repeatedStringForSampleStreams + `,`, `}`, }, "") return s @@ -1680,7 +3327,7 @@ func valueToStringQuery(v interface{}) string { pv := reflect.Indirect(rv).Interface() return fmt.Sprintf("*%v", pv) } -func (m *SampleStream) Unmarshal(dAtA []byte) error { +func (m *PrometheusResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1703,15 +3350,47 @@ func (m *SampleStream) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SampleStream: wiretype end group for non-group") + return fmt.Errorf("proto: PrometheusResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SampleStream: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PrometheusResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -1738,16 +3417,15 @@ func (m *SampleStream) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Labels = append(m.Labels, github_com_cortexproject_cortex_pkg_cortexpb.LabelAdapter{}) - if err := m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Data.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = 
%d for field Samples", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ErrorType", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowQuery @@ -1757,29 +3435,892 @@ func (m *SampleStream) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ErrorType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthQuery + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Error = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Headers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Headers = append(m.Headers, &PrometheusResponseHeader{}) + if err := m.Headers[len(m.Headers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Warnings", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Warnings = append(m.Warnings, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PrometheusData) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PrometheusData: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PrometheusData: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResultType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResultType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Result.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Stats", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Stats == nil { + m.Stats = &PrometheusResponseStats{} + } + if err := m.Stats.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CachedResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CachedResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CachedResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { 
+ case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Extents", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Extents = append(m.Extents, Extent{}) + if err := m.Extents[len(m.Extents)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Extent) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Extent: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Extent: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Start", wireType) + } + m.Start = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Start |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field End", wireType) + } + m.End = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.End |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TraceId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := 
int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TraceId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Response", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Response == nil { + m.Response = &types.Any{} + } + if err := m.Response.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SampleStream) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SampleStream: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SampleStream: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Labels = append(m.Labels, github_com_cortexproject_cortex_pkg_cortexpb.LabelAdapter{}) + if err := m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Samples", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Samples = append(m.Samples, cortexpb.Sample{}) + if err := m.Samples[len(m.Samples)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx 
= postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Histograms", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Histograms = append(m.Histograms, SampleHistogramPair{}) + if err := m.Histograms[len(m.Histograms)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SampleHistogramPair) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SampleHistogramPair: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SampleHistogramPair: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimestampMs", wireType) + } + m.TimestampMs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TimestampMs |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Histogram", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Histogram.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SampleHistogram) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + 
b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SampleHistogram: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SampleHistogram: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) } - if postIndex > l { + var v uint64 + if (iNdEx + 8) > l { return io.ErrUnexpectedEOF } - m.Samples = append(m.Samples, cortexpb.Sample{}) - if err := m.Samples[len(m.Samples)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Count = float64(math.Float64frombits(v)) + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Sum", wireType) } - iNdEx = postIndex + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Sum = float64(math.Float64frombits(v)) case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Histograms", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Buckets", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -1806,8 +4347,8 @@ func (m *SampleStream) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Histograms = append(m.Histograms, SampleHistogramPair{}) - if err := m.Histograms[len(m.Histograms)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Buckets = append(m.Buckets, &HistogramBucket{}) + if err := m.Buckets[len(m.Buckets)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -1835,7 +4376,7 @@ func (m *SampleStream) Unmarshal(dAtA []byte) error { } return nil } -func (m *SampleHistogramPair) Unmarshal(dAtA []byte) error { +func (m *HistogramBucket) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1858,17 +4399,17 @@ func (m *SampleHistogramPair) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SampleHistogramPair: wiretype end group for non-group") + return fmt.Errorf("proto: HistogramBucket: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SampleHistogramPair: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: HistogramBucket: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TimestampMs", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Boundaries", wireType) } - m.TimestampMs = 0 + m.Boundaries = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowQuery @@ -1878,14 +4419,100 @@ func (m *SampleHistogramPair) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.TimestampMs |= int64(b&0x7F) << shift + m.Boundaries |= int32(b&0x7F) << shift if b < 0x80 { break } } case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Lower", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + 
m.Lower = float64(math.Float64frombits(v)) + case 3: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Upper", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Upper = float64(math.Float64frombits(v)) + case 4: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Count = float64(math.Float64frombits(v)) + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PrometheusResponseStats) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PrometheusResponseStats: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PrometheusResponseStats: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Histogram", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Samples", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -1912,7 +4539,10 @@ func (m *SampleHistogramPair) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Histogram.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.Samples == nil { + m.Samples = &PrometheusResponseSamplesStats{} + } + if err := m.Samples.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -1940,7 +4570,7 @@ func (m *SampleHistogramPair) Unmarshal(dAtA []byte) error { } return nil } -func (m *SampleHistogram) Unmarshal(dAtA []byte) error { +func (m *PrometheusResponseSamplesStats) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1963,37 +4593,34 @@ func (m *SampleHistogram) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SampleHistogram: wiretype end group for non-group") + return fmt.Errorf("proto: PrometheusResponseSamplesStats: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SampleHistogram: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PrometheusResponseSamplesStats: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalQueryableSamples", wireType) } - var v uint64 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF + m.TotalQueryableSamples = 0 + for shift := uint(0); ; shift += 
7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalQueryableSamples |= int64(b&0x7F) << shift + if b < 0x80 { + break + } } - v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - m.Count = float64(math.Float64frombits(v)) case 2: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field Sum", wireType) - } - var v uint64 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - m.Sum = float64(math.Float64frombits(v)) - case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Buckets", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TotalQueryableSamplesPerStep", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2020,8 +4647,8 @@ func (m *SampleHistogram) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Buckets = append(m.Buckets, &HistogramBucket{}) - if err := m.Buckets[len(m.Buckets)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.TotalQueryableSamplesPerStep = append(m.TotalQueryableSamplesPerStep, &PrometheusResponseQueryableSamplesStatsPerStep{}) + if err := m.TotalQueryableSamplesPerStep[len(m.TotalQueryableSamplesPerStep)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -2049,7 +4676,7 @@ func (m *SampleHistogram) Unmarshal(dAtA []byte) error { } return nil } -func (m *HistogramBucket) Unmarshal(dAtA []byte) error { +func (m *PrometheusResponseQueryableSamplesStatsPerStep) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2072,17 +4699,17 @@ func (m *HistogramBucket) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: HistogramBucket: wiretype end group for non-group") + return fmt.Errorf("proto: PrometheusResponseQueryableSamplesStatsPerStep: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: HistogramBucket: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PrometheusResponseQueryableSamplesStatsPerStep: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Boundaries", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) } - m.Boundaries = 0 + m.Value = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowQuery @@ -2092,44 +4719,30 @@ func (m *HistogramBucket) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Boundaries |= int32(b&0x7F) << shift + m.Value |= int64(b&0x7F) << shift if b < 0x80 { break } } case 2: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field Lower", wireType) - } - var v uint64 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - m.Lower = float64(math.Float64frombits(v)) - case 3: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field Upper", wireType) - } - var v uint64 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - m.Upper = float64(math.Float64frombits(v)) - case 4: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field Count", 
wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimestampMs", wireType) } - var v uint64 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF + m.TimestampMs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TimestampMs |= int64(b&0x7F) << shift + if b < 0x80 { + break + } } - v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - m.Count = float64(math.Float64frombits(v)) default: iNdEx = preIndex skippy, err := skipQuery(dAtA[iNdEx:]) @@ -2154,7 +4767,7 @@ func (m *HistogramBucket) Unmarshal(dAtA []byte) error { } return nil } -func (m *PrometheusResponseStats) Unmarshal(dAtA []byte) error { +func (m *PrometheusResponseHeader) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2177,17 +4790,17 @@ func (m *PrometheusResponseStats) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PrometheusResponseStats: wiretype end group for non-group") + return fmt.Errorf("proto: PrometheusResponseHeader: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PrometheusResponseStats: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PrometheusResponseHeader: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Samples", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowQuery @@ -2197,27 +4810,55 @@ func (m *PrometheusResponseStats) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthQuery } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthQuery } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Samples == nil { - m.Samples = &PrometheusResponseSamplesStats{} + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) } - if err := m.Samples.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF } + m.Values = append(m.Values, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex @@ -2243,7 +4884,7 @@ func (m *PrometheusResponseStats) Unmarshal(dAtA []byte) error { } return nil } -func (m *PrometheusResponseSamplesStats) Unmarshal(dAtA []byte) error { +func (m *PrometheusQueryResult) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2266,17 +4907,17 @@ func (m *PrometheusResponseSamplesStats) 
Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PrometheusResponseSamplesStats: wiretype end group for non-group") + return fmt.Errorf("proto: PrometheusQueryResult: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PrometheusResponseSamplesStats: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PrometheusQueryResult: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TotalQueryableSamples", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Vector", wireType) } - m.TotalQueryableSamples = 0 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowQuery @@ -2286,14 +4927,63 @@ func (m *PrometheusResponseSamplesStats) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.TotalQueryableSamples |= int64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Vector{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Result = &PrometheusQueryResult_Vector{v} + iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TotalQueryableSamplesPerStep", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RawBytes", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := make([]byte, postIndex-iNdEx) + copy(v, dAtA[iNdEx:postIndex]) + m.Result = &PrometheusQueryResult_RawBytes{v} + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Matrix", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2320,10 +5010,11 @@ func (m *PrometheusResponseSamplesStats) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.TotalQueryableSamplesPerStep = append(m.TotalQueryableSamplesPerStep, &PrometheusResponseQueryableSamplesStatsPerStep{}) - if err := m.TotalQueryableSamplesPerStep[len(m.TotalQueryableSamplesPerStep)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + v := &Matrix{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } + m.Result = &PrometheusQueryResult_Matrix{v} iNdEx = postIndex default: iNdEx = preIndex @@ -2349,7 +5040,7 @@ func (m *PrometheusResponseSamplesStats) Unmarshal(dAtA []byte) error { } return nil } -func (m *PrometheusResponseQueryableSamplesStatsPerStep) Unmarshal(dAtA []byte) error { +func (m *Vector) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2372,17 +5063,17 @@ func (m *PrometheusResponseQueryableSamplesStatsPerStep) Unmarshal(dAtA []byte) fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PrometheusResponseQueryableSamplesStatsPerStep: 
wiretype end group for non-group") + return fmt.Errorf("proto: Vector: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PrometheusResponseQueryableSamplesStatsPerStep: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Vector: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Samples", wireType) } - m.Value = 0 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowQuery @@ -2392,30 +5083,26 @@ func (m *PrometheusResponseQueryableSamplesStatsPerStep) Unmarshal(dAtA []byte) } b := dAtA[iNdEx] iNdEx++ - m.Value |= int64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TimestampMs", wireType) + if msglen < 0 { + return ErrInvalidLengthQuery } - m.TimestampMs = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TimestampMs |= int64(b&0x7F) << shift - if b < 0x80 { - break - } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Samples = append(m.Samples, &Sample{}) + if err := m.Samples[len(m.Samples)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipQuery(dAtA[iNdEx:]) @@ -2440,7 +5127,7 @@ func (m *PrometheusResponseQueryableSamplesStatsPerStep) Unmarshal(dAtA []byte) } return nil } -func (m *PrometheusResponseHeader) Unmarshal(dAtA []byte) error { +func (m *Sample) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2463,17 +5150,17 @@ func (m *PrometheusResponseHeader) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PrometheusResponseHeader: wiretype end group for non-group") + return fmt.Errorf("proto: Sample: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PrometheusResponseHeader: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Sample: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowQuery @@ -2483,29 +5170,31 @@ func (m *PrometheusResponseHeader) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthQuery } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthQuery } if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + m.Labels = append(m.Labels, github_com_cortexproject_cortex_pkg_cortexpb.LabelAdapter{}) + if err := m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = 
postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Sample", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowQuery @@ -2515,23 +5204,63 @@ func (m *PrometheusResponseHeader) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthQuery } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthQuery } if postIndex > l { return io.ErrUnexpectedEOF } - m.Values = append(m.Values, string(dAtA[iNdEx:postIndex])) + if m.Sample == nil { + m.Sample = &cortexpb.Sample{} + } + if err := m.Sample.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Histogram", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Histogram == nil { + m.Histogram = &SampleHistogramPair{} + } + if err := m.Histogram.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -2557,7 +5286,7 @@ func (m *PrometheusResponseHeader) Unmarshal(dAtA []byte) error { } return nil } -func (m *PrometheusRequestHeader) Unmarshal(dAtA []byte) error { +func (m *Matrix) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2580,17 +5309,17 @@ func (m *PrometheusRequestHeader) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PrometheusRequestHeader: wiretype end group for non-group") + return fmt.Errorf("proto: Matrix: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PrometheusRequestHeader: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Matrix: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SampleStreams", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowQuery @@ -2600,55 +5329,25 @@ func (m *PrometheusRequestHeader) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthQuery } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthQuery } if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) 
- } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF + m.SampleStreams = append(m.SampleStreams, SampleStream{}) + if err := m.SampleStreams[len(m.SampleStreams)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - m.Values = append(m.Values, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex diff --git a/pkg/querier/tripperware/query.proto b/pkg/querier/tripperware/query.proto index 9664fcf528..c2eb585cb3 100644 --- a/pkg/querier/tripperware/query.proto +++ b/pkg/querier/tripperware/query.proto @@ -5,11 +5,44 @@ package tripperware; option go_package = "tripperware"; import "gogoproto/gogo.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/any.proto"; import "github.com/cortexproject/cortex/pkg/cortexpb/cortex.proto"; option (gogoproto.marshaler_all) = true; option (gogoproto.unmarshaler_all) = true; +message PrometheusResponse { + string Status = 1 [(gogoproto.jsontag) = "status"]; + PrometheusData Data = 2 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "data,omitempty"]; + string ErrorType = 3 [(gogoproto.jsontag) = "errorType,omitempty"]; + string Error = 4 [(gogoproto.jsontag) = "error,omitempty"]; + repeated tripperware.PrometheusResponseHeader Headers = 5 [(gogoproto.jsontag) = "-"]; + repeated string Warnings = 6 [(gogoproto.jsontag) = "warnings,omitempty"]; +} + +message PrometheusData { + string ResultType = 1 [(gogoproto.jsontag) = "resultType"]; + tripperware.PrometheusQueryResult Result = 2 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "result"]; + tripperware.PrometheusResponseStats stats = 3 [(gogoproto.jsontag) = "stats,omitempty"]; +} + +message CachedResponse { + string key = 1 [(gogoproto.jsontag) = "key"]; + + // List of cached responses; non-overlapping and in order. 
+ repeated Extent extents = 2 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "extents"]; +} + +message Extent { + int64 start = 1 [(gogoproto.jsontag) = "start"]; + int64 end = 2 [(gogoproto.jsontag) = "end"]; + // reserved the previous key to ensure cache transition + reserved 3; + string trace_id = 4 [(gogoproto.jsontag) = "-"]; + google.protobuf.Any response = 5 [(gogoproto.jsontag) = "response"]; +} + message SampleStream { repeated cortexpb.LabelPair labels = 1 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "metric", (gogoproto.customtype) = "github.com/cortexproject/cortex/pkg/cortexpb.LabelAdapter"]; repeated cortexpb.Sample samples = 2 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "values"]; @@ -53,7 +86,24 @@ message PrometheusResponseHeader { repeated string Values = 2 [(gogoproto.jsontag) = "-"]; } -message PrometheusRequestHeader { - string Name = 1 [(gogoproto.jsontag) = "-"]; - repeated string Values = 2 [(gogoproto.jsontag) = "-"]; -} \ No newline at end of file +message PrometheusQueryResult { + oneof result { + Vector vector = 1; + bytes rawBytes = 2; + Matrix matrix = 3; + } +} + +message Vector { + repeated Sample samples = 1; +} + +message Sample { + repeated cortexpb.LabelPair labels = 1 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "metric", (gogoproto.customtype) = "github.com/cortexproject/cortex/pkg/cortexpb.LabelAdapter"]; + cortexpb.Sample sample = 2 [(gogoproto.nullable) = true, (gogoproto.jsontag) = "value"]; + SampleHistogramPair histogram = 3 [(gogoproto.nullable) = true, (gogoproto.jsontag) = "histogram"]; +} + +message Matrix { + repeated SampleStream sampleStreams = 1 [(gogoproto.nullable) = false]; +} diff --git a/pkg/querier/tripperware/query_attribute_matcher.go b/pkg/querier/tripperware/query_attribute_matcher.go index de92d5e2c6..169a50553f 100644 --- a/pkg/querier/tripperware/query_attribute_matcher.go +++ b/pkg/querier/tripperware/query_attribute_matcher.go @@ -14,7 +14,7 @@ import ( "github.com/cortexproject/cortex/pkg/util/validation" ) -const QueryRejectErrorMessage = "This query has been rejected by the service operator." +const QueryRejectErrorMessage = "This query does not perform well and has been rejected by the service operator." 
func rejectQueryOrSetPriority(r *http.Request, now time.Time, lookbackDelta time.Duration, limits Limits, userStr string, rejectedQueriesPerTenant *prometheus.CounterVec) error { if limits == nil || !(limits.QueryPriority(userStr).Enabled || limits.QueryRejection(userStr).Enabled) { @@ -86,89 +86,120 @@ func getOperation(r *http.Request) string { } func matchAttributeForExpressionQuery(attribute validation.QueryAttribute, op string, r *http.Request, query string, now time.Time, minTime, maxTime int64) bool { - if attribute.ApiType != "" && attribute.ApiType != op { - return false + matched := false + if attribute.ApiType != "" { + matched = true + if attribute.ApiType != op { + return false + } } - if attribute.Regex != "" && attribute.Regex != ".*" && attribute.Regex != ".+" { - if attribute.CompiledRegex != nil && !attribute.CompiledRegex.MatchString(query) { + if attribute.Regex != "" { + matched = true + if attribute.Regex != ".*" && attribute.Regex != ".+" && attribute.CompiledRegex != nil && !attribute.CompiledRegex.MatchString(query) { return false } } - if !isWithinTimeAttributes(attribute.TimeWindow, now, minTime, maxTime) { - return false + if attribute.TimeWindow.Start != 0 || attribute.TimeWindow.End != 0 { + matched = true + if !isWithinTimeAttributes(attribute.TimeWindow, now, minTime, maxTime) { + return false + } } - if !isWithinTimeRangeAttribute(attribute.TimeRangeLimit, minTime, maxTime) { - return false + if attribute.TimeRangeLimit.Min != 0 || attribute.TimeRangeLimit.Max != 0 { + matched = true + if !isWithinTimeRangeAttribute(attribute.TimeRangeLimit, minTime, maxTime) { + return false + } } - if op == "query_range" && !isWithinQueryStepLimit(attribute.QueryStepLimit, r) { - return false + if op == "query_range" && (attribute.QueryStepLimit.Min != 0 || attribute.QueryStepLimit.Max != 0) { + matched = true + if !isWithinQueryStepLimit(attribute.QueryStepLimit, r) { + return false + } } - if attribute.UserAgentRegex != "" && attribute.UserAgentRegex != ".*" && attribute.CompiledUserAgentRegex != nil { - if !attribute.CompiledUserAgentRegex.MatchString(r.Header.Get("User-Agent")) { + if attribute.UserAgentRegex != "" { + matched = true + if attribute.UserAgentRegex != ".*" && attribute.CompiledUserAgentRegex != nil && !attribute.CompiledUserAgentRegex.MatchString(r.Header.Get("User-Agent")) { return false } } - if attribute.DashboardUID != "" && attribute.DashboardUID != r.Header.Get("X-Dashboard-Uid") { - return false + if attribute.DashboardUID != "" { + matched = true + if attribute.DashboardUID != r.Header.Get("X-Dashboard-Uid") { + return false + } } - if attribute.PanelID != "" && attribute.PanelID != r.Header.Get("X-Panel-Id") { - return false + if attribute.PanelID != "" { + matched = true + if attribute.PanelID != r.Header.Get("X-Panel-Id") { + return false + } } - return true + return matched } func matchAttributeForMetadataQuery(attribute validation.QueryAttribute, op string, r *http.Request, now time.Time) bool { - if attribute.ApiType != "" && attribute.ApiType != op { - return false + matched := false + if attribute.ApiType != "" { + matched = true + if attribute.ApiType != op { + return false + } } if err := r.ParseForm(); err != nil { return false } - if attribute.Regex != "" && attribute.Regex != ".*" && attribute.CompiledRegex != nil { - atLeastOneMatched := false - for _, matcher := range r.Form["match[]"] { - if attribute.CompiledRegex.MatchString(matcher) { - atLeastOneMatched = true - break + if attribute.Regex != "" { + matched = true + if 
attribute.Regex != ".*" && attribute.CompiledRegex != nil { + atLeastOneMatched := false + for _, matcher := range r.Form["match[]"] { + if attribute.CompiledRegex.MatchString(matcher) { + atLeastOneMatched = true + break + } + } + if !atLeastOneMatched { + return false } - } - if !atLeastOneMatched { - return false } } startTime, _ := util.ParseTime(r.FormValue("start")) endTime, _ := util.ParseTime(r.FormValue("end")) - if !isWithinTimeAttributes(attribute.TimeWindow, now, startTime, endTime) { - return false + if attribute.TimeWindow.Start != 0 || attribute.TimeWindow.End != 0 { + matched = true + if !isWithinTimeAttributes(attribute.TimeWindow, now, startTime, endTime) { + return false + } } - if !isWithinTimeRangeAttribute(attribute.TimeRangeLimit, startTime, endTime) { - return false + if attribute.TimeRangeLimit.Min != 0 || attribute.TimeRangeLimit.Max != 0 { + matched = true + if !isWithinTimeRangeAttribute(attribute.TimeRangeLimit, startTime, endTime) { + return false + } } - if attribute.UserAgentRegex != "" && attribute.UserAgentRegex != ".*" && attribute.CompiledUserAgentRegex != nil { - if !attribute.CompiledUserAgentRegex.MatchString(r.Header.Get("User-Agent")) { + if attribute.UserAgentRegex != "" { + matched = true + if attribute.UserAgentRegex != ".*" && attribute.CompiledUserAgentRegex != nil && !attribute.CompiledUserAgentRegex.MatchString(r.Header.Get("User-Agent")) { return false } } - return true + return matched } func isWithinTimeAttributes(timeWindow validation.TimeWindow, now time.Time, startTime, endTime int64) bool { - if timeWindow.Start == 0 && timeWindow.End == 0 { - return true - } - if timeWindow.Start != 0 { startTimeThreshold := now.Add(-1 * time.Duration(timeWindow.Start).Abs()).Add(-1 * time.Minute).Truncate(time.Minute).UnixMilli() if startTime == 0 || startTime < startTimeThreshold { @@ -187,9 +218,6 @@ func isWithinTimeAttributes(timeWindow validation.TimeWindow, now time.Time, sta } func isWithinTimeRangeAttribute(limit validation.TimeRangeLimit, startTime, endTime int64) bool { - if limit.Min == 0 && limit.Max == 0 { - return true - } if startTime == 0 || endTime == 0 { return false @@ -208,9 +236,6 @@ func isWithinTimeRangeAttribute(limit validation.TimeRangeLimit, startTime, endT } func isWithinQueryStepLimit(queryStepLimit validation.QueryStepLimit, r *http.Request) bool { - if queryStepLimit.Min == 0 && queryStepLimit.Max == 0 { - return true - } step, err := util.ParseDurationMs(r.FormValue("step")) if err != nil { diff --git a/pkg/querier/tripperware/query_attribute_matcher_test.go b/pkg/querier/tripperware/query_attribute_matcher_test.go index dc3c204dd9..cbd1f949f8 100644 --- a/pkg/querier/tripperware/query_attribute_matcher_test.go +++ b/pkg/querier/tripperware/query_attribute_matcher_test.go @@ -99,6 +99,7 @@ func Test_rejectQueryOrSetPriorityShouldRejectIfMatches(t *testing.T) { type testCase struct { queryRejectionEnabled bool path string + headers http.Header expectedError error expectedPriority int64 rejectQueryAttribute validation.QueryAttribute @@ -149,10 +150,10 @@ func Test_rejectQueryOrSetPriorityShouldRejectIfMatches(t *testing.T) { }, }, - "should reject if query rejection enabled with step limit and subQuery step match": { + "should not reject if query rejection enabled with step limit and query type is not range query": { queryRejectionEnabled: true, path: "/api/v1/query?time=1536716898&query=avg_over_time%28rate%28node_cpu_seconds_total%5B1m%5D%29%5B10m%3A5s%5D%29", //avg_over_time(rate(node_cpu_seconds_total[1m])[10m:5s]) - 
expectedError: httpgrpc.Errorf(http.StatusUnprocessableEntity, QueryRejectErrorMessage), + expectedError: nil, rejectQueryAttribute: validation.QueryAttribute{ QueryStepLimit: validation.QueryStepLimit{ Min: model.Duration(time.Second * 5), @@ -211,12 +212,64 @@ func Test_rejectQueryOrSetPriorityShouldRejectIfMatches(t *testing.T) { CompiledRegex: regexp.MustCompile(".*sum.*"), }, }, + + "should reject if only dashboard and panelId properties provided and query has both headers matching the property": { + queryRejectionEnabled: true, + path: fmt.Sprintf("/api/v1/query?start=%d&end=%d&step=7s&query=%s", now.Add(-30*time.Minute).UnixMilli()/1000, now.Add(-20*time.Minute).UnixMilli()/1000, url.QueryEscape("count(sum(up))")), + headers: http.Header{ + "X-Dashboard-Uid": {"dashboard-uid"}, + "X-Panel-Id": {"pane"}, + }, + expectedError: httpgrpc.Errorf(http.StatusUnprocessableEntity, QueryRejectErrorMessage), + rejectQueryAttribute: validation.QueryAttribute{ + DashboardUID: "dashboard-uid", + PanelID: "pane", + }, + }, + + "should not reject if query_rejection properties only provides dashboard_uuid and panel_id but query doesn't have those header": { + queryRejectionEnabled: true, + path: fmt.Sprintf("/api/v1/series?start=%d&end=%d&step=7s&match[]=%s", now.Add(-30*time.Minute).UnixMilli()/1000, now.Add(-20*time.Minute).UnixMilli()/1000, url.QueryEscape("count(sum(up))")), + headers: http.Header{}, + expectedError: nil, + rejectQueryAttribute: validation.QueryAttribute{ + PanelID: "panel", + DashboardUID: "dash123", + }, + }, + + "should not reject if query_rejection properties only provides dashboard_uid and query doesn't have X-Dashboard-Uid header": { + queryRejectionEnabled: true, + path: fmt.Sprintf("/api/v1/series?start=%d&end=%d&step=7s&match[]=%s", now.Add(-30*time.Minute).UnixMilli()/1000, now.Add(-20*time.Minute).UnixMilli()/1000, url.QueryEscape("count(sum(up))")), + headers: http.Header{ + "X-Panel-Id": {"pane"}, + }, + expectedError: nil, + rejectQueryAttribute: validation.QueryAttribute{ + DashboardUID: "dash123", + }, + }, + + "should not reject if query_rejection properties only provides user agent regex and query doesn't have User-Agent header": { + queryRejectionEnabled: true, + path: fmt.Sprintf("/api/v1/series?start=%d&end=%d&step=7s&match[]=%s", now.Add(-30*time.Minute).UnixMilli()/1000, now.Add(-20*time.Minute).UnixMilli()/1000, url.QueryEscape("count(sum(up))")), + headers: http.Header{ + "X-Dashboard-Uid": {"dashboard-uid"}, + "X-Panel-Id": {"pane"}, + }, + expectedError: nil, + rejectQueryAttribute: validation.QueryAttribute{ + UserAgentRegex: "^goclient", + CompiledUserAgentRegex: regexp.MustCompile("^goclient"), + }, + }, } for testName, testData := range tests { t.Run(testName, func(t *testing.T) { req, err := http.NewRequest("GET", testData.path, http.NoBody) require.NoError(t, err) + req.Header = testData.headers reqStats, ctx := stats.ContextWithEmptyStats(context.Background()) req = req.WithContext(ctx) limits.queryRejection.Enabled = testData.queryRejectionEnabled @@ -257,10 +310,10 @@ func Test_matchAttributeForExpressionQueryShouldMatchRegex(t *testing.T) { query: "count(sum(up))", result: true, }, - "should hit if regex is an empty string": { + "should miss if regex is an empty string and is only provided property of query_rejection": { regex: "", query: "sum(up)", - result: true, + result: false, }, } @@ -526,15 +579,15 @@ func Test_matchAttributeForExpressionQueryHeadersShouldBeCheckedIfSet(t *testing } tests := map[string]testCase{ - "should not check any 
of them if attributes are empty (match)": { - expectedResult: true, + "should consider no match if no properties provided for query_attributes": { + expectedResult: false, }, - "should not check if attributes are empty even corresponding headers exist (match)": { + "should consider no match if no properties provided for query_attributes even corresponding headers exist": { headers: http.Header{ "X-Dashboard-Uid": {"dashboard-uid"}, "X-Panel-Id": {"panel-id"}, }, - expectedResult: true, + expectedResult: false, }, "should match all attributes if all set and all headers provided": { headers: http.Header{ @@ -564,14 +617,14 @@ func Test_matchAttributeForExpressionQueryHeadersShouldBeCheckedIfSet(t *testing PanelID: "panel-id", }, }, - "should not compare if values are empty (match)": { + "should ignore if query_rejection property values are empty therefore shouldn't match if only those parameters was provided": { headers: http.Header{ "X-Panel-Id": {""}, }, queryAttribute: validation.QueryAttribute{ PanelID: "", }, - expectedResult: true, + expectedResult: false, }, "should match if headers match provided attributes ": { headers: http.Header{ @@ -583,6 +636,14 @@ func Test_matchAttributeForExpressionQueryHeadersShouldBeCheckedIfSet(t *testing }, expectedResult: true, }, + "should not match if only dashboard and panel properties are specified and query missing those headers (metadata queries)": { + headers: http.Header{}, + queryAttribute: validation.QueryAttribute{ + DashboardUID: "dashboard", + PanelID: "pane", + }, + expectedResult: false, + }, } for testName, testData := range tests { @@ -620,10 +681,10 @@ func Test_matchAttributeForExpressionQueryShouldMatchUserAgentRegex(t *testing.T userAgentHeader: "grafana-agent/v0.19.0", result: true, }, - "should hit if regex is an empty string": { + "should miss if regex is an empty string and only provided property for query_attribute": { userAgentRegex: "", userAgentHeader: "grafana-agent/v0.19.0", - result: true, + result: false, }, } diff --git a/pkg/querier/tripperware/queryrange/limits.go b/pkg/querier/tripperware/queryrange/limits.go index a4d991aba8..4aa9318595 100644 --- a/pkg/querier/tripperware/queryrange/limits.go +++ b/pkg/querier/tripperware/queryrange/limits.go @@ -60,7 +60,7 @@ func (l limitsMiddleware) Do(ctx context.Context, r tripperware.Request) (trippe "redEnd", util.FormatTimeMillis(r.GetEnd()), "maxQueryLookback", maxQueryLookback) - return NewEmptyPrometheusResponse(), nil + return tripperware.NewEmptyPrometheusResponse(false), nil } if r.GetStart() < minStartTime { diff --git a/pkg/querier/tripperware/queryrange/limits_test.go b/pkg/querier/tripperware/queryrange/limits_test.go index 5ee78fbf0b..6c3e257986 100644 --- a/pkg/querier/tripperware/queryrange/limits_test.go +++ b/pkg/querier/tripperware/queryrange/limits_test.go @@ -74,7 +74,7 @@ func TestLimitsMiddleware_MaxQueryLookback(t *testing.T) { testData := testData t.Run(testName, func(t *testing.T) { t.Parallel() - req := &PrometheusRequest{ + req := &tripperware.PrometheusRequest{ Start: util.TimeToMillis(testData.reqStartTime), End: util.TimeToMillis(testData.reqEndTime), } @@ -82,7 +82,7 @@ func TestLimitsMiddleware_MaxQueryLookback(t *testing.T) { limits := mockLimits{maxQueryLookback: testData.maxQueryLookback} middleware := NewLimitsMiddleware(limits, 5*time.Minute) - innerRes := NewEmptyPrometheusResponse() + innerRes := tripperware.NewEmptyPrometheusResponse(false) inner := &mockHandler{} inner.On("Do", mock.Anything, mock.Anything).Return(innerRes, nil) @@ 
-193,7 +193,7 @@ func TestLimitsMiddleware_MaxQueryLength(t *testing.T) { testData := testData t.Run(testName, func(t *testing.T) { t.Parallel() - req := &PrometheusRequest{ + req := &tripperware.PrometheusRequest{ Query: testData.query, Start: util.TimeToMillis(testData.reqStartTime), End: util.TimeToMillis(testData.reqEndTime), @@ -205,7 +205,7 @@ func TestLimitsMiddleware_MaxQueryLength(t *testing.T) { limits := mockLimits{maxQueryLength: testData.maxQueryLength} middleware := NewLimitsMiddleware(limits, 5*time.Minute) - innerRes := NewEmptyPrometheusResponse() + innerRes := tripperware.NewEmptyPrometheusResponse(false) inner := &mockHandler{} inner.On("Do", mock.Anything, mock.Anything).Return(innerRes, nil) diff --git a/pkg/querier/tripperware/queryrange/marshaling_test.go b/pkg/querier/tripperware/queryrange/marshaling_test.go index 981f4ce3d7..e126952c98 100644 --- a/pkg/querier/tripperware/queryrange/marshaling_test.go +++ b/pkg/querier/tripperware/queryrange/marshaling_test.go @@ -3,8 +3,7 @@ package queryrange import ( "bytes" "context" - "github.com/gogo/protobuf/proto" - io "io" + "io" "math/rand" "net/http" "testing" @@ -59,7 +58,7 @@ func BenchmarkPrometheusCodec_EncodeResponse(b *testing.B) { } } -func mockPrometheusResponse(numSeries, numSamplesPerSeries int) *PrometheusResponse { +func mockPrometheusResponse(numSeries, numSamplesPerSeries int) *tripperware.PrometheusResponse { stream := make([]tripperware.SampleStream, numSeries) for s := 0; s < numSeries; s++ { // Generate random samples. @@ -84,11 +83,17 @@ func mockPrometheusResponse(numSeries, numSamplesPerSeries int) *PrometheusRespo } } - return &PrometheusResponse{ + return &tripperware.PrometheusResponse{ Status: "success", - Data: PrometheusData{ + Data: tripperware.PrometheusData{ ResultType: "vector", - Result: stream, + Result: tripperware.PrometheusQueryResult{ + Result: &tripperware.PrometheusQueryResult_Matrix{ + Matrix: &tripperware.Matrix{ + SampleStreams: stream, + }, + }, + }, }, } } diff --git a/pkg/querier/tripperware/queryrange/query_range.go b/pkg/querier/tripperware/queryrange/query_range.go index 51afddfca4..939532e415 100644 --- a/pkg/querier/tripperware/queryrange/query_range.go +++ b/pkg/querier/tripperware/queryrange/query_range.go @@ -7,7 +7,6 @@ import ( "io" "net/http" "net/url" - "sort" "strconv" "strings" "time" @@ -19,8 +18,6 @@ import ( "github.com/opentracing/opentracing-go" otlog "github.com/opentracing/opentracing-go/log" "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/model/timestamp" - "github.com/thanos-io/thanos/pkg/strutil" "github.com/weaveworks/common/httpgrpc" "github.com/cortexproject/cortex/pkg/querier/tripperware" @@ -36,7 +33,6 @@ type Compression string const ( DisableCompression Compression = "" GzipCompression Compression = "gzip" - SnappyCompression Compression = "snappy" applicationProtobuf string = "application/x-protobuf" applicationJson string = "application/json" ) @@ -64,7 +60,7 @@ type prometheusCodec struct { func NewPrometheusCodec(sharded bool, c string, enableProtobuf bool) *prometheusCodec { //nolint:revive var compression Compression - if c == "gzip" || c == "snappy" { + if c == "gzip" { compression = Compression(c) } else { compression = DisableCompression @@ -76,119 +72,19 @@ func NewPrometheusCodec(sharded bool, c string, enableProtobuf bool) *prometheus } } -// WithStartEnd clones the current `PrometheusRequest` with a new `start` and `end` timestamp. 
-func (q *PrometheusRequest) WithStartEnd(start int64, end int64) tripperware.Request { - new := *q - new.Start = start - new.End = end - return &new -} - -// WithQuery clones the current `PrometheusRequest` with a new query. -func (q *PrometheusRequest) WithQuery(query string) tripperware.Request { - new := *q - new.Query = query - return &new -} - -// WithStats clones the current `PrometheusRequest` with a new stats. -func (q *PrometheusRequest) WithStats(stats string) tripperware.Request { - new := *q - new.Stats = stats - return &new -} - -// LogToSpan logs the current `PrometheusRequest` parameters to the specified span. -func (q *PrometheusRequest) LogToSpan(sp opentracing.Span) { - sp.LogFields( - otlog.String("query", q.GetQuery()), - otlog.String("start", timestamp.Time(q.GetStart()).String()), - otlog.String("end", timestamp.Time(q.GetEnd()).String()), - otlog.Int64("step (ms)", q.GetStep()), - ) -} - -type byFirstTime []*PrometheusResponse - -func (a byFirstTime) Len() int { return len(a) } -func (a byFirstTime) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a byFirstTime) Less(i, j int) bool { return a[i].minTime() < a[j].minTime() } - -func (resp *PrometheusResponse) minTime() int64 { - result := resp.Data.Result - if len(result) == 0 { - return -1 - } - if len(result[0].Samples) == 0 { - return -1 - } - return result[0].Samples[0].TimestampMs -} - -func (resp *PrometheusResponse) HTTPHeaders() map[string][]string { - if resp != nil && resp.GetHeaders() != nil { - r := map[string][]string{} - for _, header := range resp.GetHeaders() { - if header != nil { - r[header.Name] = header.Values - } - } - - return r - } - return nil -} - -// NewEmptyPrometheusResponse returns an empty successful Prometheus query range response. -func NewEmptyPrometheusResponse() *PrometheusResponse { - return &PrometheusResponse{ - Status: StatusSuccess, - Data: PrometheusData{ - ResultType: model.ValMatrix.String(), - Result: []tripperware.SampleStream{}, - }, - } -} - -func (c prometheusCodec) MergeResponse(ctx context.Context, _ tripperware.Request, responses ...tripperware.Response) (tripperware.Response, error) { +func (c prometheusCodec) MergeResponse(ctx context.Context, req tripperware.Request, responses ...tripperware.Response) (tripperware.Response, error) { sp, _ := opentracing.StartSpanFromContext(ctx, "QueryRangeResponse.MergeResponse") sp.SetTag("response_count", len(responses)) defer sp.Finish() if len(responses) == 0 { - return NewEmptyPrometheusResponse(), nil - } - - promResponses := make([]*PrometheusResponse, 0, len(responses)) - warnings := make([][]string, 0, len(responses)) - for _, res := range responses { - promResponses = append(promResponses, res.(*PrometheusResponse)) - if w := res.(*PrometheusResponse).Warnings; w != nil { - warnings = append(warnings, w) - } - } - - // Merge the responses. - sort.Sort(byFirstTime(promResponses)) - sampleStreams, err := matrixMerge(ctx, promResponses) - if err != nil { - return nil, err + return tripperware.NewEmptyPrometheusResponse(false), nil } - response := PrometheusResponse{ - Status: StatusSuccess, - Data: PrometheusData{ - ResultType: model.ValMatrix.String(), - Result: sampleStreams, - Stats: statsMerge(c.sharded, promResponses), - }, - Warnings: strutil.MergeUnsortedSlices(warnings...), - } - - return &response, nil + return tripperware.MergeResponse(ctx, c.sharded, nil, responses...) 
} func (c prometheusCodec) DecodeRequest(_ context.Context, r *http.Request, forwardHeaders []string) (tripperware.Request, error) { - var result PrometheusRequest + result := tripperware.PrometheusRequest{Headers: map[string][]string{}} var err error result.Start, err = util.ParseTime(r.FormValue("start")) if err != nil { @@ -227,7 +123,7 @@ func (c prometheusCodec) DecodeRequest(_ context.Context, r *http.Request, forwa for _, header := range forwardHeaders { for h, hv := range r.Header { if strings.EqualFold(h, header) { - result.Headers = append(result.Headers, &tripperware.PrometheusRequestHeader{Name: h, Values: hv}) + result.Headers[h] = hv break } } @@ -244,7 +140,7 @@ func (c prometheusCodec) DecodeRequest(_ context.Context, r *http.Request, forwa } func (c prometheusCodec) EncodeRequest(ctx context.Context, r tripperware.Request) (*http.Request, error) { - promReq, ok := r.(*PrometheusRequest) + promReq, ok := r.(*tripperware.PrometheusRequest) if !ok { return nil, httpgrpc.Errorf(http.StatusBadRequest, "invalid request format") } @@ -261,13 +157,13 @@ func (c prometheusCodec) EncodeRequest(ctx context.Context, r tripperware.Reques } var h = http.Header{} - for _, hv := range promReq.Headers { - for _, v := range hv.Values { - h.Add(hv.Name, v) + for n, hv := range promReq.Headers { + for _, v := range hv { + h.Add(n, v) } } - if c.compression == SnappyCompression || c.compression == GzipCompression { + if c.compression == GzipCompression { h.Set("Accept-Encoding", string(c.compression)) } if c.enableProtobuf { @@ -305,7 +201,7 @@ func (c prometheusCodec) DecodeResponse(ctx context.Context, r *http.Response, _ } log.LogFields(otlog.Int("bytes", len(buf))) - var resp PrometheusResponse + var resp tripperware.PrometheusResponse if r.Header != nil && r.Header.Get("Content-Type") == applicationProtobuf { err = proto.Unmarshal(buf, &resp) } else { @@ -326,12 +222,15 @@ func (prometheusCodec) EncodeResponse(ctx context.Context, res tripperware.Respo sp, _ := opentracing.StartSpanFromContext(ctx, "APIResponse.ToHTTPResponse") defer sp.Finish() - a, ok := res.(*PrometheusResponse) + a, ok := res.(*tripperware.PrometheusResponse) if !ok { return nil, httpgrpc.Errorf(http.StatusInternalServerError, "invalid response format") } - sp.LogFields(otlog.Int("series", len(a.Data.Result))) + if a != nil { + m := a.Data.Result.GetMatrix() + sp.LogFields(otlog.Int("series", len(m.GetSampleStreams()))) + } b, err := json.Marshal(a) if err != nil { @@ -351,66 +250,6 @@ func (prometheusCodec) EncodeResponse(ctx context.Context, res tripperware.Respo return &resp, nil } -// statsMerge merge the stats from 2 responses -// this function is similar to matrixMerge -func statsMerge(shouldSumStats bool, resps []*PrometheusResponse) *tripperware.PrometheusResponseStats { - output := map[int64]*tripperware.PrometheusResponseQueryableSamplesStatsPerStep{} - hasStats := false - for _, resp := range resps { - if resp.Data.Stats == nil { - continue - } - - hasStats = true - if resp.Data.Stats.Samples == nil { - continue - } - - for _, s := range resp.Data.Stats.Samples.TotalQueryableSamplesPerStep { - if shouldSumStats { - if stats, ok := output[s.GetTimestampMs()]; ok { - stats.Value += s.Value - } else { - output[s.GetTimestampMs()] = s - } - } else { - output[s.GetTimestampMs()] = s - } - } - } - - if !hasStats { - return nil - } - return tripperware.StatsMerge(output) -} - -func matrixMerge(ctx context.Context, resps []*PrometheusResponse) ([]tripperware.SampleStream, error) { - output := 
make(map[string]tripperware.SampleStream) - for _, resp := range resps { - if err := ctx.Err(); err != nil { - return nil, err - } - if resp == nil { - continue - } - tripperware.MergeSampleStreams(output, resp.Data.GetResult()) - } - - keys := make([]string, 0, len(output)) - for key := range output { - keys = append(keys, key) - } - sort.Strings(keys) - - result := make([]tripperware.SampleStream, 0, len(output)) - for _, key := range keys { - result = append(result, output[key]) - } - - return result, nil -} - func encodeDurationMs(d int64) string { return strconv.FormatFloat(float64(d)/float64(time.Second/time.Millisecond), 'f', -1, 64) } diff --git a/pkg/querier/tripperware/queryrange/query_range_middlewares.go b/pkg/querier/tripperware/queryrange/query_range_middlewares.go index b84d0f83ed..3e02ced959 100644 --- a/pkg/querier/tripperware/queryrange/query_range_middlewares.go +++ b/pkg/querier/tripperware/queryrange/query_range_middlewares.go @@ -96,7 +96,7 @@ func Middlewares( var c cache.Cache if cfg.CacheResults { shouldCache := func(r tripperware.Request) bool { - if v, ok := r.(*PrometheusRequest); ok { + if v, ok := r.(*tripperware.PrometheusRequest); ok { return !v.CachingOptions.Disabled } return false diff --git a/pkg/querier/tripperware/queryrange/query_range_test.go b/pkg/querier/tripperware/queryrange/query_range_test.go index 9f0f3500de..0b4b6e42a9 100644 --- a/pkg/querier/tripperware/queryrange/query_range_test.go +++ b/pkg/querier/tripperware/queryrange/query_range_test.go @@ -92,25 +92,31 @@ func TestRequest(t *testing.T) { func TestResponse(t *testing.T) { t.Parallel() testCases := []struct { - promBody *PrometheusResponse + promBody *tripperware.PrometheusResponse jsonBody string expectedDecodeErr error cancelCtxBeforeDecode bool isProtobuf bool }{ { - promBody: &PrometheusResponse{ + promBody: &tripperware.PrometheusResponse{ Status: "success", - Data: PrometheusData{ + Data: tripperware.PrometheusData{ ResultType: model.ValMatrix.String(), - Result: []tripperware.SampleStream{ - { - Labels: []cortexpb.LabelAdapter{ - {Name: "foo", Value: "bar"}, - }, - Samples: []cortexpb.Sample{ - {Value: 137, TimestampMs: 1536673680000}, - {Value: 137, TimestampMs: 1536673780000}, + Result: tripperware.PrometheusQueryResult{ + Result: &tripperware.PrometheusQueryResult_Matrix{ + Matrix: &tripperware.Matrix{ + SampleStreams: []tripperware.SampleStream{ + { + Labels: []cortexpb.LabelAdapter{ + {Name: "foo", Value: "bar"}, + }, + Samples: []cortexpb.Sample{ + {Value: 137, TimestampMs: 1536673680000}, + {Value: 137, TimestampMs: 1536673780000}, + }, + }, + }, }, }, }, @@ -120,18 +126,24 @@ func TestResponse(t *testing.T) { isProtobuf: true, }, { - promBody: &PrometheusResponse{ + promBody: &tripperware.PrometheusResponse{ Status: "success", - Data: PrometheusData{ + Data: tripperware.PrometheusData{ ResultType: model.ValMatrix.String(), - Result: []tripperware.SampleStream{ - { - Labels: []cortexpb.LabelAdapter{ - {Name: "foo", Value: "bar"}, - }, - Samples: []cortexpb.Sample{ - {Value: 137, TimestampMs: 1536673680000}, - {Value: 137, TimestampMs: 1536673780000}, + Result: tripperware.PrometheusQueryResult{ + Result: &tripperware.PrometheusQueryResult_Matrix{ + Matrix: &tripperware.Matrix{ + SampleStreams: []tripperware.SampleStream{ + { + Labels: []cortexpb.LabelAdapter{ + {Name: "foo", Value: "bar"}, + }, + Samples: []cortexpb.Sample{ + {Value: 137, TimestampMs: 1536673680000}, + {Value: 137, TimestampMs: 1536673780000}, + }, + }, + }, }, }, }, @@ -142,18 +154,24 @@ func 
TestResponse(t *testing.T) { isProtobuf: true, }, { - promBody: &PrometheusResponse{ + promBody: &tripperware.PrometheusResponse{ Status: "success", - Data: PrometheusData{ + Data: tripperware.PrometheusData{ ResultType: model.ValMatrix.String(), - Result: []tripperware.SampleStream{ - { - Labels: []cortexpb.LabelAdapter{ - {Name: "foo", Value: "bar"}, - }, - Samples: []cortexpb.Sample{ - {Value: 137, TimestampMs: 1536673680000}, - {Value: 137, TimestampMs: 1536673780000}, + Result: tripperware.PrometheusQueryResult{ + Result: &tripperware.PrometheusQueryResult_Matrix{ + Matrix: &tripperware.Matrix{ + SampleStreams: []tripperware.SampleStream{ + { + Labels: []cortexpb.LabelAdapter{ + {Name: "foo", Value: "bar"}, + }, + Samples: []cortexpb.Sample{ + {Value: 137, TimestampMs: 1536673680000}, + {Value: 137, TimestampMs: 1536673780000}, + }, + }, + }, }, }, }, @@ -163,18 +181,24 @@ func TestResponse(t *testing.T) { isProtobuf: false, }, { - promBody: &PrometheusResponse{ + promBody: &tripperware.PrometheusResponse{ Status: "success", - Data: PrometheusData{ + Data: tripperware.PrometheusData{ ResultType: model.ValMatrix.String(), - Result: []tripperware.SampleStream{ - { - Labels: []cortexpb.LabelAdapter{ - {Name: "foo", Value: "bar"}, - }, - Samples: []cortexpb.Sample{ - {Value: 137, TimestampMs: 1536673680000}, - {Value: 137, TimestampMs: 1536673780000}, + Result: tripperware.PrometheusQueryResult{ + Result: &tripperware.PrometheusQueryResult_Matrix{ + Matrix: &tripperware.Matrix{ + SampleStreams: []tripperware.SampleStream{ + { + Labels: []cortexpb.LabelAdapter{ + {Name: "foo", Value: "bar"}, + }, + Samples: []cortexpb.Sample{ + {Value: 137, TimestampMs: 1536673680000}, + {Value: 137, TimestampMs: 1536673780000}, + }, + }, + }, }, }, }, @@ -185,19 +209,25 @@ func TestResponse(t *testing.T) { isProtobuf: false, }, { - promBody: &PrometheusResponse{ + promBody: &tripperware.PrometheusResponse{ Status: "success", Warnings: []string{"test-warn"}, - Data: PrometheusData{ + Data: tripperware.PrometheusData{ ResultType: model.ValMatrix.String(), - Result: []tripperware.SampleStream{ - { - Labels: []cortexpb.LabelAdapter{ - {Name: "foo", Value: "bar"}, - }, - Samples: []cortexpb.Sample{ - {Value: 137, TimestampMs: 1536673680000}, - {Value: 137, TimestampMs: 1536673780000}, + Result: tripperware.PrometheusQueryResult{ + Result: &tripperware.PrometheusQueryResult_Matrix{ + Matrix: &tripperware.Matrix{ + SampleStreams: []tripperware.SampleStream{ + { + Labels: []cortexpb.LabelAdapter{ + {Name: "foo", Value: "bar"}, + }, + Samples: []cortexpb.Sample{ + {Value: 137, TimestampMs: 1536673680000}, + {Value: 137, TimestampMs: 1536673780000}, + }, + }, + }, }, }, }, @@ -207,19 +237,25 @@ func TestResponse(t *testing.T) { isProtobuf: true, }, { - promBody: &PrometheusResponse{ + promBody: &tripperware.PrometheusResponse{ Status: "success", Warnings: []string{"test-warn"}, - Data: PrometheusData{ + Data: tripperware.PrometheusData{ ResultType: model.ValMatrix.String(), - Result: []tripperware.SampleStream{ - { - Labels: []cortexpb.LabelAdapter{ - {Name: "foo", Value: "bar"}, - }, - Samples: []cortexpb.Sample{ - {Value: 137, TimestampMs: 1536673680000}, - {Value: 137, TimestampMs: 1536673780000}, + Result: tripperware.PrometheusQueryResult{ + Result: &tripperware.PrometheusQueryResult_Matrix{ + Matrix: &tripperware.Matrix{ + SampleStreams: []tripperware.SampleStream{ + { + Labels: []cortexpb.LabelAdapter{ + {Name: "foo", Value: "bar"}, + }, + Samples: []cortexpb.Sample{ + {Value: 137, TimestampMs: 
1536673680000}, + {Value: 137, TimestampMs: 1536673780000}, + }, + }, + }, }, }, }, @@ -284,24 +320,30 @@ func TestResponse(t *testing.T) { func TestResponseWithStats(t *testing.T) { t.Parallel() for i, tc := range []struct { - promBody *PrometheusResponse + promBody *tripperware.PrometheusResponse jsonBody string isProtobuf bool }{ { jsonBody: `{"status":"success","data":{"resultType":"matrix","result":[{"metric":{"foo":"bar"},"values":[[1536673680,"137"],[1536673780,"137"]]}],"stats":{"samples":{"totalQueryableSamples":10,"totalQueryableSamplesPerStep":[[1536673680,5],[1536673780,5]]}}}}`, - promBody: &PrometheusResponse{ + promBody: &tripperware.PrometheusResponse{ Status: "success", - Data: PrometheusData{ + Data: tripperware.PrometheusData{ ResultType: model.ValMatrix.String(), - Result: []tripperware.SampleStream{ - { - Labels: []cortexpb.LabelAdapter{ - {Name: "foo", Value: "bar"}, - }, - Samples: []cortexpb.Sample{ - {Value: 137, TimestampMs: 1536673680000}, - {Value: 137, TimestampMs: 1536673780000}, + Result: tripperware.PrometheusQueryResult{ + Result: &tripperware.PrometheusQueryResult_Matrix{ + Matrix: &tripperware.Matrix{ + SampleStreams: []tripperware.SampleStream{ + { + Labels: []cortexpb.LabelAdapter{ + {Name: "foo", Value: "bar"}, + }, + Samples: []cortexpb.Sample{ + {Value: 137, TimestampMs: 1536673680000}, + {Value: 137, TimestampMs: 1536673780000}, + }, + }, + }, }, }, }, @@ -320,18 +362,24 @@ func TestResponseWithStats(t *testing.T) { }, { jsonBody: `{"status":"success","data":{"resultType":"matrix","result":[{"metric":{"foo":"bar"},"values":[[1536673680,"137"],[1536673780,"137"]]}],"stats":{"samples":{"totalQueryableSamples":10,"totalQueryableSamplesPerStep":[[1536673680,5],[1536673780,5]]}}}}`, - promBody: &PrometheusResponse{ + promBody: &tripperware.PrometheusResponse{ Status: "success", - Data: PrometheusData{ + Data: tripperware.PrometheusData{ ResultType: model.ValMatrix.String(), - Result: []tripperware.SampleStream{ - { - Labels: []cortexpb.LabelAdapter{ - {Name: "foo", Value: "bar"}, - }, - Samples: []cortexpb.Sample{ - {Value: 137, TimestampMs: 1536673680000}, - {Value: 137, TimestampMs: 1536673780000}, + Result: tripperware.PrometheusQueryResult{ + Result: &tripperware.PrometheusQueryResult_Matrix{ + Matrix: &tripperware.Matrix{ + SampleStreams: []tripperware.SampleStream{ + { + Labels: []cortexpb.LabelAdapter{ + {Name: "foo", Value: "bar"}, + }, + Samples: []cortexpb.Sample{ + {Value: 137, TimestampMs: 1536673680000}, + {Value: 137, TimestampMs: 1536673780000}, + }, + }, + }, }, }, }, @@ -402,100 +450,135 @@ func TestMergeAPIResponses(t *testing.T) { { name: "No responses shouldn't panic and return a non-null result and result type.", input: []tripperware.Response{}, - expected: &PrometheusResponse{ + expected: &tripperware.PrometheusResponse{ Status: StatusSuccess, - Data: PrometheusData{ + Data: tripperware.PrometheusData{ ResultType: matrix, - Result: []tripperware.SampleStream{}, + Result: tripperware.PrometheusQueryResult{ + Result: &tripperware.PrometheusQueryResult_Matrix{}, + }, }, }, }, { name: "A single empty response shouldn't panic.", input: []tripperware.Response{ - &PrometheusResponse{ - Data: PrometheusData{ + &tripperware.PrometheusResponse{ + Status: StatusSuccess, + Data: tripperware.PrometheusData{ ResultType: matrix, - Result: []tripperware.SampleStream{}, + Result: tripperware.PrometheusQueryResult{ + Result: &tripperware.PrometheusQueryResult_Matrix{}, + }, }, }, }, - expected: &PrometheusResponse{ + expected: 
&tripperware.PrometheusResponse{ Status: StatusSuccess, - Data: PrometheusData{ + Data: tripperware.PrometheusData{ ResultType: matrix, - Result: []tripperware.SampleStream{}, + Result: tripperware.PrometheusQueryResult{ + Result: &tripperware.PrometheusQueryResult_Matrix{}, + }, }, }, }, { name: "Multiple empty responses shouldn't panic.", input: []tripperware.Response{ - &PrometheusResponse{ - Data: PrometheusData{ + &tripperware.PrometheusResponse{ + Data: tripperware.PrometheusData{ ResultType: matrix, - Result: []tripperware.SampleStream{}, + Result: tripperware.PrometheusQueryResult{ + Result: &tripperware.PrometheusQueryResult_Matrix{}, + }, }, }, - &PrometheusResponse{ - Data: PrometheusData{ + &tripperware.PrometheusResponse{ + Data: tripperware.PrometheusData{ ResultType: matrix, - Result: []tripperware.SampleStream{}, + Result: tripperware.PrometheusQueryResult{ + Result: &tripperware.PrometheusQueryResult_Matrix{}, + }, }, }, }, - expected: &PrometheusResponse{ + expected: &tripperware.PrometheusResponse{ Status: StatusSuccess, - Data: PrometheusData{ + Data: tripperware.PrometheusData{ ResultType: matrix, - Result: []tripperware.SampleStream{}, + Result: tripperware.PrometheusQueryResult{ + Result: &tripperware.PrometheusQueryResult_Matrix{ + Matrix: &tripperware.Matrix{ + SampleStreams: []tripperware.SampleStream{}, + }, + }, + }, }, }, }, { name: "Basic merging of two responses.", input: []tripperware.Response{ - &PrometheusResponse{ - Data: PrometheusData{ + &tripperware.PrometheusResponse{ + Data: tripperware.PrometheusData{ ResultType: matrix, - Result: []tripperware.SampleStream{ - { - Labels: []cortexpb.LabelAdapter{}, - Samples: []cortexpb.Sample{ - {Value: 0, TimestampMs: 0}, - {Value: 1, TimestampMs: 1}, + Result: tripperware.PrometheusQueryResult{ + Result: &tripperware.PrometheusQueryResult_Matrix{ + Matrix: &tripperware.Matrix{ + SampleStreams: []tripperware.SampleStream{ + { + Labels: []cortexpb.LabelAdapter{}, + Samples: []cortexpb.Sample{ + {Value: 0, TimestampMs: 0}, + {Value: 1, TimestampMs: 1}, + }, + }, + }, }, }, }, }, }, - &PrometheusResponse{ - Data: PrometheusData{ + &tripperware.PrometheusResponse{ + Data: tripperware.PrometheusData{ ResultType: matrix, - Result: []tripperware.SampleStream{ - { - Labels: []cortexpb.LabelAdapter{}, - Samples: []cortexpb.Sample{ - {Value: 2, TimestampMs: 2}, - {Value: 3, TimestampMs: 3}, + Result: tripperware.PrometheusQueryResult{ + Result: &tripperware.PrometheusQueryResult_Matrix{ + Matrix: &tripperware.Matrix{ + SampleStreams: []tripperware.SampleStream{ + { + Labels: []cortexpb.LabelAdapter{}, + Samples: []cortexpb.Sample{ + {Value: 2, TimestampMs: 2}, + {Value: 3, TimestampMs: 3}, + }, + }, + }, }, }, }, }, }, }, - expected: &PrometheusResponse{ + expected: &tripperware.PrometheusResponse{ Status: StatusSuccess, - Data: PrometheusData{ + Data: tripperware.PrometheusData{ ResultType: matrix, - Result: []tripperware.SampleStream{ - { - Labels: []cortexpb.LabelAdapter{}, - Samples: []cortexpb.Sample{ - {Value: 0, TimestampMs: 0}, - {Value: 1, TimestampMs: 1}, - {Value: 2, TimestampMs: 2}, - {Value: 3, TimestampMs: 3}, + Result: tripperware.PrometheusQueryResult{ + Result: &tripperware.PrometheusQueryResult_Matrix{ + Matrix: &tripperware.Matrix{ + SampleStreams: []tripperware.SampleStream{ + { + Labels: []cortexpb.LabelAdapter{}, + Samples: []cortexpb.Sample{ + {Value: 0, TimestampMs: 0}, + {Value: 1, TimestampMs: 1}, + {Value: 2, TimestampMs: 2}, + {Value: 3, TimestampMs: 3}, + }, + }, + }, }, }, }, @@ -508,18 +591,24 
@@ func TestMergeAPIResponses(t *testing.T) { mustParse(t, `{"status":"success","data":{"resultType":"matrix","result":[{"metric":{"a":"b","c":"d"},"values":[[0,"0"],[1,"1"]]}]}}`), mustParse(t, `{"status":"success","data":{"resultType":"matrix","result":[{"metric":{"c":"d","a":"b"},"values":[[2,"2"],[3,"3"]]}]}}`), }, - expected: &PrometheusResponse{ + expected: &tripperware.PrometheusResponse{ Status: StatusSuccess, - Data: PrometheusData{ + Data: tripperware.PrometheusData{ ResultType: matrix, - Result: []tripperware.SampleStream{ - { - Labels: []cortexpb.LabelAdapter{{Name: "a", Value: "b"}, {Name: "c", Value: "d"}}, - Samples: []cortexpb.Sample{ - {Value: 0, TimestampMs: 0}, - {Value: 1, TimestampMs: 1000}, - {Value: 2, TimestampMs: 2000}, - {Value: 3, TimestampMs: 3000}, + Result: tripperware.PrometheusQueryResult{ + Result: &tripperware.PrometheusQueryResult_Matrix{ + Matrix: &tripperware.Matrix{ + SampleStreams: []tripperware.SampleStream{ + { + Labels: []cortexpb.LabelAdapter{{Name: "a", Value: "b"}, {Name: "c", Value: "d"}}, + Samples: []cortexpb.Sample{ + {Value: 0, TimestampMs: 0}, + {Value: 1, TimestampMs: 1000}, + {Value: 2, TimestampMs: 2000}, + {Value: 3, TimestampMs: 3000}, + }, + }, + }, }, }, }, @@ -532,17 +621,23 @@ func TestMergeAPIResponses(t *testing.T) { mustParse(t, `{"status":"success","data":{"resultType":"matrix","result":[{"metric":{"a":"b","c":"d"},"values":[[1,"1"],[2,"2"]]}]}}`), mustParse(t, `{"status":"success","data":{"resultType":"matrix","result":[{"metric":{"c":"d","a":"b"},"values":[[2,"2"],[3,"3"]]}]}}`), }, - expected: &PrometheusResponse{ + expected: &tripperware.PrometheusResponse{ Status: StatusSuccess, - Data: PrometheusData{ + Data: tripperware.PrometheusData{ ResultType: matrix, - Result: []tripperware.SampleStream{ - { - Labels: []cortexpb.LabelAdapter{{Name: "a", Value: "b"}, {Name: "c", Value: "d"}}, - Samples: []cortexpb.Sample{ - {Value: 1, TimestampMs: 1000}, - {Value: 2, TimestampMs: 2000}, - {Value: 3, TimestampMs: 3000}, + Result: tripperware.PrometheusQueryResult{ + Result: &tripperware.PrometheusQueryResult_Matrix{ + Matrix: &tripperware.Matrix{ + SampleStreams: []tripperware.SampleStream{ + { + Labels: []cortexpb.LabelAdapter{{Name: "a", Value: "b"}, {Name: "c", Value: "d"}}, + Samples: []cortexpb.Sample{ + {Value: 1, TimestampMs: 1000}, + {Value: 2, TimestampMs: 2000}, + {Value: 3, TimestampMs: 3000}, + }, + }, + }, }, }, }, @@ -555,19 +650,25 @@ func TestMergeAPIResponses(t *testing.T) { mustParse(t, `{"status":"success","data":{"resultType":"matrix","result":[{"metric":{"a":"b","c":"d"},"values":[[1,"1"],[2,"2"],[3,"3"]]}]}}`), mustParse(t, `{"status":"success","data":{"resultType":"matrix","result":[{"metric":{"c":"d","a":"b"},"values":[[2,"2"],[3,"3"],[4,"4"],[5,"5"]]}]}}`), }, - expected: &PrometheusResponse{ + expected: &tripperware.PrometheusResponse{ Status: StatusSuccess, - Data: PrometheusData{ + Data: tripperware.PrometheusData{ ResultType: matrix, - Result: []tripperware.SampleStream{ - { - Labels: []cortexpb.LabelAdapter{{Name: "a", Value: "b"}, {Name: "c", Value: "d"}}, - Samples: []cortexpb.Sample{ - {Value: 1, TimestampMs: 1000}, - {Value: 2, TimestampMs: 2000}, - {Value: 3, TimestampMs: 3000}, - {Value: 4, TimestampMs: 4000}, - {Value: 5, TimestampMs: 5000}, + Result: tripperware.PrometheusQueryResult{ + Result: &tripperware.PrometheusQueryResult_Matrix{ + Matrix: &tripperware.Matrix{ + SampleStreams: []tripperware.SampleStream{ + { + Labels: []cortexpb.LabelAdapter{{Name: "a", Value: "b"}, {Name: "c", Value: 
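Note: these cases pin down the merge semantics the fixtures imply: when two responses overlap, samples are de-duplicated by timestamp and the union stays sorted. A hedged standalone sketch of that rule (illustration only, not the PR's actual merge code):

package queryrange

import "github.com/cortexproject/cortex/pkg/cortexpb"

// mergeSamples merges two timestamp-sorted sample slices, keeping one sample
// per timestamp, matching what the expected values in these cases suggest.
func mergeSamples(a, b []cortexpb.Sample) []cortexpb.Sample {
	out := make([]cortexpb.Sample, 0, len(a)+len(b))
	i, j := 0, 0
	for i < len(a) && j < len(b) {
		switch {
		case a[i].TimestampMs < b[j].TimestampMs:
			out = append(out, a[i])
			i++
		case a[i].TimestampMs > b[j].TimestampMs:
			out = append(out, b[j])
			j++
		default: // same timestamp in both inputs: keep a single copy
			out = append(out, a[i])
			i++
			j++
		}
	}
	out = append(out, a[i:]...)
	return append(out, b[j:]...)
}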
"d"}}, + Samples: []cortexpb.Sample{ + {Value: 1, TimestampMs: 1000}, + {Value: 2, TimestampMs: 2000}, + {Value: 3, TimestampMs: 3000}, + {Value: 4, TimestampMs: 4000}, + {Value: 5, TimestampMs: 5000}, + }, + }, + }, }, }, }, @@ -580,16 +681,22 @@ func TestMergeAPIResponses(t *testing.T) { mustParse(t, `{"status":"success","warnings":["warning1","warning2"],"data":{"resultType":"matrix","result":[{"metric":{"a":"b","c":"d"},"values":[[1,"1"]]}]}}`), mustParse(t, `{"status":"success","warnings":["warning1","warning3"],"data":{"resultType":"matrix","result":[{"metric":{"a":"b","c":"d"},"values":[[1,"1"]]}]}}`), }, - expected: &PrometheusResponse{ + expected: &tripperware.PrometheusResponse{ Status: StatusSuccess, Warnings: []string{"warning1", "warning2", "warning3"}, - Data: PrometheusData{ + Data: tripperware.PrometheusData{ ResultType: matrix, - Result: []tripperware.SampleStream{ - { - Labels: []cortexpb.LabelAdapter{{Name: "a", Value: "b"}, {Name: "c", Value: "d"}}, - Samples: []cortexpb.Sample{ - {Value: 1, TimestampMs: 1000}, + Result: tripperware.PrometheusQueryResult{ + Result: &tripperware.PrometheusQueryResult_Matrix{ + Matrix: &tripperware.Matrix{ + SampleStreams: []tripperware.SampleStream{ + { + Labels: []cortexpb.LabelAdapter{{Name: "a", Value: "b"}, {Name: "c", Value: "d"}}, + Samples: []cortexpb.Sample{ + {Value: 1, TimestampMs: 1000}, + }, + }, + }, }, }, }, @@ -602,18 +709,24 @@ func TestMergeAPIResponses(t *testing.T) { mustParse(t, `{"status":"success","data":{"resultType":"matrix","result":[{"metric":{"a":"b","c":"d"},"values":[[2,"2"],[3,"3"]]}]}}`), mustParse(t, `{"status":"success","data":{"resultType":"matrix","result":[{"metric":{"c":"d","a":"b"},"values":[[2,"2"],[3,"3"],[4,"4"],[5,"5"]]}]}}`), }, - expected: &PrometheusResponse{ + expected: &tripperware.PrometheusResponse{ Status: StatusSuccess, - Data: PrometheusData{ + Data: tripperware.PrometheusData{ ResultType: matrix, - Result: []tripperware.SampleStream{ - { - Labels: []cortexpb.LabelAdapter{{Name: "a", Value: "b"}, {Name: "c", Value: "d"}}, - Samples: []cortexpb.Sample{ - {Value: 2, TimestampMs: 2000}, - {Value: 3, TimestampMs: 3000}, - {Value: 4, TimestampMs: 4000}, - {Value: 5, TimestampMs: 5000}, + Result: tripperware.PrometheusQueryResult{ + Result: &tripperware.PrometheusQueryResult_Matrix{ + Matrix: &tripperware.Matrix{ + SampleStreams: []tripperware.SampleStream{ + { + Labels: []cortexpb.LabelAdapter{{Name: "a", Value: "b"}, {Name: "c", Value: "d"}}, + Samples: []cortexpb.Sample{ + {Value: 2, TimestampMs: 2000}, + {Value: 3, TimestampMs: 3000}, + {Value: 4, TimestampMs: 4000}, + {Value: 5, TimestampMs: 5000}, + }, + }, + }, }, }, }, @@ -623,29 +736,41 @@ func TestMergeAPIResponses(t *testing.T) { { name: "Context cancel should cancel merge", input: []tripperware.Response{ - &PrometheusResponse{ - Data: PrometheusData{ + &tripperware.PrometheusResponse{ + Data: tripperware.PrometheusData{ ResultType: matrix, - Result: []tripperware.SampleStream{ - { - Labels: []cortexpb.LabelAdapter{}, - Samples: []cortexpb.Sample{ - {Value: 0, TimestampMs: 0}, - {Value: 1, TimestampMs: 1}, + Result: tripperware.PrometheusQueryResult{ + Result: &tripperware.PrometheusQueryResult_Matrix{ + Matrix: &tripperware.Matrix{ + SampleStreams: []tripperware.SampleStream{ + { + Labels: []cortexpb.LabelAdapter{}, + Samples: []cortexpb.Sample{ + {Value: 0, TimestampMs: 0}, + {Value: 1, TimestampMs: 1}, + }, + }, + }, }, }, }, }, }, - &PrometheusResponse{ - Data: PrometheusData{ + &tripperware.PrometheusResponse{ + Data: 
tripperware.PrometheusData{ ResultType: matrix, - Result: []tripperware.SampleStream{ - { - Labels: []cortexpb.LabelAdapter{}, - Samples: []cortexpb.Sample{ - {Value: 2, TimestampMs: 2}, - {Value: 3, TimestampMs: 3}, + Result: tripperware.PrometheusQueryResult{ + Result: &tripperware.PrometheusQueryResult_Matrix{ + Matrix: &tripperware.Matrix{ + SampleStreams: []tripperware.SampleStream{ + { + Labels: []cortexpb.LabelAdapter{}, + Samples: []cortexpb.Sample{ + {Value: 2, TimestampMs: 2}, + {Value: 3, TimestampMs: 3}, + }, + }, + }, }, }, }, @@ -658,62 +783,91 @@ func TestMergeAPIResponses(t *testing.T) { { name: "[stats] A single empty response shouldn't panic.", input: []tripperware.Response{ - &PrometheusResponse{ - Data: PrometheusData{ + &tripperware.PrometheusResponse{ + Status: StatusSuccess, + Data: tripperware.PrometheusData{ ResultType: matrix, - Result: []tripperware.SampleStream{}, - Stats: &tripperware.PrometheusResponseStats{Samples: &tripperware.PrometheusResponseSamplesStats{}}, + Result: tripperware.PrometheusQueryResult{ + Result: &tripperware.PrometheusQueryResult_Matrix{}, + }, + Stats: &tripperware.PrometheusResponseStats{Samples: &tripperware.PrometheusResponseSamplesStats{}}, }, }, }, - expected: &PrometheusResponse{ + expected: &tripperware.PrometheusResponse{ Status: StatusSuccess, - Data: PrometheusData{ + Data: tripperware.PrometheusData{ ResultType: matrix, - Result: []tripperware.SampleStream{}, - Stats: &tripperware.PrometheusResponseStats{Samples: &tripperware.PrometheusResponseSamplesStats{}}, + Result: tripperware.PrometheusQueryResult{ + Result: &tripperware.PrometheusQueryResult_Matrix{}, + }, + Stats: &tripperware.PrometheusResponseStats{Samples: &tripperware.PrometheusResponseSamplesStats{}}, }, }, }, { name: "[stats] Multiple empty responses shouldn't panic.", input: []tripperware.Response{ - &PrometheusResponse{ - Data: PrometheusData{ + &tripperware.PrometheusResponse{ + Data: tripperware.PrometheusData{ ResultType: matrix, - Result: []tripperware.SampleStream{}, - Stats: &tripperware.PrometheusResponseStats{Samples: &tripperware.PrometheusResponseSamplesStats{}}, + Result: tripperware.PrometheusQueryResult{ + Result: &tripperware.PrometheusQueryResult_Matrix{ + Matrix: &tripperware.Matrix{ + SampleStreams: []tripperware.SampleStream{}, + }, + }, + }, + Stats: &tripperware.PrometheusResponseStats{Samples: &tripperware.PrometheusResponseSamplesStats{}}, }, }, - &PrometheusResponse{ - Data: PrometheusData{ + &tripperware.PrometheusResponse{ + Data: tripperware.PrometheusData{ ResultType: matrix, - Result: []tripperware.SampleStream{}, - Stats: &tripperware.PrometheusResponseStats{Samples: &tripperware.PrometheusResponseSamplesStats{}}, + Result: tripperware.PrometheusQueryResult{ + Result: &tripperware.PrometheusQueryResult_Matrix{ + Matrix: &tripperware.Matrix{ + SampleStreams: []tripperware.SampleStream{}, + }, + }, + }, + Stats: &tripperware.PrometheusResponseStats{Samples: &tripperware.PrometheusResponseSamplesStats{}}, }, }, }, - expected: &PrometheusResponse{ + expected: &tripperware.PrometheusResponse{ Status: StatusSuccess, - Data: PrometheusData{ + Data: tripperware.PrometheusData{ ResultType: matrix, - Result: []tripperware.SampleStream{}, - Stats: &tripperware.PrometheusResponseStats{Samples: &tripperware.PrometheusResponseSamplesStats{}}, + Result: tripperware.PrometheusQueryResult{ + Result: &tripperware.PrometheusQueryResult_Matrix{ + Matrix: &tripperware.Matrix{ + SampleStreams: []tripperware.SampleStream{}, + }, + }, + }, + Stats: 
&tripperware.PrometheusResponseStats{Samples: &tripperware.PrometheusResponseSamplesStats{}}, }, }, }, { name: "[stats] Basic merging of two responses.", input: []tripperware.Response{ - &PrometheusResponse{ - Data: PrometheusData{ + &tripperware.PrometheusResponse{ + Data: tripperware.PrometheusData{ ResultType: matrix, - Result: []tripperware.SampleStream{ - { - Labels: []cortexpb.LabelAdapter{}, - Samples: []cortexpb.Sample{ - {Value: 0, TimestampMs: 0}, - {Value: 1, TimestampMs: 1}, + Result: tripperware.PrometheusQueryResult{ + Result: &tripperware.PrometheusQueryResult_Matrix{ + Matrix: &tripperware.Matrix{ + SampleStreams: []tripperware.SampleStream{ + { + Labels: []cortexpb.LabelAdapter{}, + Samples: []cortexpb.Sample{ + {Value: 0, TimestampMs: 0}, + {Value: 1, TimestampMs: 1}, + }, + }, + }, }, }, }, @@ -726,15 +880,21 @@ func TestMergeAPIResponses(t *testing.T) { }}, }, }, - &PrometheusResponse{ - Data: PrometheusData{ + &tripperware.PrometheusResponse{ + Data: tripperware.PrometheusData{ ResultType: matrix, - Result: []tripperware.SampleStream{ - { - Labels: []cortexpb.LabelAdapter{}, - Samples: []cortexpb.Sample{ - {Value: 2, TimestampMs: 2}, - {Value: 3, TimestampMs: 3}, + Result: tripperware.PrometheusQueryResult{ + Result: &tripperware.PrometheusQueryResult_Matrix{ + Matrix: &tripperware.Matrix{ + SampleStreams: []tripperware.SampleStream{ + { + Labels: []cortexpb.LabelAdapter{}, + Samples: []cortexpb.Sample{ + {Value: 2, TimestampMs: 2}, + {Value: 3, TimestampMs: 3}, + }, + }, + }, }, }, }, @@ -748,18 +908,24 @@ func TestMergeAPIResponses(t *testing.T) { }, }, }, - expected: &PrometheusResponse{ + expected: &tripperware.PrometheusResponse{ Status: StatusSuccess, - Data: PrometheusData{ + Data: tripperware.PrometheusData{ ResultType: matrix, - Result: []tripperware.SampleStream{ - { - Labels: []cortexpb.LabelAdapter{}, - Samples: []cortexpb.Sample{ - {Value: 0, TimestampMs: 0}, - {Value: 1, TimestampMs: 1}, - {Value: 2, TimestampMs: 2}, - {Value: 3, TimestampMs: 3}, + Result: tripperware.PrometheusQueryResult{ + Result: &tripperware.PrometheusQueryResult_Matrix{ + Matrix: &tripperware.Matrix{ + SampleStreams: []tripperware.SampleStream{ + { + Labels: []cortexpb.LabelAdapter{}, + Samples: []cortexpb.Sample{ + {Value: 0, TimestampMs: 0}, + {Value: 1, TimestampMs: 1}, + {Value: 2, TimestampMs: 2}, + {Value: 3, TimestampMs: 3}, + }, + }, + }, }, }, }, @@ -781,17 +947,23 @@ func TestMergeAPIResponses(t *testing.T) { mustParse(t, `{"status":"success","data":{"resultType":"matrix","result":[{"metric":{"a":"b","c":"d"},"values":[[1,"1"],[2,"2"]]}],"stats":{"samples":{"totalQueryableSamples":10,"totalQueryableSamplesPerStep":[[1,5],[2,5]]}}}}`), mustParse(t, `{"status":"success","data":{"resultType":"matrix","result":[{"metric":{"a":"b","c":"d"},"values":[[2,"2"],[3,"3"]]}],"stats":{"samples":{"totalQueryableSamples":20,"totalQueryableSamplesPerStep":[[2,5],[3,15]]}}}}`), }, - expected: &PrometheusResponse{ + expected: &tripperware.PrometheusResponse{ Status: StatusSuccess, - Data: PrometheusData{ + Data: tripperware.PrometheusData{ ResultType: matrix, - Result: []tripperware.SampleStream{ - { - Labels: []cortexpb.LabelAdapter{{Name: "a", Value: "b"}, {Name: "c", Value: "d"}}, - Samples: []cortexpb.Sample{ - {Value: 1, TimestampMs: 1000}, - {Value: 2, TimestampMs: 2000}, - {Value: 3, TimestampMs: 3000}, + Result: tripperware.PrometheusQueryResult{ + Result: &tripperware.PrometheusQueryResult_Matrix{ + Matrix: &tripperware.Matrix{ + SampleStreams: []tripperware.SampleStream{ + { + 
Labels: []cortexpb.LabelAdapter{{Name: "a", Value: "b"}, {Name: "c", Value: "d"}}, + Samples: []cortexpb.Sample{ + {Value: 1, TimestampMs: 1000}, + {Value: 2, TimestampMs: 2000}, + {Value: 3, TimestampMs: 3000}, + }, + }, + }, }, }, }, @@ -813,21 +985,27 @@ func TestMergeAPIResponses(t *testing.T) { mustParse(t, `{"status":"success","data":{"resultType":"matrix","result":[{"metric":{"a":"b","c":"d"},"values":[[1,"1"],[2,"2"],[3,"3"],[4,"4"]]}],"stats":{"samples":{"totalQueryableSamples":6,"totalQueryableSamplesPerStep":[[1,1],[2,2],[3,3],[4,4]]}}}}`), mustParse(t, `{"status":"success","data":{"resultType":"matrix","result":[{"metric":{"a":"b","c":"d"},"values":[[5,"5"],[6,"6"],[7,"7"]]}],"stats":{"samples":{"totalQueryableSamples":18,"totalQueryableSamplesPerStep":[[5,5],[6,6],[7,7]]}}}}`), }, - expected: &PrometheusResponse{ + expected: &tripperware.PrometheusResponse{ Status: StatusSuccess, - Data: PrometheusData{ + Data: tripperware.PrometheusData{ ResultType: matrix, - Result: []tripperware.SampleStream{ - { - Labels: []cortexpb.LabelAdapter{{Name: "a", Value: "b"}, {Name: "c", Value: "d"}}, - Samples: []cortexpb.Sample{ - {Value: 1, TimestampMs: 1000}, - {Value: 2, TimestampMs: 2000}, - {Value: 3, TimestampMs: 3000}, - {Value: 4, TimestampMs: 4000}, - {Value: 5, TimestampMs: 5000}, - {Value: 6, TimestampMs: 6000}, - {Value: 7, TimestampMs: 7000}, + Result: tripperware.PrometheusQueryResult{ + Result: &tripperware.PrometheusQueryResult_Matrix{ + Matrix: &tripperware.Matrix{ + SampleStreams: []tripperware.SampleStream{ + { + Labels: []cortexpb.LabelAdapter{{Name: "a", Value: "b"}, {Name: "c", Value: "d"}}, + Samples: []cortexpb.Sample{ + {Value: 1, TimestampMs: 1000}, + {Value: 2, TimestampMs: 2000}, + {Value: 3, TimestampMs: 3000}, + {Value: 4, TimestampMs: 4000}, + {Value: 5, TimestampMs: 5000}, + {Value: 6, TimestampMs: 6000}, + {Value: 7, TimestampMs: 7000}, + }, + }, + }, }, }, }, @@ -852,19 +1030,25 @@ func TestMergeAPIResponses(t *testing.T) { mustParse(t, `{"status":"success","data":{"resultType":"matrix","result":[{"metric":{"a":"b","c":"d"},"values":[[1,"1"],[2,"2"],[3,"3"]]}],"stats":{"samples":{"totalQueryableSamples":6,"totalQueryableSamplesPerStep":[[1,1],[2,2],[3,3]]}}}}`), mustParse(t, `{"status":"success","data":{"resultType":"matrix","result":[{"metric":{"c":"d","a":"b"},"values":[[2,"2"],[3,"3"],[4,"4"],[5,"5"]]}],"stats":{"samples":{"totalQueryableSamples":20,"totalQueryableSamplesPerStep":[[2,2],[3,3],[4,4],[5,5]]}}}}`), }, - expected: &PrometheusResponse{ + expected: &tripperware.PrometheusResponse{ Status: StatusSuccess, - Data: PrometheusData{ + Data: tripperware.PrometheusData{ ResultType: matrix, - Result: []tripperware.SampleStream{ - { - Labels: []cortexpb.LabelAdapter{{Name: "a", Value: "b"}, {Name: "c", Value: "d"}}, - Samples: []cortexpb.Sample{ - {Value: 1, TimestampMs: 1000}, - {Value: 2, TimestampMs: 2000}, - {Value: 3, TimestampMs: 3000}, - {Value: 4, TimestampMs: 4000}, - {Value: 5, TimestampMs: 5000}, + Result: tripperware.PrometheusQueryResult{ + Result: &tripperware.PrometheusQueryResult_Matrix{ + Matrix: &tripperware.Matrix{ + SampleStreams: []tripperware.SampleStream{ + { + Labels: []cortexpb.LabelAdapter{{Name: "a", Value: "b"}, {Name: "c", Value: "d"}}, + Samples: []cortexpb.Sample{ + {Value: 1, TimestampMs: 1000}, + {Value: 2, TimestampMs: 2000}, + {Value: 3, TimestampMs: 3000}, + {Value: 4, TimestampMs: 4000}, + {Value: 5, TimestampMs: 5000}, + }, + }, + }, }, }, }, @@ -887,18 +1071,24 @@ func TestMergeAPIResponses(t *testing.T) { 
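Note: the stats fixtures carry a totalQueryableSamples count plus per-step [timestamp, value] pairs. A toy model of one plausible accounting rule, consistent with the sample de-duplication tested above (an assumption: the exact expected stats fall outside the visible hunks, and local types stand in for the tripperware stats messages):

package queryrange

// stepStat mirrors the [timestamp, value] pairs in these JSON fixtures.
type stepStat struct {
	TimestampMs int64
	Value       int64
}

// mergeStepStats counts each step once across overlapping responses and
// recomputes the total from the kept steps (illustration only).
func mergeStepStats(inputs ...[]stepStat) (total int64, merged []stepStat) {
	seen := make(map[int64]bool)
	for _, steps := range inputs {
		for _, s := range steps {
			if seen[s.TimestampMs] {
				continue // step already counted from an overlapping response
			}
			seen[s.TimestampMs] = true
			merged = append(merged, s)
			total += s.Value
		}
	}
	return total, merged
}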
mustParse(t, `{"status":"success","data":{"resultType":"matrix","result":[{"metric":{"a":"b","c":"d"},"values":[[2,"2"],[3,"3"]]}],"stats":{"samples":{"totalQueryableSamples":20,"totalQueryableSamplesPerStep":[[2,2],[3,3]]}}}}`), mustParse(t, `{"status":"success","data":{"resultType":"matrix","result":[{"metric":{"c":"d","a":"b"},"values":[[2,"2"],[3,"3"],[4,"4"],[5,"5"]]}],"stats":{"samples":{"totalQueryableSamples":20,"totalQueryableSamplesPerStep":[[2,2],[3,3],[4,4],[5,5]]}}}}`), }, - expected: &PrometheusResponse{ + expected: &tripperware.PrometheusResponse{ Status: StatusSuccess, - Data: PrometheusData{ + Data: tripperware.PrometheusData{ ResultType: matrix, - Result: []tripperware.SampleStream{ - { - Labels: []cortexpb.LabelAdapter{{Name: "a", Value: "b"}, {Name: "c", Value: "d"}}, - Samples: []cortexpb.Sample{ - {Value: 2, TimestampMs: 2000}, - {Value: 3, TimestampMs: 3000}, - {Value: 4, TimestampMs: 4000}, - {Value: 5, TimestampMs: 5000}, + Result: tripperware.PrometheusQueryResult{ + Result: &tripperware.PrometheusQueryResult_Matrix{ + Matrix: &tripperware.Matrix{ + SampleStreams: []tripperware.SampleStream{ + { + Labels: []cortexpb.LabelAdapter{{Name: "a", Value: "b"}, {Name: "c", Value: "d"}}, + Samples: []cortexpb.Sample{ + {Value: 2, TimestampMs: 2000}, + {Value: 3, TimestampMs: 3000}, + {Value: 4, TimestampMs: 4000}, + {Value: 5, TimestampMs: 5000}, + }, + }, + }, }, }, }, @@ -938,22 +1128,28 @@ func TestCompressedResponse(t *testing.T) { for i, tc := range []struct { compression string jsonBody string - promBody *PrometheusResponse + promBody *tripperware.PrometheusResponse status int err error }{ { compression: `gzip`, - promBody: &PrometheusResponse{ + promBody: &tripperware.PrometheusResponse{ Status: StatusSuccess, - Data: PrometheusData{ + Data: tripperware.PrometheusData{ ResultType: matrix, - Result: []tripperware.SampleStream{ - { - Labels: []cortexpb.LabelAdapter{{Name: "a", Value: "b"}, {Name: "c", Value: "d"}}, - Samples: []cortexpb.Sample{ - {Value: 2, TimestampMs: 2000}, - {Value: 3, TimestampMs: 3000}, + Result: tripperware.PrometheusQueryResult{ + Result: &tripperware.PrometheusQueryResult_Matrix{ + Matrix: &tripperware.Matrix{ + SampleStreams: []tripperware.SampleStream{ + { + Labels: []cortexpb.LabelAdapter{{Name: "a", Value: "b"}, {Name: "c", Value: "d"}}, + Samples: []cortexpb.Sample{ + {Value: 2, TimestampMs: 2000}, + {Value: 3, TimestampMs: 3000}, + }, + }, + }, }, }, }, @@ -972,16 +1168,22 @@ func TestCompressedResponse(t *testing.T) { }, { compression: `snappy`, - promBody: &PrometheusResponse{ + promBody: &tripperware.PrometheusResponse{ Status: StatusSuccess, - Data: PrometheusData{ + Data: tripperware.PrometheusData{ ResultType: matrix, - Result: []tripperware.SampleStream{ - { - Labels: []cortexpb.LabelAdapter{{Name: "a", Value: "b"}, {Name: "c", Value: "d"}}, - Samples: []cortexpb.Sample{ - {Value: 2, TimestampMs: 2000}, - {Value: 3, TimestampMs: 3000}, + Result: tripperware.PrometheusQueryResult{ + Result: &tripperware.PrometheusQueryResult_Matrix{ + Matrix: &tripperware.Matrix{ + SampleStreams: []tripperware.SampleStream{ + { + Labels: []cortexpb.LabelAdapter{{Name: "a", Value: "b"}, {Name: "c", Value: "d"}}, + Samples: []cortexpb.Sample{ + {Value: 2, TimestampMs: 2000}, + {Value: 3, TimestampMs: 3000}, + }, + }, + }, }, }, }, @@ -1068,14 +1270,14 @@ func TestCompressedResponse(t *testing.T) { if err == nil { require.NoError(t, err) - require.Equal(t, tc.promBody.Data, resp.(*PrometheusResponse).Data) + require.Equal(t, tc.promBody.Data, 
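Note: TestCompressedResponse feeds gzip- and snappy-encoded bodies through the response decoder. A hedged sketch of how such bodies could be produced in a test, using the standard library for gzip and github.com/golang/snappy for snappy (an assumption; that is the snappy package Cortex is commonly built against):

package queryrange

import (
	"bytes"
	"compress/gzip"
	"testing"

	"github.com/golang/snappy"
	"github.com/stretchr/testify/require"
)

// compressBody builds a compressed payload for a decoder test; hypothetical
// helper, not part of the PR.
func compressBody(t *testing.T, encoding string, body []byte) []byte {
	t.Helper()
	switch encoding {
	case "gzip":
		var buf bytes.Buffer
		w := gzip.NewWriter(&buf)
		_, err := w.Write(body)
		require.NoError(t, err)
		require.NoError(t, w.Close())
		return buf.Bytes()
	case "snappy":
		return snappy.Encode(nil, body)
	default:
		t.Fatalf("unsupported encoding %q", encoding)
		return nil
	}
}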
resp.(*tripperware.PrometheusResponse).Data) } }) } } func mustParse(t *testing.T, response string) tripperware.Response { - var resp PrometheusResponse + var resp tripperware.PrometheusResponse // Needed as goimports automatically add a json import otherwise. json := jsoniter.ConfigCompatibleWithStandardLibrary require.NoError(t, json.Unmarshal([]byte(response), &resp)) diff --git a/pkg/querier/tripperware/queryrange/queryrange.pb.go b/pkg/querier/tripperware/queryrange/queryrange.pb.go deleted file mode 100644 index edf1f6cbe8..0000000000 --- a/pkg/querier/tripperware/queryrange/queryrange.pb.go +++ /dev/null @@ -1,2641 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: queryrange.proto - -package queryrange - -import ( - fmt "fmt" - tripperware "github.com/cortexproject/cortex/pkg/querier/tripperware" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" - types "github.com/gogo/protobuf/types" - _ "github.com/golang/protobuf/ptypes/duration" - io "io" - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" - time "time" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf -var _ = time.Kitchen - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -type PrometheusRequest struct { - Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` - Start int64 `protobuf:"varint,2,opt,name=start,proto3" json:"start,omitempty"` - End int64 `protobuf:"varint,3,opt,name=end,proto3" json:"end,omitempty"` - Step int64 `protobuf:"varint,4,opt,name=step,proto3" json:"step,omitempty"` - Timeout time.Duration `protobuf:"bytes,5,opt,name=timeout,proto3,stdduration" json:"timeout"` - Query string `protobuf:"bytes,6,opt,name=query,proto3" json:"query,omitempty"` - CachingOptions CachingOptions `protobuf:"bytes,7,opt,name=cachingOptions,proto3" json:"cachingOptions"` - Headers []*tripperware.PrometheusRequestHeader `protobuf:"bytes,8,rep,name=Headers,proto3" json:"-"` - Stats string `protobuf:"bytes,9,opt,name=stats,proto3" json:"stats,omitempty"` -} - -func (m *PrometheusRequest) Reset() { *m = PrometheusRequest{} } -func (*PrometheusRequest) ProtoMessage() {} -func (*PrometheusRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_79b02382e213d0b2, []int{0} -} -func (m *PrometheusRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PrometheusRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_PrometheusRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *PrometheusRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_PrometheusRequest.Merge(m, src) -} -func (m *PrometheusRequest) XXX_Size() int { - return m.Size() -} -func (m *PrometheusRequest) XXX_DiscardUnknown() { - xxx_messageInfo_PrometheusRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_PrometheusRequest proto.InternalMessageInfo - -func (m *PrometheusRequest) GetPath() string { - if m != nil { - return m.Path - 
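Note: mustParse now unmarshals straight into the shared tripperware.PrometheusResponse, which is why the rest of this diff can delete the package-local generated file. A standalone illustration of the same jsoniter pattern (helper name is hypothetical):

package queryrange

import (
	jsoniter "github.com/json-iterator/go"

	"github.com/cortexproject/cortex/pkg/querier/tripperware"
)

// parsePrometheusResponse mirrors what mustParse does: decode the JSON body
// with jsoniter's stdlib-compatible config into the shared tripperware type.
func parsePrometheusResponse(s string) (*tripperware.PrometheusResponse, error) {
	json := jsoniter.ConfigCompatibleWithStandardLibrary
	var resp tripperware.PrometheusResponse
	if err := json.Unmarshal([]byte(s), &resp); err != nil {
		return nil, err
	}
	return &resp, nil
}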
} - return "" -} - -func (m *PrometheusRequest) GetStart() int64 { - if m != nil { - return m.Start - } - return 0 -} - -func (m *PrometheusRequest) GetEnd() int64 { - if m != nil { - return m.End - } - return 0 -} - -func (m *PrometheusRequest) GetStep() int64 { - if m != nil { - return m.Step - } - return 0 -} - -func (m *PrometheusRequest) GetTimeout() time.Duration { - if m != nil { - return m.Timeout - } - return 0 -} - -func (m *PrometheusRequest) GetQuery() string { - if m != nil { - return m.Query - } - return "" -} - -func (m *PrometheusRequest) GetCachingOptions() CachingOptions { - if m != nil { - return m.CachingOptions - } - return CachingOptions{} -} - -func (m *PrometheusRequest) GetHeaders() []*tripperware.PrometheusRequestHeader { - if m != nil { - return m.Headers - } - return nil -} - -func (m *PrometheusRequest) GetStats() string { - if m != nil { - return m.Stats - } - return "" -} - -type PrometheusResponse struct { - Status string `protobuf:"bytes,1,opt,name=Status,proto3" json:"status"` - Data PrometheusData `protobuf:"bytes,2,opt,name=Data,proto3" json:"data,omitempty"` - ErrorType string `protobuf:"bytes,3,opt,name=ErrorType,proto3" json:"errorType,omitempty"` - Error string `protobuf:"bytes,4,opt,name=Error,proto3" json:"error,omitempty"` - Headers []*tripperware.PrometheusResponseHeader `protobuf:"bytes,5,rep,name=Headers,proto3" json:"-"` - Warnings []string `protobuf:"bytes,6,rep,name=Warnings,proto3" json:"warnings,omitempty"` -} - -func (m *PrometheusResponse) Reset() { *m = PrometheusResponse{} } -func (*PrometheusResponse) ProtoMessage() {} -func (*PrometheusResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_79b02382e213d0b2, []int{1} -} -func (m *PrometheusResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PrometheusResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_PrometheusResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *PrometheusResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_PrometheusResponse.Merge(m, src) -} -func (m *PrometheusResponse) XXX_Size() int { - return m.Size() -} -func (m *PrometheusResponse) XXX_DiscardUnknown() { - xxx_messageInfo_PrometheusResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_PrometheusResponse proto.InternalMessageInfo - -func (m *PrometheusResponse) GetStatus() string { - if m != nil { - return m.Status - } - return "" -} - -func (m *PrometheusResponse) GetData() PrometheusData { - if m != nil { - return m.Data - } - return PrometheusData{} -} - -func (m *PrometheusResponse) GetErrorType() string { - if m != nil { - return m.ErrorType - } - return "" -} - -func (m *PrometheusResponse) GetError() string { - if m != nil { - return m.Error - } - return "" -} - -func (m *PrometheusResponse) GetHeaders() []*tripperware.PrometheusResponseHeader { - if m != nil { - return m.Headers - } - return nil -} - -func (m *PrometheusResponse) GetWarnings() []string { - if m != nil { - return m.Warnings - } - return nil -} - -type PrometheusData struct { - ResultType string `protobuf:"bytes,1,opt,name=ResultType,proto3" json:"resultType"` - Result []tripperware.SampleStream `protobuf:"bytes,2,rep,name=Result,proto3" json:"result"` - Stats *tripperware.PrometheusResponseStats `protobuf:"bytes,3,opt,name=stats,proto3" json:"stats,omitempty"` -} - -func (m *PrometheusData) Reset() { *m = 
PrometheusData{} } -func (*PrometheusData) ProtoMessage() {} -func (*PrometheusData) Descriptor() ([]byte, []int) { - return fileDescriptor_79b02382e213d0b2, []int{2} -} -func (m *PrometheusData) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PrometheusData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_PrometheusData.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *PrometheusData) XXX_Merge(src proto.Message) { - xxx_messageInfo_PrometheusData.Merge(m, src) -} -func (m *PrometheusData) XXX_Size() int { - return m.Size() -} -func (m *PrometheusData) XXX_DiscardUnknown() { - xxx_messageInfo_PrometheusData.DiscardUnknown(m) -} - -var xxx_messageInfo_PrometheusData proto.InternalMessageInfo - -func (m *PrometheusData) GetResultType() string { - if m != nil { - return m.ResultType - } - return "" -} - -func (m *PrometheusData) GetResult() []tripperware.SampleStream { - if m != nil { - return m.Result - } - return nil -} - -func (m *PrometheusData) GetStats() *tripperware.PrometheusResponseStats { - if m != nil { - return m.Stats - } - return nil -} - -type CachedResponse struct { - Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key"` - // List of cached responses; non-overlapping and in order. - Extents []Extent `protobuf:"bytes,2,rep,name=extents,proto3" json:"extents"` -} - -func (m *CachedResponse) Reset() { *m = CachedResponse{} } -func (*CachedResponse) ProtoMessage() {} -func (*CachedResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_79b02382e213d0b2, []int{3} -} -func (m *CachedResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CachedResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CachedResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *CachedResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_CachedResponse.Merge(m, src) -} -func (m *CachedResponse) XXX_Size() int { - return m.Size() -} -func (m *CachedResponse) XXX_DiscardUnknown() { - xxx_messageInfo_CachedResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_CachedResponse proto.InternalMessageInfo - -func (m *CachedResponse) GetKey() string { - if m != nil { - return m.Key - } - return "" -} - -func (m *CachedResponse) GetExtents() []Extent { - if m != nil { - return m.Extents - } - return nil -} - -type Extent struct { - Start int64 `protobuf:"varint,1,opt,name=start,proto3" json:"start"` - End int64 `protobuf:"varint,2,opt,name=end,proto3" json:"end"` - TraceId string `protobuf:"bytes,4,opt,name=trace_id,json=traceId,proto3" json:"-"` - Response *types.Any `protobuf:"bytes,5,opt,name=response,proto3" json:"response"` -} - -func (m *Extent) Reset() { *m = Extent{} } -func (*Extent) ProtoMessage() {} -func (*Extent) Descriptor() ([]byte, []int) { - return fileDescriptor_79b02382e213d0b2, []int{4} -} -func (m *Extent) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Extent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Extent.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Extent) 
XXX_Merge(src proto.Message) { - xxx_messageInfo_Extent.Merge(m, src) -} -func (m *Extent) XXX_Size() int { - return m.Size() -} -func (m *Extent) XXX_DiscardUnknown() { - xxx_messageInfo_Extent.DiscardUnknown(m) -} - -var xxx_messageInfo_Extent proto.InternalMessageInfo - -func (m *Extent) GetStart() int64 { - if m != nil { - return m.Start - } - return 0 -} - -func (m *Extent) GetEnd() int64 { - if m != nil { - return m.End - } - return 0 -} - -func (m *Extent) GetTraceId() string { - if m != nil { - return m.TraceId - } - return "" -} - -func (m *Extent) GetResponse() *types.Any { - if m != nil { - return m.Response - } - return nil -} - -type CachingOptions struct { - Disabled bool `protobuf:"varint,1,opt,name=disabled,proto3" json:"disabled,omitempty"` -} - -func (m *CachingOptions) Reset() { *m = CachingOptions{} } -func (*CachingOptions) ProtoMessage() {} -func (*CachingOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_79b02382e213d0b2, []int{5} -} -func (m *CachingOptions) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CachingOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CachingOptions.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *CachingOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_CachingOptions.Merge(m, src) -} -func (m *CachingOptions) XXX_Size() int { - return m.Size() -} -func (m *CachingOptions) XXX_DiscardUnknown() { - xxx_messageInfo_CachingOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_CachingOptions proto.InternalMessageInfo - -func (m *CachingOptions) GetDisabled() bool { - if m != nil { - return m.Disabled - } - return false -} - -func init() { - proto.RegisterType((*PrometheusRequest)(nil), "queryrange.PrometheusRequest") - proto.RegisterType((*PrometheusResponse)(nil), "queryrange.PrometheusResponse") - proto.RegisterType((*PrometheusData)(nil), "queryrange.PrometheusData") - proto.RegisterType((*CachedResponse)(nil), "queryrange.CachedResponse") - proto.RegisterType((*Extent)(nil), "queryrange.Extent") - proto.RegisterType((*CachingOptions)(nil), "queryrange.CachingOptions") -} - -func init() { proto.RegisterFile("queryrange.proto", fileDescriptor_79b02382e213d0b2) } - -var fileDescriptor_79b02382e213d0b2 = []byte{ - // 792 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x54, 0xcd, 0x4e, 0xe3, 0x48, - 0x10, 0x8e, 0xe3, 0xc4, 0x71, 0x9a, 0x55, 0x60, 0x1b, 0xc4, 0x3a, 0x39, 0xd8, 0x51, 0xc4, 0x4a, - 0x59, 0x89, 0x75, 0x24, 0x56, 0x7b, 0xdc, 0xd5, 0x62, 0x7e, 0xc4, 0xee, 0x85, 0x95, 0x19, 0x69, - 0xa4, 0xb9, 0x8c, 0x3a, 0x71, 0x8f, 0x63, 0x48, 0x6c, 0xd3, 0xdd, 0x16, 0xe4, 0x36, 0x8f, 0x30, - 0xc7, 0x79, 0x84, 0x39, 0xcc, 0x83, 0x70, 0xe4, 0x30, 0x07, 0x4e, 0x66, 0x08, 0x97, 0x91, 0x4f, - 0x3c, 0xc2, 0xc8, 0xdd, 0xed, 0xc4, 0x80, 0xd0, 0x5c, 0xac, 0xaa, 0xea, 0xaf, 0xca, 0xf5, 0x7d, - 0xd5, 0xd5, 0x60, 0xed, 0x3c, 0xc1, 0x64, 0x46, 0x50, 0xe8, 0x63, 0x3b, 0x26, 0x11, 0x8b, 0x20, - 0x58, 0x46, 0x3a, 0x1b, 0x7e, 0xe4, 0x47, 0x3c, 0x3c, 0xc8, 0x2d, 0x81, 0xe8, 0x98, 0x7e, 0x14, - 0xf9, 0x13, 0x3c, 0xe0, 0xde, 0x30, 0x79, 0x37, 0xf0, 0x12, 0x82, 0x58, 0x10, 0x85, 0xf2, 0xbc, - 0xfd, 0xf4, 0x1c, 0x85, 0x33, 0x79, 0xb4, 0xe7, 0x07, 0x6c, 0x9c, 0x0c, 0xed, 0x51, 0x34, 0x1d, - 0x8c, 0x22, 0xc2, 0xf0, 0x65, 0x4c, 0xa2, 0x53, 0x3c, 0x62, 0xd2, 0x1b, 0xc4, 0x67, 0xfe, 0x20, - 
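Note: among the deleted messages, Extent pairs a cached time window with the response packed as a protobuf Any ("non-overlapping and in order", per the comment above). A hedged sketch of that packing, using the gogo helpers the deleted file already imports:

package queryrange

import (
	"github.com/gogo/protobuf/proto"
	"github.com/gogo/protobuf/types"
)

// toExtent shows how a response could be wrapped for the cache, mirroring
// the Extent struct deleted in this hunk (illustration only).
func toExtent(start, end int64, traceID string, resp proto.Message) (Extent, error) {
	anyResp, err := types.MarshalAny(resp)
	if err != nil {
		return Extent{}, err
	}
	return Extent{Start: start, End: end, TraceId: traceID, Response: anyResp}, nil
}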
- [… middle of the gzipped FileDescriptorProto byte array elided: hex literals only, no human-readable content …]
0xdc, 0x54, 0x3e, 0xdc, 0x9b, 0x95, 0xeb, 0x7b, 0xb3, 0x72, 0x73, 0x6f, - 0x56, 0xde, 0x94, 0x9e, 0xee, 0xa1, 0xc6, 0x7b, 0xfb, 0xe3, 0x7b, 0x00, 0x00, 0x00, 0xff, 0xff, - 0x72, 0xed, 0xdd, 0xa8, 0xe1, 0x05, 0x00, 0x00, -} - -func (this *PrometheusRequest) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*PrometheusRequest) - if !ok { - that2, ok := that.(PrometheusRequest) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.Path != that1.Path { - return false - } - if this.Start != that1.Start { - return false - } - if this.End != that1.End { - return false - } - if this.Step != that1.Step { - return false - } - if this.Timeout != that1.Timeout { - return false - } - if this.Query != that1.Query { - return false - } - if !this.CachingOptions.Equal(&that1.CachingOptions) { - return false - } - if len(this.Headers) != len(that1.Headers) { - return false - } - for i := range this.Headers { - if !this.Headers[i].Equal(that1.Headers[i]) { - return false - } - } - if this.Stats != that1.Stats { - return false - } - return true -} -func (this *PrometheusResponse) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*PrometheusResponse) - if !ok { - that2, ok := that.(PrometheusResponse) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.Status != that1.Status { - return false - } - if !this.Data.Equal(&that1.Data) { - return false - } - if this.ErrorType != that1.ErrorType { - return false - } - if this.Error != that1.Error { - return false - } - if len(this.Headers) != len(that1.Headers) { - return false - } - for i := range this.Headers { - if !this.Headers[i].Equal(that1.Headers[i]) { - return false - } - } - if len(this.Warnings) != len(that1.Warnings) { - return false - } - for i := range this.Warnings { - if this.Warnings[i] != that1.Warnings[i] { - return false - } - } - return true -} -func (this *PrometheusData) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*PrometheusData) - if !ok { - that2, ok := that.(PrometheusData) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.ResultType != that1.ResultType { - return false - } - if len(this.Result) != len(that1.Result) { - return false - } - for i := range this.Result { - if !this.Result[i].Equal(&that1.Result[i]) { - return false - } - } - if !this.Stats.Equal(that1.Stats) { - return false - } - return true -} -func (this *CachedResponse) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*CachedResponse) - if !ok { - that2, ok := that.(CachedResponse) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.Key != that1.Key { - return false - } - if len(this.Extents) != len(that1.Extents) { - return false - } - for i := range this.Extents { - if !this.Extents[i].Equal(&that1.Extents[i]) { - return false - } - } - return true -} -func (this *Extent) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*Extent) - if !ok { - that2, ok := that.(Extent) - if ok { - that1 = &that2 - } else { - return 
false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.Start != that1.Start { - return false - } - if this.End != that1.End { - return false - } - if this.TraceId != that1.TraceId { - return false - } - if !this.Response.Equal(that1.Response) { - return false - } - return true -} -func (this *CachingOptions) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*CachingOptions) - if !ok { - that2, ok := that.(CachingOptions) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.Disabled != that1.Disabled { - return false - } - return true -} -func (this *PrometheusRequest) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 13) - s = append(s, "&queryrange.PrometheusRequest{") - s = append(s, "Path: "+fmt.Sprintf("%#v", this.Path)+",\n") - s = append(s, "Start: "+fmt.Sprintf("%#v", this.Start)+",\n") - s = append(s, "End: "+fmt.Sprintf("%#v", this.End)+",\n") - s = append(s, "Step: "+fmt.Sprintf("%#v", this.Step)+",\n") - s = append(s, "Timeout: "+fmt.Sprintf("%#v", this.Timeout)+",\n") - s = append(s, "Query: "+fmt.Sprintf("%#v", this.Query)+",\n") - s = append(s, "CachingOptions: "+strings.Replace(this.CachingOptions.GoString(), `&`, ``, 1)+",\n") - if this.Headers != nil { - s = append(s, "Headers: "+fmt.Sprintf("%#v", this.Headers)+",\n") - } - s = append(s, "Stats: "+fmt.Sprintf("%#v", this.Stats)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *PrometheusResponse) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 10) - s = append(s, "&queryrange.PrometheusResponse{") - s = append(s, "Status: "+fmt.Sprintf("%#v", this.Status)+",\n") - s = append(s, "Data: "+strings.Replace(this.Data.GoString(), `&`, ``, 1)+",\n") - s = append(s, "ErrorType: "+fmt.Sprintf("%#v", this.ErrorType)+",\n") - s = append(s, "Error: "+fmt.Sprintf("%#v", this.Error)+",\n") - if this.Headers != nil { - s = append(s, "Headers: "+fmt.Sprintf("%#v", this.Headers)+",\n") - } - s = append(s, "Warnings: "+fmt.Sprintf("%#v", this.Warnings)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *PrometheusData) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 7) - s = append(s, "&queryrange.PrometheusData{") - s = append(s, "ResultType: "+fmt.Sprintf("%#v", this.ResultType)+",\n") - if this.Result != nil { - vs := make([]*tripperware.SampleStream, len(this.Result)) - for i := range vs { - vs[i] = &this.Result[i] - } - s = append(s, "Result: "+fmt.Sprintf("%#v", vs)+",\n") - } - if this.Stats != nil { - s = append(s, "Stats: "+fmt.Sprintf("%#v", this.Stats)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *CachedResponse) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 6) - s = append(s, "&queryrange.CachedResponse{") - s = append(s, "Key: "+fmt.Sprintf("%#v", this.Key)+",\n") - if this.Extents != nil { - vs := make([]*Extent, len(this.Extents)) - for i := range vs { - vs[i] = &this.Extents[i] - } - s = append(s, "Extents: "+fmt.Sprintf("%#v", vs)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *Extent) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 8) - s = append(s, "&queryrange.Extent{") - s = append(s, "Start: "+fmt.Sprintf("%#v", this.Start)+",\n") - s = 
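Note: the generated Equal and GoString methods deleted here back two common debugging patterns: Equal is a nil-safe deep comparison that avoids reflect.DeepEqual, and fmt's %#v verb routes through GoString via the fmt.GoStringer interface. A small usage sketch against the package-local types this diff removes:

package queryrange

import "testing"

// assertResponsesEqual is illustrative only, not part of the PR.
func assertResponsesEqual(t *testing.T, got, want *PrometheusResponse) {
	t.Helper()
	if !got.Equal(want) {
		// %#v invokes the generated GoString methods through fmt.GoStringer.
		t.Fatalf("got %#v, want %#v", got, want)
	}
}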
append(s, "End: "+fmt.Sprintf("%#v", this.End)+",\n") - s = append(s, "TraceId: "+fmt.Sprintf("%#v", this.TraceId)+",\n") - if this.Response != nil { - s = append(s, "Response: "+fmt.Sprintf("%#v", this.Response)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *CachingOptions) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&queryrange.CachingOptions{") - s = append(s, "Disabled: "+fmt.Sprintf("%#v", this.Disabled)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func valueToGoStringQueryrange(v interface{}, typ string) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) -} -func (m *PrometheusRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PrometheusRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PrometheusRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Stats) > 0 { - i -= len(m.Stats) - copy(dAtA[i:], m.Stats) - i = encodeVarintQueryrange(dAtA, i, uint64(len(m.Stats))) - i-- - dAtA[i] = 0x4a - } - if len(m.Headers) > 0 { - for iNdEx := len(m.Headers) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Headers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQueryrange(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x42 - } - } - { - size, err := m.CachingOptions.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQueryrange(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x3a - if len(m.Query) > 0 { - i -= len(m.Query) - copy(dAtA[i:], m.Query) - i = encodeVarintQueryrange(dAtA, i, uint64(len(m.Query))) - i-- - dAtA[i] = 0x32 - } - n2, err2 := github_com_gogo_protobuf_types.StdDurationMarshalTo(m.Timeout, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(m.Timeout):]) - if err2 != nil { - return 0, err2 - } - i -= n2 - i = encodeVarintQueryrange(dAtA, i, uint64(n2)) - i-- - dAtA[i] = 0x2a - if m.Step != 0 { - i = encodeVarintQueryrange(dAtA, i, uint64(m.Step)) - i-- - dAtA[i] = 0x20 - } - if m.End != 0 { - i = encodeVarintQueryrange(dAtA, i, uint64(m.End)) - i-- - dAtA[i] = 0x18 - } - if m.Start != 0 { - i = encodeVarintQueryrange(dAtA, i, uint64(m.Start)) - i-- - dAtA[i] = 0x10 - } - if len(m.Path) > 0 { - i -= len(m.Path) - copy(dAtA[i:], m.Path) - i = encodeVarintQueryrange(dAtA, i, uint64(len(m.Path))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *PrometheusResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PrometheusResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PrometheusResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Warnings) > 0 { - for iNdEx := len(m.Warnings) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Warnings[iNdEx]) - copy(dAtA[i:], m.Warnings[iNdEx]) - i = encodeVarintQueryrange(dAtA, i, 
uint64(len(m.Warnings[iNdEx]))) - i-- - dAtA[i] = 0x32 - } - } - if len(m.Headers) > 0 { - for iNdEx := len(m.Headers) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Headers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQueryrange(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - } - if len(m.Error) > 0 { - i -= len(m.Error) - copy(dAtA[i:], m.Error) - i = encodeVarintQueryrange(dAtA, i, uint64(len(m.Error))) - i-- - dAtA[i] = 0x22 - } - if len(m.ErrorType) > 0 { - i -= len(m.ErrorType) - copy(dAtA[i:], m.ErrorType) - i = encodeVarintQueryrange(dAtA, i, uint64(len(m.ErrorType))) - i-- - dAtA[i] = 0x1a - } - { - size, err := m.Data.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQueryrange(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - if len(m.Status) > 0 { - i -= len(m.Status) - copy(dAtA[i:], m.Status) - i = encodeVarintQueryrange(dAtA, i, uint64(len(m.Status))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *PrometheusData) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PrometheusData) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PrometheusData) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Stats != nil { - { - size, err := m.Stats.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQueryrange(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - if len(m.Result) > 0 { - for iNdEx := len(m.Result) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Result[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQueryrange(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - if len(m.ResultType) > 0 { - i -= len(m.ResultType) - copy(dAtA[i:], m.ResultType) - i = encodeVarintQueryrange(dAtA, i, uint64(len(m.ResultType))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *CachedResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CachedResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CachedResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Extents) > 0 { - for iNdEx := len(m.Extents) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Extents[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQueryrange(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - if len(m.Key) > 0 { - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintQueryrange(dAtA, i, uint64(len(m.Key))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *Extent) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Extent) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Extent) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Response != nil { - { - size, err := m.Response.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQueryrange(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - if len(m.TraceId) > 0 { - i -= len(m.TraceId) - copy(dAtA[i:], m.TraceId) - i = encodeVarintQueryrange(dAtA, i, uint64(len(m.TraceId))) - i-- - dAtA[i] = 0x22 - } - if m.End != 0 { - i = encodeVarintQueryrange(dAtA, i, uint64(m.End)) - i-- - dAtA[i] = 0x10 - } - if m.Start != 0 { - i = encodeVarintQueryrange(dAtA, i, uint64(m.Start)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *CachingOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CachingOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CachingOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Disabled { - i-- - if m.Disabled { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func encodeVarintQueryrange(dAtA []byte, offset int, v uint64) int { - offset -= sovQueryrange(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *PrometheusRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Path) - if l > 0 { - n += 1 + l + sovQueryrange(uint64(l)) - } - if m.Start != 0 { - n += 1 + sovQueryrange(uint64(m.Start)) - } - if m.End != 0 { - n += 1 + sovQueryrange(uint64(m.End)) - } - if m.Step != 0 { - n += 1 + sovQueryrange(uint64(m.Step)) - } - l = github_com_gogo_protobuf_types.SizeOfStdDuration(m.Timeout) - n += 1 + l + sovQueryrange(uint64(l)) - l = len(m.Query) - if l > 0 { - n += 1 + l + sovQueryrange(uint64(l)) - } - l = m.CachingOptions.Size() - n += 1 + l + sovQueryrange(uint64(l)) - if len(m.Headers) > 0 { - for _, e := range m.Headers { - l = e.Size() - n += 1 + l + sovQueryrange(uint64(l)) - } - } - l = len(m.Stats) - if l > 0 { - n += 1 + l + sovQueryrange(uint64(l)) - } - return n -} - -func (m *PrometheusResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Status) - if l > 0 { - n += 1 + l + sovQueryrange(uint64(l)) - } - l = m.Data.Size() - n += 1 + l + sovQueryrange(uint64(l)) - l = len(m.ErrorType) - if l > 0 { - n += 1 + l + sovQueryrange(uint64(l)) - } - l = len(m.Error) - if l > 0 { - n += 1 + l + sovQueryrange(uint64(l)) - } - if len(m.Headers) > 0 { - for _, e := range m.Headers { - l = e.Size() - n += 1 + l + sovQueryrange(uint64(l)) - } - } - if len(m.Warnings) > 0 { - for _, s := range m.Warnings { - l = len(s) - n += 1 + l + sovQueryrange(uint64(l)) - } - } - return n -} - -func (m *PrometheusData) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ResultType) - if l > 0 { - n += 1 + l + sovQueryrange(uint64(l)) - } - if len(m.Result) > 0 { - for _, e := range m.Result { - l = e.Size() - n += 1 + l + sovQueryrange(uint64(l)) - } - } - if m.Stats != nil { - l = m.Stats.Size() - n += 1 + l + sovQueryrange(uint64(l)) - } - return n -} - -func (m *CachedResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = 
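Note: the MarshalToSizedBuffer pattern above fills the buffer from the end (i starts at len(dAtA) and decreases), so nested payloads are written before the length prefixes that precede them on the wire. A toy sketch of the same idea with plain bytes (illustration, not gogo's generated code):

package queryrange

// writeTailString appends a string field back-to-front: payload first, then
// its varint length, then the tag byte, returning the new write position.
func writeTailString(buf []byte, i int, tag byte, s string) int {
	i -= len(s)
	copy(buf[i:], s)
	i = encodeVarintTail(buf, i, uint64(len(s)))
	i--
	buf[i] = tag
	return i
}

// encodeVarintTail mirrors encodeVarintQueryrange: reserve space for the
// varint, then write it forwards within that reserved window.
func encodeVarintTail(buf []byte, offset int, v uint64) int {
	offset -= varintLen(v)
	base := offset
	for v >= 1<<7 {
		buf[offset] = byte(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	buf[offset] = byte(v)
	return base
}

// varintLen is the loop form of the size rule; sovQueryrange below computes
// the same quantity with a bit-length formula.
func varintLen(v uint64) int {
	n := 1
	for v >= 1<<7 {
		v >>= 7
		n++
	}
	return n
}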
len(m.Key) - if l > 0 { - n += 1 + l + sovQueryrange(uint64(l)) - } - if len(m.Extents) > 0 { - for _, e := range m.Extents { - l = e.Size() - n += 1 + l + sovQueryrange(uint64(l)) - } - } - return n -} - -func (m *Extent) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Start != 0 { - n += 1 + sovQueryrange(uint64(m.Start)) - } - if m.End != 0 { - n += 1 + sovQueryrange(uint64(m.End)) - } - l = len(m.TraceId) - if l > 0 { - n += 1 + l + sovQueryrange(uint64(l)) - } - if m.Response != nil { - l = m.Response.Size() - n += 1 + l + sovQueryrange(uint64(l)) - } - return n -} - -func (m *CachingOptions) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Disabled { - n += 2 - } - return n -} - -func sovQueryrange(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozQueryrange(x uint64) (n int) { - return sovQueryrange(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *PrometheusRequest) String() string { - if this == nil { - return "nil" - } - repeatedStringForHeaders := "[]*PrometheusRequestHeader{" - for _, f := range this.Headers { - repeatedStringForHeaders += strings.Replace(fmt.Sprintf("%v", f), "PrometheusRequestHeader", "tripperware.PrometheusRequestHeader", 1) + "," - } - repeatedStringForHeaders += "}" - s := strings.Join([]string{`&PrometheusRequest{`, - `Path:` + fmt.Sprintf("%v", this.Path) + `,`, - `Start:` + fmt.Sprintf("%v", this.Start) + `,`, - `End:` + fmt.Sprintf("%v", this.End) + `,`, - `Step:` + fmt.Sprintf("%v", this.Step) + `,`, - `Timeout:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Timeout), "Duration", "duration.Duration", 1), `&`, ``, 1) + `,`, - `Query:` + fmt.Sprintf("%v", this.Query) + `,`, - `CachingOptions:` + strings.Replace(strings.Replace(this.CachingOptions.String(), "CachingOptions", "CachingOptions", 1), `&`, ``, 1) + `,`, - `Headers:` + repeatedStringForHeaders + `,`, - `Stats:` + fmt.Sprintf("%v", this.Stats) + `,`, - `}`, - }, "") - return s -} -func (this *PrometheusResponse) String() string { - if this == nil { - return "nil" - } - repeatedStringForHeaders := "[]*PrometheusResponseHeader{" - for _, f := range this.Headers { - repeatedStringForHeaders += strings.Replace(fmt.Sprintf("%v", f), "PrometheusResponseHeader", "tripperware.PrometheusResponseHeader", 1) + "," - } - repeatedStringForHeaders += "}" - s := strings.Join([]string{`&PrometheusResponse{`, - `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `Data:` + strings.Replace(strings.Replace(this.Data.String(), "PrometheusData", "PrometheusData", 1), `&`, ``, 1) + `,`, - `ErrorType:` + fmt.Sprintf("%v", this.ErrorType) + `,`, - `Error:` + fmt.Sprintf("%v", this.Error) + `,`, - `Headers:` + repeatedStringForHeaders + `,`, - `Warnings:` + fmt.Sprintf("%v", this.Warnings) + `,`, - `}`, - }, "") - return s -} -func (this *PrometheusData) String() string { - if this == nil { - return "nil" - } - repeatedStringForResult := "[]SampleStream{" - for _, f := range this.Result { - repeatedStringForResult += fmt.Sprintf("%v", f) + "," - } - repeatedStringForResult += "}" - s := strings.Join([]string{`&PrometheusData{`, - `ResultType:` + fmt.Sprintf("%v", this.ResultType) + `,`, - `Result:` + repeatedStringForResult + `,`, - `Stats:` + strings.Replace(fmt.Sprintf("%v", this.Stats), "PrometheusResponseStats", "tripperware.PrometheusResponseStats", 1) + `,`, - `}`, - }, "") - return s -} -func (this *CachedResponse) String() string { - if this == nil { - return "nil" - } - repeatedStringForExtents := "[]Extent{" - for 
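Note: sovQueryrange computes a varint's encoded size as (bits.Len64(x|1)+6)/7: the x|1 forces a minimum bit length of 1, and each varint byte carries 7 payload bits; sozQueryrange applies the zigzag transform (x<<1 ^ x>>63) first, for signed fields. The standard library produces the same lengths, which makes a quick sanity check:

package queryrange

import (
	"encoding/binary"
	"math/bits"
)

// sov replicates the deleted sovQueryrange formula.
func sov(x uint64) int {
	return (bits.Len64(x|1) + 6) / 7
}

// stdlibVarintLen measures the same value by actually encoding with
// encoding/binary, whose unsigned varint matches the protobuf wire format.
func stdlibVarintLen(x uint64) int {
	var buf [binary.MaxVarintLen64]byte
	return binary.PutUvarint(buf[:], x)
}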
_, f := range this.Extents { - repeatedStringForExtents += strings.Replace(strings.Replace(f.String(), "Extent", "Extent", 1), `&`, ``, 1) + "," - } - repeatedStringForExtents += "}" - s := strings.Join([]string{`&CachedResponse{`, - `Key:` + fmt.Sprintf("%v", this.Key) + `,`, - `Extents:` + repeatedStringForExtents + `,`, - `}`, - }, "") - return s -} -func (this *Extent) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Extent{`, - `Start:` + fmt.Sprintf("%v", this.Start) + `,`, - `End:` + fmt.Sprintf("%v", this.End) + `,`, - `TraceId:` + fmt.Sprintf("%v", this.TraceId) + `,`, - `Response:` + strings.Replace(fmt.Sprintf("%v", this.Response), "Any", "types.Any", 1) + `,`, - `}`, - }, "") - return s -} -func (this *CachingOptions) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CachingOptions{`, - `Disabled:` + fmt.Sprintf("%v", this.Disabled) + `,`, - `}`, - }, "") - return s -} -func valueToStringQueryrange(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *PrometheusRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQueryrange - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PrometheusRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PrometheusRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQueryrange - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthQueryrange - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthQueryrange - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Path = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Start", wireType) - } - m.Start = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQueryrange - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Start |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field End", wireType) - } - m.End = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQueryrange - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.End |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Step", wireType) - } - m.Step = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQueryrange - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b 
:= dAtA[iNdEx] - iNdEx++ - m.Step |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Timeout", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQueryrange - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQueryrange - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQueryrange - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := github_com_gogo_protobuf_types.StdDurationUnmarshal(&m.Timeout, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQueryrange - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthQueryrange - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthQueryrange - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Query = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CachingOptions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQueryrange - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQueryrange - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQueryrange - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.CachingOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Headers", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQueryrange - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQueryrange - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQueryrange - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Headers = append(m.Headers, &tripperware.PrometheusRequestHeader{}) - if err := m.Headers[len(m.Headers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Stats", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQueryrange - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthQueryrange - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthQueryrange - } - if 
postIndex > l { - return io.ErrUnexpectedEOF - } - m.Stats = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQueryrange(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthQueryrange - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthQueryrange - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PrometheusResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQueryrange - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PrometheusResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PrometheusResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQueryrange - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthQueryrange - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthQueryrange - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Status = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQueryrange - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQueryrange - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQueryrange - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Data.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ErrorType", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQueryrange - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthQueryrange - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthQueryrange - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ErrorType = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQueryrange - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b 
:= dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthQueryrange - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthQueryrange - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Error = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Headers", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQueryrange - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQueryrange - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQueryrange - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Headers = append(m.Headers, &tripperware.PrometheusResponseHeader{}) - if err := m.Headers[len(m.Headers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Warnings", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQueryrange - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthQueryrange - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthQueryrange - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Warnings = append(m.Warnings, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQueryrange(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthQueryrange - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthQueryrange - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PrometheusData) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQueryrange - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PrometheusData: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PrometheusData: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResultType", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQueryrange - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthQueryrange - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthQueryrange - } - 
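Each field case in these generated Unmarshal methods repeats the same inline varint loop: accumulate 7 payload bits per byte, least-significant group first, and stop at the first byte without the 0x80 continuation bit. A minimal standalone equivalent (not part of this patch), with the same overflow and truncation guards:

```go
package main

import (
	"errors"
	"fmt"
	"io"
)

var errIntOverflow = errors.New("proto: integer overflow")

// decodeVarint mirrors the inline loops in the generated code: each
// byte contributes its low 7 bits, least-significant group first; a
// clear high bit (b < 0x80) marks the final byte.
func decodeVarint(data []byte) (v uint64, n int, err error) {
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errIntOverflow
		}
		if n >= len(data) {
			return 0, 0, io.ErrUnexpectedEOF
		}
		b := data[n]
		n++
		v |= uint64(b&0x7F) << shift
		if b < 0x80 {
			return v, n, nil
		}
	}
}

func main() {
	v, n, _ := decodeVarint([]byte{0xAC, 0x02}) // 0xAC carries 44, 0x02 carries 2<<7
	fmt.Println(v, n)                           // 300 2
}
```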
if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ResultType = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQueryrange - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQueryrange - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQueryrange - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Result = append(m.Result, tripperware.SampleStream{}) - if err := m.Result[len(m.Result)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Stats", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQueryrange - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQueryrange - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQueryrange - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Stats == nil { - m.Stats = &tripperware.PrometheusResponseStats{} - } - if err := m.Stats.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQueryrange(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthQueryrange - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthQueryrange - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CachedResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQueryrange - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CachedResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CachedResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQueryrange - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthQueryrange - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthQueryrange - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Extents", wireType) - } - var msglen int - for shift := uint(0); ; shift 
+= 7 { - if shift >= 64 { - return ErrIntOverflowQueryrange - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQueryrange - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQueryrange - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Extents = append(m.Extents, Extent{}) - if err := m.Extents[len(m.Extents)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQueryrange(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthQueryrange - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthQueryrange - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Extent) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQueryrange - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Extent: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Extent: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Start", wireType) - } - m.Start = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQueryrange - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Start |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field End", wireType) - } - m.End = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQueryrange - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.End |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TraceId", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQueryrange - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthQueryrange - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthQueryrange - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.TraceId = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Response", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQueryrange - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQueryrange - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return 
ErrInvalidLengthQueryrange - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Response == nil { - m.Response = &types.Any{} - } - if err := m.Response.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQueryrange(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthQueryrange - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthQueryrange - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CachingOptions) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQueryrange - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CachingOptions: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CachingOptions: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Disabled", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQueryrange - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Disabled = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipQueryrange(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthQueryrange - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthQueryrange - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipQueryrange(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowQueryrange - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowQueryrange - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowQueryrange - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthQueryrange - } - iNdEx += length - if iNdEx < 0 { - return 0, ErrInvalidLengthQueryrange - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowQueryrange - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 
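The skipQueryrange routine being deleted here (its tail continues below) is the standard generated helper for stepping over unrecognized fields, and its dispatch is driven entirely by the wire type in the low three bits of each tag. A small standalone summary (not part of this patch) of the rules it applies:

```go
package main

import "fmt"

// wireTypeRule summarizes how skipQueryrange advances past a field it
// does not recognize, keyed by the protobuf wire type in the low three
// bits of the tag.
func wireTypeRule(wt int) string {
	switch wt {
	case 0:
		return "varint: consume bytes until one has the 0x80 continuation bit clear"
	case 1:
		return "fixed64: skip 8 bytes"
	case 2:
		return "length-delimited: decode a varint length, then skip that many bytes"
	case 3:
		return "start group: recurse until the matching end group"
	case 4:
		return "end group: stop; the enclosing group is finished"
	case 5:
		return "fixed32: skip 4 bytes"
	default:
		return "illegal wire type"
	}
}

func main() {
	for wt := 0; wt <= 5; wt++ {
		fmt.Printf("wire type %d -> %s\n", wt, wireTypeRule(wt))
	}
}
```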
{ - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipQueryrange(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - if iNdEx < 0 { - return 0, ErrInvalidLengthQueryrange - } - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthQueryrange = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowQueryrange = fmt.Errorf("proto: integer overflow") -) diff --git a/pkg/querier/tripperware/queryrange/queryrange.proto b/pkg/querier/tripperware/queryrange/queryrange.proto deleted file mode 100644 index 3642409504..0000000000 --- a/pkg/querier/tripperware/queryrange/queryrange.proto +++ /dev/null @@ -1,60 +0,0 @@ -syntax = "proto3"; - -package queryrange; - -option go_package = "queryrange"; - -import "gogoproto/gogo.proto"; -import "google/protobuf/duration.proto"; -import "google/protobuf/any.proto"; -import "github.com/cortexproject/cortex/pkg/querier/tripperware/query.proto"; - -option (gogoproto.marshaler_all) = true; - -option (gogoproto.unmarshaler_all) = true; - -message PrometheusRequest { - string path = 1; - int64 start = 2; - int64 end = 3; - int64 step = 4; - google.protobuf.Duration timeout = 5 [(gogoproto.stdduration) = true, (gogoproto.nullable) = false]; - string query = 6; - CachingOptions cachingOptions = 7 [(gogoproto.nullable) = false]; - repeated tripperware.PrometheusRequestHeader Headers = 8 [(gogoproto.jsontag) = "-"]; - string stats = 9; -} - -message PrometheusResponse { - string Status = 1 [(gogoproto.jsontag) = "status"]; - PrometheusData Data = 2 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "data,omitempty"]; - string ErrorType = 3 [(gogoproto.jsontag) = "errorType,omitempty"]; - string Error = 4 [(gogoproto.jsontag) = "error,omitempty"]; - repeated tripperware.PrometheusResponseHeader Headers = 5 [(gogoproto.jsontag) = "-"]; - repeated string Warnings = 6 [(gogoproto.jsontag) = "warnings,omitempty"]; -} - -message PrometheusData { - string ResultType = 1 [(gogoproto.jsontag) = "resultType"]; - repeated tripperware.SampleStream Result = 2 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "result"]; - tripperware.PrometheusResponseStats stats = 3 [(gogoproto.jsontag) = "stats,omitempty"]; -} - -message CachedResponse { - string key = 1 [(gogoproto.jsontag) = "key"]; - - // List of cached responses; non-overlapping and in order. - repeated Extent extents = 2 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "extents"]; -} - -message Extent { - int64 start = 1 [(gogoproto.jsontag) = "start"]; - int64 end = 2 [(gogoproto.jsontag) = "end"]; - // reserved the previous key to ensure cache transition - reserved 3; - string trace_id = 4 [(gogoproto.jsontag) = "-"]; - google.protobuf.Any response = 5 [(gogoproto.jsontag) = "response"]; -} - -message CachingOptions { - bool disabled = 1; -} diff --git a/pkg/querier/tripperware/queryrange/results_cache.go b/pkg/querier/tripperware/queryrange/results_cache.go index 5d5f073ed7..128a24130c 100644 --- a/pkg/querier/tripperware/queryrange/results_cache.go +++ b/pkg/querier/tripperware/queryrange/results_cache.go @@ -85,13 +85,19 @@ type PrometheusResponseExtractor struct{} // Extract extracts the response for a specific range from a response.
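The deleted queryrange.proto above documents the central invariant of the results cache: CachedResponse.extents is a list of non-overlapping extents ordered by start time (per the rest of this diff, these messages now live on as tripperware.CachedResponse and tripperware.Extent). A simplified sketch of that invariant, using a local stand-in struct rather than the generated type:

```go
package main

import "fmt"

// extent is a simplified stand-in for the generated Extent message,
// keeping only the fields the invariant concerns.
type extent struct{ Start, End int64 }

// validExtents checks the property documented on CachedResponse.extents:
// sorted by start time and non-overlapping.
func validExtents(extents []extent) bool {
	for i := 1; i < len(extents); i++ {
		if extents[i].Start < extents[i-1].End {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(validExtents([]extent{{0, 100}, {110, 210}})) // true
	fmt.Println(validExtents([]extent{{0, 100}, {90, 210}}))  // false: overlapping
}
```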
func (PrometheusResponseExtractor) Extract(start, end int64, from tripperware.Response) tripperware.Response { - promRes := from.(*PrometheusResponse) - return &PrometheusResponse{ + promRes := from.(*tripperware.PrometheusResponse) + return &tripperware.PrometheusResponse{ Status: StatusSuccess, - Data: PrometheusData{ + Data: tripperware.PrometheusData{ ResultType: promRes.Data.ResultType, - Result: extractMatrix(start, end, promRes.Data.Result), - Stats: extractStats(start, end, promRes.Data.Stats), + Result: tripperware.PrometheusQueryResult{ + Result: &tripperware.PrometheusQueryResult_Matrix{ + Matrix: &tripperware.Matrix{ + SampleStreams: extractSampleStreams(start, end, promRes.Data.Result.GetMatrix().GetSampleStreams()), + }, + }, + }, + Stats: extractStats(start, end, promRes.Data.Stats), }, Headers: promRes.Headers, Warnings: promRes.Warnings, @@ -101,10 +107,10 @@ func (PrometheusResponseExtractor) Extract(start, end int64, from tripperware.Re // ResponseWithoutHeaders is useful for caching data without headers: since // headers are not needed when sending the response back, dropping them saves space by reducing the size of the cached objects. func (PrometheusResponseExtractor) ResponseWithoutHeaders(resp tripperware.Response) tripperware.Response { - promRes := resp.(*PrometheusResponse) - return &PrometheusResponse{ + promRes := resp.(*tripperware.PrometheusResponse) + return &tripperware.PrometheusResponse{ Status: StatusSuccess, - Data: PrometheusData{ + Data: tripperware.PrometheusData{ ResultType: promRes.Data.ResultType, Result: promRes.Data.Result, Stats: promRes.Data.Stats, @@ -115,10 +121,10 @@ func (PrometheusResponseExtractor) ResponseWithoutHeaders(resp tripperware.Respo // ResponseWithoutStats returns the response without the stats information func (PrometheusResponseExtractor) ResponseWithoutStats(resp tripperware.Response) tripperware.Response { - promRes := resp.(*PrometheusResponse) - return &PrometheusResponse{ + promRes := resp.(*tripperware.PrometheusResponse) + return &tripperware.PrometheusResponse{ Status: StatusSuccess, - Data: PrometheusData{ + Data: tripperware.PrometheusData{ ResultType: promRes.Data.ResultType, Result: promRes.Data.Result, }, @@ -223,7 +229,7 @@ func (s resultsCache) Do(ctx context.Context, r tripperware.Request) (tripperwar var ( key = s.splitter.GenerateCacheKey(tenant.JoinTenantIDs(tenantIDs), r) - extents []Extent + extents []tripperware.Extent response tripperware.Response ) @@ -378,7 +384,7 @@ func getHeaderValuesWithName(r tripperware.Response, headerName string) (headerV return } -func (s resultsCache) handleMiss(ctx context.Context, r tripperware.Request, maxCacheTime int64) (tripperware.Response, []Extent, error) { +func (s resultsCache) handleMiss(ctx context.Context, r tripperware.Request, maxCacheTime int64) (tripperware.Response, []tripperware.Extent, error) { level.Debug(util_log.WithContext(ctx, s.logger)).Log("msg", "handle miss", "start", r.GetStart(), "spanID", jaegerSpanID(ctx)) response, err := s.next.Do(ctx, r) if err != nil { @@ -386,7 +392,7 @@ func (s resultsCache) handleMiss(ctx context.Context, r tripperware.Request, max } if !s.shouldCacheResponse(ctx, r, response, maxCacheTime) { - return response, []Extent{}, nil + return response, []tripperware.Extent{}, nil } extent, err := toExtent(ctx, r, s.extractor.ResponseWithoutHeaders(response)) @@ -394,13 +400,13 @@ func (s resultsCache) handleMiss(ctx context.Context, r tripperware.Request, max return nil, nil, err } - extents := []Extent{ + extents :=
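The rewritten Extract above adapts to the new tripperware.PrometheusQueryResult matrix wrapper, but the underlying idea is unchanged: trim every cached sample stream to the requested [start, end] window. A simplified, self-contained sketch of that trimming, using plain millisecond timestamps instead of the real SampleStream type (inclusive bounds assumed here for illustration):

```go
package main

import "fmt"

// trimSamples keeps only the timestamps (in ms) inside [start, end],
// mirroring what extractSampleStreams/extractSampleStream do per
// stream in the real code.
func trimSamples(tsMs []int64, start, end int64) []int64 {
	out := make([]int64, 0, len(tsMs))
	for _, ts := range tsMs {
		if ts >= start && ts <= end {
			out = append(out, ts)
		}
	}
	return out
}

func main() {
	cached := []int64{10_000, 20_000, 30_000, 40_000}
	fmt.Println(trimSamples(cached, 15_000, 35_000)) // [20000 30000]
}
```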
[]tripperware.Extent{ extent, } return response, extents, nil } -func (s resultsCache) handleHit(ctx context.Context, r tripperware.Request, extents []Extent, maxCacheTime int64) (tripperware.Response, []Extent, error) { +func (s resultsCache) handleHit(ctx context.Context, r tripperware.Request, extents []tripperware.Extent, maxCacheTime int64) (tripperware.Response, []tripperware.Extent, error) { var ( reqResps []tripperware.RequestResponse err error @@ -452,7 +458,7 @@ func (s resultsCache) handleHit(ctx context.Context, r tripperware.Request, exte if err != nil { return nil, nil, err } - mergedExtents := make([]Extent, 0, len(extents)) + mergedExtents := make([]tripperware.Extent, 0, len(extents)) for i := 1; i < len(extents); i++ { if accumulator.End+r.GetStep() < extents[i].Start { @@ -473,7 +479,7 @@ func (s resultsCache) handleHit(ctx context.Context, r tripperware.Request, exte accumulator.TraceId = jaegerTraceID(ctx) accumulator.End = extents[i].End - currentRes, err := extents[i].toResponse() + currentRes, err := extents[i].ToResponse() if err != nil { return nil, nil, err } @@ -495,15 +501,15 @@ func (s resultsCache) handleHit(ctx context.Context, r tripperware.Request, exte type accumulator struct { tripperware.Response - Extent + tripperware.Extent } -func merge(extents []Extent, acc *accumulator) ([]Extent, error) { +func merge(extents []tripperware.Extent, acc *accumulator) ([]tripperware.Extent, error) { any, err := types.MarshalAny(acc.Response) if err != nil { return nil, err } - return append(extents, Extent{ + return append(extents, tripperware.Extent{ Start: acc.Extent.Start, End: acc.Extent.End, Response: any, @@ -511,8 +517,8 @@ func merge(extents []Extent, acc *accumulator) ([]Extent, error) { }), nil } -func newAccumulator(base Extent) (*accumulator, error) { - res, err := base.toResponse() +func newAccumulator(base tripperware.Extent) (*accumulator, error) { + res, err := base.ToResponse() if err != nil { return nil, err } @@ -522,12 +528,12 @@ func newAccumulator(base Extent) (*accumulator, error) { }, nil } -func toExtent(ctx context.Context, req tripperware.Request, res tripperware.Response) (Extent, error) { +func toExtent(ctx context.Context, req tripperware.Request, res tripperware.Response) (tripperware.Extent, error) { any, err := types.MarshalAny(res) if err != nil { - return Extent{}, err + return tripperware.Extent{}, err } - return Extent{ + return tripperware.Extent{ Start: req.GetStart(), End: req.GetEnd(), Response: any, @@ -537,7 +543,7 @@ func toExtent(ctx context.Context, req tripperware.Request, res tripperware.Resp // partition calculates the required requests to satisfy req given the cached data. // extents must be in order by start time. 
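merge and newAccumulator above round-trip responses through google.protobuf.Any: types.MarshalAny packs the accumulated response into an extent, and the now-exported tripperware Extent.ToResponse unpacks it again, just as the local toResponse helper removed later in this file did. A minimal sketch of that gogo/protobuf pattern, using types.StringValue as a stand-in payload:

```go
package main

import (
	"fmt"

	"github.com/gogo/protobuf/types"
)

func main() {
	// Pack: wrap a concrete message in an Any, as merge() does with the
	// accumulated response via types.MarshalAny.
	payload := &types.StringValue{Value: "cached response payload"}
	anyMsg, err := types.MarshalAny(payload)
	if err != nil {
		panic(err)
	}

	// Unpack: build an empty instance from the Any's type URL, then
	// unmarshal into it; the same two steps ToResponse performs.
	msg, err := types.EmptyAny(anyMsg)
	if err != nil {
		panic(err)
	}
	if err := types.UnmarshalAny(anyMsg, msg); err != nil {
		panic(err)
	}
	fmt.Println(msg.(*types.StringValue).Value) // cached response payload
}
```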
-func (s resultsCache) partition(req tripperware.Request, extents []Extent) ([]tripperware.Request, []tripperware.Response, error) { +func (s resultsCache) partition(req tripperware.Request, extents []tripperware.Extent) ([]tripperware.Request, []tripperware.Response, error) { var requests []tripperware.Request var cachedResponses []tripperware.Response start := req.GetStart() @@ -563,7 +569,7 @@ func (s resultsCache) partition(req tripperware.Request, extents []Extent) ([]tr r := req.WithStartEnd(start, extent.Start) requests = append(requests, r) } - res, err := extent.toResponse() + res, err := extent.ToResponse() if err != nil { return nil, nil, err } @@ -587,13 +593,13 @@ func (s resultsCache) partition(req tripperware.Request, extents []Extent) ([]tr return requests, cachedResponses, nil } -func (s resultsCache) filterRecentExtents(req tripperware.Request, maxCacheFreshness time.Duration, extents []Extent) ([]Extent, error) { +func (s resultsCache) filterRecentExtents(req tripperware.Request, maxCacheFreshness time.Duration, extents []tripperware.Extent) ([]tripperware.Extent, error) { maxCacheTime := (int64(model.Now().Add(-maxCacheFreshness)) / req.GetStep()) * req.GetStep() for i := range extents { // Never cache data for the latest freshness period. if extents[i].End > maxCacheTime { extents[i].End = maxCacheTime - res, err := extents[i].toResponse() + res, err := extents[i].ToResponse() if err != nil { return nil, err } @@ -608,13 +614,13 @@ func (s resultsCache) filterRecentExtents(req tripperware.Request, maxCacheFresh return extents, nil } -func (s resultsCache) get(ctx context.Context, key string) ([]Extent, bool) { +func (s resultsCache) get(ctx context.Context, key string) ([]tripperware.Extent, bool) { found, bufs, _ := s.cache.Fetch(ctx, []string{cache.HashKey(key)}) if len(found) != 1 { return nil, false } - var resp CachedResponse + var resp tripperware.CachedResponse log, ctx := spanlogger.New(ctx, "unmarshal-extent") //nolint:ineffassign,staticcheck defer log.Finish() @@ -640,8 +646,8 @@ func (s resultsCache) get(ctx context.Context, key string) ([]Extent, bool) { return resp.Extents, true } -func (s resultsCache) put(ctx context.Context, key string, extents []Extent) { - buf, err := proto.Marshal(&CachedResponse{ +func (s resultsCache) put(ctx context.Context, key string, extents []tripperware.Extent) { + buf, err := proto.Marshal(&tripperware.CachedResponse{ Key: key, Extents: extents, }) @@ -702,7 +708,7 @@ func extractStats(start, end int64, stats *tripperware.PrometheusResponseStats) return result } -func extractMatrix(start, end int64, matrix []tripperware.SampleStream) []tripperware.SampleStream { +func extractSampleStreams(start, end int64, matrix []tripperware.SampleStream) []tripperware.SampleStream { result := make([]tripperware.SampleStream, 0, len(matrix)) for _, stream := range matrix { extracted, ok := extractSampleStream(start, end, stream) @@ -728,20 +734,3 @@ func extractSampleStream(start, end int64, stream tripperware.SampleStream) (tri } return result, true } - -func (e *Extent) toResponse() (tripperware.Response, error) { - msg, err := types.EmptyAny(e.Response) - if err != nil { - return nil, err - } - - if err := types.UnmarshalAny(e.Response, msg); err != nil { - return nil, err - } - - resp, ok := msg.(tripperware.Response) - if !ok { - return nil, fmt.Errorf("bad cached type") - } - return resp, nil -} diff --git a/pkg/querier/tripperware/queryrange/results_cache_test.go b/pkg/querier/tripperware/queryrange/results_cache_test.go index 
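The partition method above performs a single sweep over the sorted extents, emitting a sub-request for every uncovered gap and a cached response for every overlap. A simplified standalone sketch of the same sweep over half-open ranges (the real code also handles steps, stats, and the single-step special case):

```go
package main

import "fmt"

type span struct{ Start, End int64 }

// partition splits [start, end) against sorted, non-overlapping cached
// extents, returning the uncached gaps that still need to be fetched.
func partition(start, end int64, extents []span) (gaps []span) {
	for _, e := range extents {
		if e.End <= start { // entirely before the request
			continue
		}
		if e.Start >= end { // entirely after the request
			break
		}
		if start < e.Start { // gap before this extent
			gaps = append(gaps, span{start, e.Start})
		}
		if e.End > start {
			start = e.End
		}
	}
	if start < end { // trailing gap after the last extent
		gaps = append(gaps, span{start, end})
	}
	return gaps
}

func main() {
	// Mirrors the "multiple partial hits" test case below: request
	// [100, 200) against cached [50, 120) and [160, 250) leaves the
	// single gap [120, 160) to fetch.
	fmt.Println(partition(100, 200, []span{{50, 120}, {160, 250}}))
}
```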
b2de689131..1c55a8b47c 100644 --- a/pkg/querier/tripperware/queryrange/results_cache_test.go +++ b/pkg/querier/tripperware/queryrange/results_cache_test.go @@ -3,6 +3,7 @@ package queryrange import ( "context" "fmt" + "net/http" "strconv" "testing" "time" @@ -28,7 +29,7 @@ const ( ) var ( - parsedRequest = &PrometheusRequest{ + parsedRequest = &tripperware.PrometheusRequest{ Path: "/api/v1/query_range", Start: 1536673680 * 1e3, End: 1536716898 * 1e3, @@ -36,28 +37,24 @@ var ( Query: "sum(container_memory_rss) by (namespace)", Stats: "all", } - reqHeaders = []*tripperware.PrometheusRequestHeader{ - { - Name: "Test-Header", - Values: []string{"test"}, - }, - } - noCacheRequest = &PrometheusRequest{ + reqHeaders = http.Header(map[string][]string{"Test-Header": {"test"}}) + + noCacheRequest = &tripperware.PrometheusRequest{ Path: "/api/v1/query_range", Start: 1536673680 * 1e3, End: 1536716898 * 1e3, Step: 120 * 1e3, Query: "sum(container_memory_rss) by (namespace)", - CachingOptions: CachingOptions{Disabled: true}, + CachingOptions: tripperware.CachingOptions{Disabled: true}, } - noCacheRequestWithStats = &PrometheusRequest{ + noCacheRequestWithStats = &tripperware.PrometheusRequest{ Path: "/api/v1/query_range", Start: 1536673680 * 1e3, End: 1536716898 * 1e3, Step: 120 * 1e3, Stats: "all", Query: "sum(container_memory_rss) by (namespace)", - CachingOptions: CachingOptions{Disabled: true}, + CachingOptions: tripperware.CachingOptions{Disabled: true}, } respHeadersJson = []*tripperware.PrometheusResponseHeader{ { @@ -72,38 +69,49 @@ var ( Values: []string{applicationProtobuf}, }, } - - parsedResponse = &PrometheusResponse{ + parsedResponse = &tripperware.PrometheusResponse{ Status: "success", - Data: PrometheusData{ + Data: tripperware.PrometheusData{ ResultType: model.ValMatrix.String(), - Result: []tripperware.SampleStream{ - { - Labels: []cortexpb.LabelAdapter{ - {Name: "foo", Value: "bar"}, - }, - Samples: []cortexpb.Sample{ - {Value: 137, TimestampMs: 1536673680000}, - {Value: 137, TimestampMs: 1536673780000}, + Result: tripperware.PrometheusQueryResult{ + Result: &tripperware.PrometheusQueryResult_Matrix{ + Matrix: &tripperware.Matrix{ + SampleStreams: []tripperware.SampleStream{ + { + Labels: []cortexpb.LabelAdapter{ + {Name: "foo", Value: "bar"}, + }, + Samples: []cortexpb.Sample{ + {Value: 137, TimestampMs: 1536673680000}, + {Value: 137, TimestampMs: 1536673780000}, + }, + }, + }, }, }, }, }, } - parsedResponseWithWarnings = &PrometheusResponse{ + parsedResponseWithWarnings = &tripperware.PrometheusResponse{ Status: "success", Warnings: []string{"test-warn"}, - Data: PrometheusData{ + Data: tripperware.PrometheusData{ ResultType: model.ValMatrix.String(), - Result: []tripperware.SampleStream{ - { - Labels: []cortexpb.LabelAdapter{ - {Name: "foo", Value: "bar"}, - }, - Samples: []cortexpb.Sample{ - {Value: 137, TimestampMs: 1536673680000}, - {Value: 137, TimestampMs: 1536673780000}, + Result: tripperware.PrometheusQueryResult{ + Result: &tripperware.PrometheusQueryResult_Matrix{ + Matrix: &tripperware.Matrix{ + SampleStreams: []tripperware.SampleStream{ + { + Labels: []cortexpb.LabelAdapter{ + {Name: "foo", Value: "bar"}, + }, + Samples: []cortexpb.Sample{ + {Value: 137, TimestampMs: 1536673680000}, + {Value: 137, TimestampMs: 1536673780000}, + }, + }, + }, }, }, }, @@ -111,11 +119,11 @@ var ( } ) -func mkAPIResponse(start, end, step int64) *PrometheusResponse { +func mkAPIResponse(start, end, step int64) *tripperware.PrometheusResponse { return mkAPIResponseWithStats(start, end, step, 
false) } -func mkAPIResponseWithStats(start, end, step int64, withStats bool) *PrometheusResponse { +func mkAPIResponseWithStats(start, end, step int64, withStats bool) *tripperware.PrometheusResponse { var samples []cortexpb.Sample var stats *tripperware.PrometheusResponseStats if withStats { @@ -137,42 +145,48 @@ func mkAPIResponseWithStats(start, end, step int64, withStats bool) *PrometheusR } } - return &PrometheusResponse{ + return &tripperware.PrometheusResponse{ Status: StatusSuccess, - Data: PrometheusData{ + Data: tripperware.PrometheusData{ ResultType: matrix, Stats: stats, - Result: []tripperware.SampleStream{ - { - Labels: []cortexpb.LabelAdapter{ - {Name: "foo", Value: "bar"}, + Result: tripperware.PrometheusQueryResult{ + Result: &tripperware.PrometheusQueryResult_Matrix{ + Matrix: &tripperware.Matrix{ + SampleStreams: []tripperware.SampleStream{ + { + Labels: []cortexpb.LabelAdapter{ + {Name: "foo", Value: "bar"}, + }, + Samples: samples, + }, + }, }, - Samples: samples, }, }, }, } } -func mkExtentWithStats(start, end int64) Extent { +func mkExtentWithStats(start, end int64) tripperware.Extent { return mkExtentWithStepWithStats(start, end, 10, true) } -func mkExtent(start, end int64) Extent { +func mkExtent(start, end int64) tripperware.Extent { return mkExtentWithStepWithStats(start, end, 10, false) } -func mkExtentWithStep(start, end, step int64) Extent { +func mkExtentWithStep(start, end, step int64) tripperware.Extent { return mkExtentWithStepWithStats(start, end, step, false) } -func mkExtentWithStepWithStats(start, end, step int64, withStats bool) Extent { +func mkExtentWithStepWithStats(start, end, step int64, withStats bool) tripperware.Extent { res := mkAPIResponseWithStats(start, end, step, withStats) any, err := types.MarshalAny(res) if err != nil { panic(err) } - return Extent{ + return tripperware.Extent{ Start: start, End: end, Response: any, @@ -275,8 +289,8 @@ func TestShouldCache(t *testing.T) { // Tests only for cacheControlHeader { name: "does not contain the cacheControl header", - request: &PrometheusRequest{Query: "metric"}, - input: tripperware.Response(&PrometheusResponse{ + request: &tripperware.PrometheusRequest{Query: "metric"}, + input: tripperware.Response(&tripperware.PrometheusResponse{ Headers: []*tripperware.PrometheusResponseHeader{ { Name: "meaninglessheader", @@ -288,8 +302,8 @@ func TestShouldCache(t *testing.T) { }, { name: "does contain the cacheControl header which has the value", - request: &PrometheusRequest{Query: "metric"}, - input: tripperware.Response(&PrometheusResponse{ + request: &tripperware.PrometheusRequest{Query: "metric"}, + input: tripperware.Response(&tripperware.PrometheusResponse{ Headers: []*tripperware.PrometheusResponseHeader{ { Name: cacheControlHeader, @@ -301,8 +315,8 @@ func TestShouldCache(t *testing.T) { }, { name: "cacheControl header contains extra values but still good", - request: &PrometheusRequest{Query: "metric"}, - input: tripperware.Response(&PrometheusResponse{ + request: &tripperware.PrometheusRequest{Query: "metric"}, + input: tripperware.Response(&tripperware.PrometheusResponse{ Headers: []*tripperware.PrometheusResponseHeader{ { Name: cacheControlHeader, @@ -314,22 +328,22 @@ func TestShouldCache(t *testing.T) { }, { name: "broken response", - request: &PrometheusRequest{Query: "metric"}, - input: tripperware.Response(&PrometheusResponse{}), + request: &tripperware.PrometheusRequest{Query: "metric"}, + input: tripperware.Response(&tripperware.PrometheusResponse{}), expected: true, }, { name: 
"nil headers", - request: &PrometheusRequest{Query: "metric"}, - input: tripperware.Response(&PrometheusResponse{ + request: &tripperware.PrometheusRequest{Query: "metric"}, + input: tripperware.Response(&tripperware.PrometheusResponse{ Headers: []*tripperware.PrometheusResponseHeader{nil}, }), expected: true, }, { name: "had cacheControl header but no values", - request: &PrometheusRequest{Query: "metric"}, - input: tripperware.Response(&PrometheusResponse{ + request: &tripperware.PrometheusRequest{Query: "metric"}, + input: tripperware.Response(&tripperware.PrometheusResponse{ Headers: []*tripperware.PrometheusResponseHeader{{Name: cacheControlHeader}}, }), expected: true, @@ -337,151 +351,151 @@ func TestShouldCache(t *testing.T) { // @ modifier on vector selectors. { name: "@ modifier on vector selector, before end, before maxCacheTime", - request: &PrometheusRequest{Query: "metric @ 123", End: 125000}, - input: tripperware.Response(&PrometheusResponse{}), + request: &tripperware.PrometheusRequest{Query: "metric @ 123", End: 125000}, + input: tripperware.Response(&tripperware.PrometheusResponse{}), expected: true, }, { name: "@ modifier on vector selector, after end, before maxCacheTime", - request: &PrometheusRequest{Query: "metric @ 127", End: 125000}, - input: tripperware.Response(&PrometheusResponse{}), + request: &tripperware.PrometheusRequest{Query: "metric @ 127", End: 125000}, + input: tripperware.Response(&tripperware.PrometheusResponse{}), expected: false, }, { name: "@ modifier on vector selector, before end, after maxCacheTime", - request: &PrometheusRequest{Query: "metric @ 151", End: 200000}, - input: tripperware.Response(&PrometheusResponse{}), + request: &tripperware.PrometheusRequest{Query: "metric @ 151", End: 200000}, + input: tripperware.Response(&tripperware.PrometheusResponse{}), expected: false, }, { name: "@ modifier on vector selector, after end, after maxCacheTime", - request: &PrometheusRequest{Query: "metric @ 151", End: 125000}, - input: tripperware.Response(&PrometheusResponse{}), + request: &tripperware.PrometheusRequest{Query: "metric @ 151", End: 125000}, + input: tripperware.Response(&tripperware.PrometheusResponse{}), expected: false, }, { name: "@ modifier on vector selector with start() before maxCacheTime", - request: &PrometheusRequest{Query: "metric @ start()", Start: 100000, End: 200000}, - input: tripperware.Response(&PrometheusResponse{}), + request: &tripperware.PrometheusRequest{Query: "metric @ start()", Start: 100000, End: 200000}, + input: tripperware.Response(&tripperware.PrometheusResponse{}), expected: true, }, { name: "@ modifier on vector selector with end() after maxCacheTime", - request: &PrometheusRequest{Query: "metric @ end()", Start: 100000, End: 200000}, - input: tripperware.Response(&PrometheusResponse{}), + request: &tripperware.PrometheusRequest{Query: "metric @ end()", Start: 100000, End: 200000}, + input: tripperware.Response(&tripperware.PrometheusResponse{}), expected: false, }, // @ modifier on matrix selectors. 
{ name: "@ modifier on matrix selector, before end, before maxCacheTime", - request: &PrometheusRequest{Query: "rate(metric[5m] @ 123)", End: 125000}, - input: tripperware.Response(&PrometheusResponse{}), + request: &tripperware.PrometheusRequest{Query: "rate(metric[5m] @ 123)", End: 125000}, + input: tripperware.Response(&tripperware.PrometheusResponse{}), expected: true, }, { name: "@ modifier on matrix selector, after end, before maxCacheTime", - request: &PrometheusRequest{Query: "rate(metric[5m] @ 127)", End: 125000}, - input: tripperware.Response(&PrometheusResponse{}), + request: &tripperware.PrometheusRequest{Query: "rate(metric[5m] @ 127)", End: 125000}, + input: tripperware.Response(&tripperware.PrometheusResponse{}), expected: false, }, { name: "@ modifier on matrix selector, before end, after maxCacheTime", - request: &PrometheusRequest{Query: "rate(metric[5m] @ 151)", End: 200000}, - input: tripperware.Response(&PrometheusResponse{}), + request: &tripperware.PrometheusRequest{Query: "rate(metric[5m] @ 151)", End: 200000}, + input: tripperware.Response(&tripperware.PrometheusResponse{}), expected: false, }, { name: "@ modifier on matrix selector, after end, after maxCacheTime", - request: &PrometheusRequest{Query: "rate(metric[5m] @ 151)", End: 125000}, - input: tripperware.Response(&PrometheusResponse{}), + request: &tripperware.PrometheusRequest{Query: "rate(metric[5m] @ 151)", End: 125000}, + input: tripperware.Response(&tripperware.PrometheusResponse{}), expected: false, }, { name: "@ modifier on matrix selector with start() before maxCacheTime", - request: &PrometheusRequest{Query: "rate(metric[5m] @ start())", Start: 100000, End: 200000}, - input: tripperware.Response(&PrometheusResponse{}), + request: &tripperware.PrometheusRequest{Query: "rate(metric[5m] @ start())", Start: 100000, End: 200000}, + input: tripperware.Response(&tripperware.PrometheusResponse{}), expected: true, }, { name: "@ modifier on matrix selector with end() after maxCacheTime", - request: &PrometheusRequest{Query: "rate(metric[5m] @ end())", Start: 100000, End: 200000}, - input: tripperware.Response(&PrometheusResponse{}), + request: &tripperware.PrometheusRequest{Query: "rate(metric[5m] @ end())", Start: 100000, End: 200000}, + input: tripperware.Response(&tripperware.PrometheusResponse{}), expected: false, }, // @ modifier on subqueries. 
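The @ modifier and offset cases in this test table all exercise one cacheability rule: a response may be cached only if every timestamp the query can reference is no later than the request end and no later than maxCacheTime, since newer data may still change; a negative offset reads future data and is therefore never cached. A hedged sketch of that predicate (the atTimestampsMs helper is hypothetical, standing in for a walk of the parsed PromQL AST with start()/end() already resolved, and the 150000 ms maxCacheTime is only implied by these fixtures):

```go
package main

import "fmt"

// isAtModifierCachable mirrors the rule the tests above exercise:
// every @ timestamp must be <= the request end and <= maxCacheTime.
func isAtModifierCachable(atTimestampsMs []int64, endMs, maxCacheTimeMs int64) bool {
	for _, ts := range atTimestampsMs {
		if ts > endMs || ts > maxCacheTimeMs {
			return false
		}
	}
	return true
}

func main() {
	const maxCacheTime = 150_000
	fmt.Println(isAtModifierCachable([]int64{123_000}, 125_000, maxCacheTime)) // true
	fmt.Println(isAtModifierCachable([]int64{127_000}, 125_000, maxCacheTime)) // false: after end
	fmt.Println(isAtModifierCachable([]int64{151_000}, 200_000, maxCacheTime)) // false: after maxCacheTime
}
```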
{ name: "@ modifier on subqueries, before end, before maxCacheTime", - request: &PrometheusRequest{Query: "sum_over_time(rate(metric[1m])[10m:1m] @ 123)", End: 125000}, - input: tripperware.Response(&PrometheusResponse{}), + request: &tripperware.PrometheusRequest{Query: "sum_over_time(rate(metric[1m])[10m:1m] @ 123)", End: 125000}, + input: tripperware.Response(&tripperware.PrometheusResponse{}), expected: true, }, { name: "@ modifier on subqueries, after end, before maxCacheTime", - request: &PrometheusRequest{Query: "sum_over_time(rate(metric[1m])[10m:1m] @ 127)", End: 125000}, - input: tripperware.Response(&PrometheusResponse{}), + request: &tripperware.PrometheusRequest{Query: "sum_over_time(rate(metric[1m])[10m:1m] @ 127)", End: 125000}, + input: tripperware.Response(&tripperware.PrometheusResponse{}), expected: false, }, { name: "@ modifier on subqueries, before end, after maxCacheTime", - request: &PrometheusRequest{Query: "sum_over_time(rate(metric[1m])[10m:1m] @ 151)", End: 200000}, - input: tripperware.Response(&PrometheusResponse{}), + request: &tripperware.PrometheusRequest{Query: "sum_over_time(rate(metric[1m])[10m:1m] @ 151)", End: 200000}, + input: tripperware.Response(&tripperware.PrometheusResponse{}), expected: false, }, { name: "@ modifier on subqueries, after end, after maxCacheTime", - request: &PrometheusRequest{Query: "sum_over_time(rate(metric[1m])[10m:1m] @ 151)", End: 125000}, - input: tripperware.Response(&PrometheusResponse{}), + request: &tripperware.PrometheusRequest{Query: "sum_over_time(rate(metric[1m])[10m:1m] @ 151)", End: 125000}, + input: tripperware.Response(&tripperware.PrometheusResponse{}), expected: false, }, { name: "@ modifier on subqueries with start() before maxCacheTime", - request: &PrometheusRequest{Query: "sum_over_time(rate(metric[1m])[10m:1m] @ start())", Start: 100000, End: 200000}, - input: tripperware.Response(&PrometheusResponse{}), + request: &tripperware.PrometheusRequest{Query: "sum_over_time(rate(metric[1m])[10m:1m] @ start())", Start: 100000, End: 200000}, + input: tripperware.Response(&tripperware.PrometheusResponse{}), expected: true, }, { name: "@ modifier on subqueries with end() after maxCacheTime", - request: &PrometheusRequest{Query: "sum_over_time(rate(metric[1m])[10m:1m] @ end())", Start: 100000, End: 200000}, - input: tripperware.Response(&PrometheusResponse{}), + request: &tripperware.PrometheusRequest{Query: "sum_over_time(rate(metric[1m])[10m:1m] @ end())", Start: 100000, End: 200000}, + input: tripperware.Response(&tripperware.PrometheusResponse{}), expected: false, }, // offset on vector selectors. { name: "positive offset on vector selector", - request: &PrometheusRequest{Query: "metric offset 10ms", End: 125000}, - input: tripperware.Response(&PrometheusResponse{}), + request: &tripperware.PrometheusRequest{Query: "metric offset 10ms", End: 125000}, + input: tripperware.Response(&tripperware.PrometheusResponse{}), expected: true, }, { name: "negative offset on vector selector", - request: &PrometheusRequest{Query: "metric offset -10ms", End: 125000}, - input: tripperware.Response(&PrometheusResponse{}), + request: &tripperware.PrometheusRequest{Query: "metric offset -10ms", End: 125000}, + input: tripperware.Response(&tripperware.PrometheusResponse{}), expected: false, }, // offset on matrix selectors. 
{ name: "positive offset on matrix selector", - request: &PrometheusRequest{Query: "rate(metric[5m] offset 10ms)", End: 125000}, - input: tripperware.Response(&PrometheusResponse{}), + request: &tripperware.PrometheusRequest{Query: "rate(metric[5m] offset 10ms)", End: 125000}, + input: tripperware.Response(&tripperware.PrometheusResponse{}), expected: true, }, { name: "negative offset on matrix selector", - request: &PrometheusRequest{Query: "rate(metric[5m] offset -10ms)", End: 125000}, - input: tripperware.Response(&PrometheusResponse{}), + request: &tripperware.PrometheusRequest{Query: "rate(metric[5m] offset -10ms)", End: 125000}, + input: tripperware.Response(&tripperware.PrometheusResponse{}), expected: false, }, // offset on subqueries. { name: "positive offset on subqueries", - request: &PrometheusRequest{Query: "sum_over_time(rate(metric[1m])[10m:1m] offset 10ms)", End: 125000}, - input: tripperware.Response(&PrometheusResponse{}), + request: &tripperware.PrometheusRequest{Query: "sum_over_time(rate(metric[1m])[10m:1m] offset 10ms)", End: 125000}, + input: tripperware.Response(&tripperware.PrometheusResponse{}), expected: true, }, { name: "negative offset on subqueries", - request: &PrometheusRequest{Query: "sum_over_time(rate(metric[1m])[10m:1m] offset -10ms)", End: 125000}, - input: tripperware.Response(&PrometheusResponse{}), + request: &tripperware.PrometheusRequest{Query: "sum_over_time(rate(metric[1m])[10m:1m] offset -10ms)", End: 125000}, + input: tripperware.Response(&tripperware.PrometheusResponse{}), expected: false, }, } { @@ -500,17 +514,17 @@ func TestPartition(t *testing.T) { for _, tc := range []struct { name string input tripperware.Request - prevCachedResponse []Extent + prevCachedResponse []tripperware.Extent expectedRequests []tripperware.Request expectedCachedResponse []tripperware.Response }{ { name: "Test a complete hit.", - input: &PrometheusRequest{ + input: &tripperware.PrometheusRequest{ Start: 0, End: 100, }, - prevCachedResponse: []Extent{ + prevCachedResponse: []tripperware.Extent{ mkExtent(0, 100), }, expectedCachedResponse: []tripperware.Response{ @@ -520,30 +534,30 @@ func TestPartition(t *testing.T) { { name: "Test with a complete miss.", - input: &PrometheusRequest{ + input: &tripperware.PrometheusRequest{ Start: 0, End: 100, }, - prevCachedResponse: []Extent{ + prevCachedResponse: []tripperware.Extent{ mkExtent(110, 210), }, expectedRequests: []tripperware.Request{ - &PrometheusRequest{ + &tripperware.PrometheusRequest{ Start: 0, End: 100, }}, }, { name: "Test a partial hit.", - input: &PrometheusRequest{ + input: &tripperware.PrometheusRequest{ Start: 0, End: 100, }, - prevCachedResponse: []Extent{ + prevCachedResponse: []tripperware.Extent{ mkExtent(50, 100), }, expectedRequests: []tripperware.Request{ - &PrometheusRequest{ + &tripperware.PrometheusRequest{ Start: 0, End: 50, }, @@ -554,16 +568,16 @@ func TestPartition(t *testing.T) { }, { name: "Test multiple partial hits.", - input: &PrometheusRequest{ + input: &tripperware.PrometheusRequest{ Start: 100, End: 200, }, - prevCachedResponse: []Extent{ + prevCachedResponse: []tripperware.Extent{ mkExtent(50, 120), mkExtent(160, 250), }, expectedRequests: []tripperware.Request{ - &PrometheusRequest{ + &tripperware.PrometheusRequest{ Start: 120, End: 160, }, @@ -575,16 +589,16 @@ func TestPartition(t *testing.T) { }, { name: "Partial hits with tiny gap.", - input: &PrometheusRequest{ + input: &tripperware.PrometheusRequest{ Start: 100, End: 160, }, - prevCachedResponse: []Extent{ + 
prevCachedResponse: []tripperware.Extent{ mkExtent(50, 120), mkExtent(122, 130), }, expectedRequests: []tripperware.Request{ - &PrometheusRequest{ + &tripperware.PrometheusRequest{ Start: 120, End: 160, }, @@ -595,15 +609,15 @@ }, { name: "Extent is outside the range and the request has a single step (same start and end).", - input: &PrometheusRequest{ + input: &tripperware.PrometheusRequest{ Start: 100, End: 100, }, - prevCachedResponse: []Extent{ + prevCachedResponse: []tripperware.Extent{ mkExtent(50, 90), }, expectedRequests: []tripperware.Request{ - &PrometheusRequest{ + &tripperware.PrometheusRequest{ Start: 100, End: 100, }, @@ -612,11 +626,11 @@ { name: "Test when hit has a large step and only a single sample extent.", // If there is only a single sample in the split interval, start and end will be the same. - input: &PrometheusRequest{ + input: &tripperware.PrometheusRequest{ Start: 100, End: 100, }, - prevCachedResponse: []Extent{ + prevCachedResponse: []tripperware.Extent{ mkExtent(100, 100), }, expectedCachedResponse: []tripperware.Response{ @@ -625,11 +639,11 @@ }, { name: "[Stats] Test a complete hit.", - input: &PrometheusRequest{ + input: &tripperware.PrometheusRequest{ Start: 0, End: 100, }, - prevCachedResponse: []Extent{ + prevCachedResponse: []tripperware.Extent{ mkExtentWithStats(0, 100), }, expectedCachedResponse: []tripperware.Response{ @@ -639,30 +653,30 @@ { name: "[Stats] Test with a complete miss.", - input: &PrometheusRequest{ + input: &tripperware.PrometheusRequest{ Start: 0, End: 100, }, - prevCachedResponse: []Extent{ + prevCachedResponse: []tripperware.Extent{ mkExtentWithStats(110, 210), }, expectedRequests: []tripperware.Request{ - &PrometheusRequest{ + &tripperware.PrometheusRequest{ Start: 0, End: 100, }}, }, { name: "[stats] Test a partial hit.", - input: &PrometheusRequest{ + input: &tripperware.PrometheusRequest{ Start: 0, End: 100, }, - prevCachedResponse: []Extent{ + prevCachedResponse: []tripperware.Extent{ mkExtentWithStats(50, 100), }, expectedRequests: []tripperware.Request{ - &PrometheusRequest{ + &tripperware.PrometheusRequest{ Start: 0, End: 50, }, @@ -673,16 +687,16 @@ }, { name: "[stats] Test multiple partial hits.", - input: &PrometheusRequest{ + input: &tripperware.PrometheusRequest{ Start: 100, End: 200, }, - prevCachedResponse: []Extent{ + prevCachedResponse: []tripperware.Extent{ mkExtentWithStats(50, 120), mkExtentWithStats(160, 250), }, expectedRequests: []tripperware.Request{ - &PrometheusRequest{ + &tripperware.PrometheusRequest{ Start: 120, End: 160, }, @@ -694,16 +708,16 @@ }, { name: "[stats] Partial hits with tiny gap.", - input: &PrometheusRequest{ + input: &tripperware.PrometheusRequest{ Start: 100, End: 160, }, - prevCachedResponse: []Extent{ + prevCachedResponse: []tripperware.Extent{ mkExtentWithStats(50, 120), mkExtentWithStats(122, 130), }, expectedRequests: []tripperware.Request{ - &PrometheusRequest{ + &tripperware.PrometheusRequest{ Start: 120, End: 160, }, @@ -714,15 +728,15 @@ }, { name: "[stats] Extent is outside the range and the request has a single step (same start and end).", - input: &PrometheusRequest{ + input: &tripperware.PrometheusRequest{ Start: 100, End: 100, }, - prevCachedResponse: []Extent{ + prevCachedResponse: []tripperware.Extent{ mkExtentWithStats(50,
90), }, expectedRequests: []tripperware.Request{ - &PrometheusRequest{ + &tripperware.PrometheusRequest{ Start: 100, End: 100, }, @@ -731,11 +745,11 @@ { name: "[stats] Test when hit has a large step and only a single sample extent.", // If there is only a single sample in the split interval, start and end will be the same. - input: &PrometheusRequest{ + input: &tripperware.PrometheusRequest{ Start: 100, End: 100, }, - prevCachedResponse: []Extent{ + prevCachedResponse: []tripperware.Extent{ mkExtentWithStats(100, 100), }, expectedCachedResponse: []tripperware.Response{ @@ -763,24 +777,24 @@ func TestHandleHit(t *testing.T) { for _, tc := range []struct { name string input tripperware.Request - cachedEntry []Extent - expectedUpdatedCachedEntry []Extent + cachedEntry []tripperware.Extent + expectedUpdatedCachedEntry []tripperware.Extent }{ { name: "Should drop tiny extent that overlaps with non-tiny request only", - input: &PrometheusRequest{ + input: &tripperware.PrometheusRequest{ Start: 100, End: 120, Step: 5, }, - cachedEntry: []Extent{ + cachedEntry: []tripperware.Extent{ mkExtentWithStep(0, 50, 5), mkExtentWithStep(60, 65, 5), mkExtentWithStep(100, 105, 5), mkExtentWithStep(110, 150, 5), mkExtentWithStep(160, 165, 5), }, - expectedUpdatedCachedEntry: []Extent{ + expectedUpdatedCachedEntry: []tripperware.Extent{ mkExtentWithStep(0, 50, 5), mkExtentWithStep(60, 65, 5), mkExtentWithStep(100, 150, 5), @@ -789,12 +803,12 @@ }, { name: "Should replace tiny extents that are covered by a bigger request", - input: &PrometheusRequest{ + input: &tripperware.PrometheusRequest{ Start: 100, End: 200, Step: 5, }, - cachedEntry: []Extent{ + cachedEntry: []tripperware.Extent{ mkExtentWithStep(0, 50, 5), mkExtentWithStep(60, 65, 5), mkExtentWithStep(100, 105, 5), @@ -803,7 +817,7 @@ mkExtentWithStep(220, 225, 5), mkExtentWithStep(240, 250, 5), }, - expectedUpdatedCachedEntry: []Extent{ + expectedUpdatedCachedEntry: []tripperware.Extent{ mkExtentWithStep(0, 50, 5), mkExtentWithStep(60, 65, 5), mkExtentWithStep(100, 200, 5), @@ -813,12 +827,12 @@ }, { name: "Should not drop tiny extent that completely overlaps with tiny request", - input: &PrometheusRequest{ + input: &tripperware.PrometheusRequest{ Start: 100, End: 105, Step: 5, }, - cachedEntry: []Extent{ + cachedEntry: []tripperware.Extent{ mkExtentWithStep(0, 50, 5), mkExtentWithStep(60, 65, 5), mkExtentWithStep(100, 105, 5), @@ -828,12 +842,12 @@ }, { name: "Should not drop tiny extent that partially center-overlaps with tiny request", - input: &PrometheusRequest{ + input: &tripperware.PrometheusRequest{ Start: 106, End: 108, Step: 2, }, - cachedEntry: []Extent{ + cachedEntry: []tripperware.Extent{ mkExtentWithStep(60, 64, 2), mkExtentWithStep(104, 110, 2), mkExtentWithStep(160, 166, 2), @@ -842,17 +856,17 @@ }, { name: "Should not drop tiny extent that partially left-overlaps with tiny request", - input: &PrometheusRequest{ + input: &tripperware.PrometheusRequest{ Start: 100, End: 106, Step: 2, }, - cachedEntry: []Extent{ + cachedEntry: []tripperware.Extent{ mkExtentWithStep(60, 64, 2), mkExtentWithStep(104, 110, 2), mkExtentWithStep(160, 166, 2), }, - expectedUpdatedCachedEntry: []Extent{ + expectedUpdatedCachedEntry: []tripperware.Extent{ mkExtentWithStep(60, 64, 2), mkExtentWithStep(100, 110, 2), mkExtentWithStep(160, 166, 2), @@ -860,17
+874,17 @@ func TestHandleHit(t *testing.T) { }, { name: "Should not drop tiny extent that partially right-overlaps with tiny request", - input: &PrometheusRequest{ + input: &tripperware.PrometheusRequest{ Start: 100, End: 106, Step: 2, }, - cachedEntry: []Extent{ + cachedEntry: []tripperware.Extent{ mkExtentWithStep(60, 64, 2), mkExtentWithStep(98, 102, 2), mkExtentWithStep(160, 166, 2), }, - expectedUpdatedCachedEntry: []Extent{ + expectedUpdatedCachedEntry: []tripperware.Extent{ mkExtentWithStep(60, 64, 2), mkExtentWithStep(98, 106, 2), mkExtentWithStep(160, 166, 2), @@ -878,57 +892,57 @@ func TestHandleHit(t *testing.T) { }, { name: "Should merge fragmented extents if request fills the hole", - input: &PrometheusRequest{ + input: &tripperware.PrometheusRequest{ Start: 40, End: 80, Step: 20, }, - cachedEntry: []Extent{ + cachedEntry: []tripperware.Extent{ mkExtentWithStep(0, 20, 20), mkExtentWithStep(80, 100, 20), }, - expectedUpdatedCachedEntry: []Extent{ + expectedUpdatedCachedEntry: []tripperware.Extent{ mkExtentWithStep(0, 100, 20), }, }, { name: "Should left-extend extent if request starts earlier than extent in cache", - input: &PrometheusRequest{ + input: &tripperware.PrometheusRequest{ Start: 40, End: 80, Step: 20, }, - cachedEntry: []Extent{ + cachedEntry: []tripperware.Extent{ mkExtentWithStep(60, 160, 20), }, - expectedUpdatedCachedEntry: []Extent{ + expectedUpdatedCachedEntry: []tripperware.Extent{ mkExtentWithStep(40, 160, 20), }, }, { name: "Should right-extend extent if request ends later than extent in cache", - input: &PrometheusRequest{ + input: &tripperware.PrometheusRequest{ Start: 100, End: 180, Step: 20, }, - cachedEntry: []Extent{ + cachedEntry: []tripperware.Extent{ mkExtentWithStep(60, 160, 20), }, - expectedUpdatedCachedEntry: []Extent{ + expectedUpdatedCachedEntry: []tripperware.Extent{ mkExtentWithStep(60, 180, 20), }, }, { name: "Should not throw error if complete-overlapped smaller Extent is erroneous", - input: &PrometheusRequest{ + input: &tripperware.PrometheusRequest{ // This request is carefully created such that cachedEntry is not used to fulfill // the request. 
Start: 160, End: 180, Step: 20, }, - cachedEntry: []Extent{ + cachedEntry: []tripperware.Extent{ { Start: 60, End: 80, @@ -940,7 +954,7 @@ func TestHandleHit(t *testing.T) { }, mkExtentWithStep(60, 160, 20), }, - expectedUpdatedCachedEntry: []Extent{ + expectedUpdatedCachedEntry: []tripperware.Extent{ mkExtentWithStep(60, 180, 20), }, }, @@ -1059,7 +1073,7 @@ func TestResultsCacheMaxFreshness(t *testing.T) { for i, tc := range []struct { fakeLimits tripperware.Limits Handler tripperware.HandlerFunc - expectedResponse *PrometheusResponse + expectedResponse *tripperware.PrometheusResponse }{ { fakeLimits: mockLimits{maxCacheFreshness: 5 * time.Second}, @@ -1104,7 +1118,7 @@ func TestResultsCacheMaxFreshness(t *testing.T) { // fill cache key := constSplitter(day).GenerateCacheKey("1", req) - rc.(*resultsCache).put(ctx, key, []Extent{mkExtent(int64(modelNow)-(600*1e3), int64(modelNow))}) + rc.(*resultsCache).put(ctx, key, []tripperware.Extent{mkExtent(int64(modelNow)-(600*1e3), int64(modelNow))}) resp, err := rc.Do(ctx, req) require.NoError(t, err) @@ -1135,13 +1149,13 @@ func Test_resultsCache_MissingData(t *testing.T) { ctx := context.Background() // fill up the cache - rc.put(ctx, "empty", []Extent{{ + rc.put(ctx, "empty", []tripperware.Extent{{ Start: 100, End: 200, Response: nil, }}) - rc.put(ctx, "notempty", []Extent{mkExtent(100, 120)}) - rc.put(ctx, "mixed", []Extent{mkExtent(100, 120), { + rc.put(ctx, "notempty", []tripperware.Extent{mkExtent(100, 120)}) + rc.put(ctx, "mixed", []tripperware.Extent{mkExtent(100, 120), { Start: 120, End: 200, Response: nil, @@ -1169,14 +1183,14 @@ func TestConstSplitter_generateCacheKey(t *testing.T) { interval time.Duration want string }{ - {"0", &PrometheusRequest{Start: 0, Step: 10, Query: "foo{}"}, 30 * time.Minute, "fake:foo{}:10:0"}, - {"<30m", &PrometheusRequest{Start: toMs(10 * time.Minute), Step: 10, Query: "foo{}"}, 30 * time.Minute, "fake:foo{}:10:0"}, - {"30m", &PrometheusRequest{Start: toMs(30 * time.Minute), Step: 10, Query: "foo{}"}, 30 * time.Minute, "fake:foo{}:10:1"}, - {"91m", &PrometheusRequest{Start: toMs(91 * time.Minute), Step: 10, Query: "foo{}"}, 30 * time.Minute, "fake:foo{}:10:3"}, - {"0", &PrometheusRequest{Start: 0, Step: 10, Query: "foo{}"}, 24 * time.Hour, "fake:foo{}:10:0"}, - {"<1d", &PrometheusRequest{Start: toMs(22 * time.Hour), Step: 10, Query: "foo{}"}, 24 * time.Hour, "fake:foo{}:10:0"}, - {"4d", &PrometheusRequest{Start: toMs(4 * 24 * time.Hour), Step: 10, Query: "foo{}"}, 24 * time.Hour, "fake:foo{}:10:4"}, - {"3d5h", &PrometheusRequest{Start: toMs(77 * time.Hour), Step: 10, Query: "foo{}"}, 24 * time.Hour, "fake:foo{}:10:3"}, + {"0", &tripperware.PrometheusRequest{Start: 0, Step: 10, Query: "foo{}"}, 30 * time.Minute, "fake:foo{}:10:0"}, + {"<30m", &tripperware.PrometheusRequest{Start: toMs(10 * time.Minute), Step: 10, Query: "foo{}"}, 30 * time.Minute, "fake:foo{}:10:0"}, + {"30m", &tripperware.PrometheusRequest{Start: toMs(30 * time.Minute), Step: 10, Query: "foo{}"}, 30 * time.Minute, "fake:foo{}:10:1"}, + {"91m", &tripperware.PrometheusRequest{Start: toMs(91 * time.Minute), Step: 10, Query: "foo{}"}, 30 * time.Minute, "fake:foo{}:10:3"}, + {"0", &tripperware.PrometheusRequest{Start: 0, Step: 10, Query: "foo{}"}, 24 * time.Hour, "fake:foo{}:10:0"}, + {"<1d", &tripperware.PrometheusRequest{Start: toMs(22 * time.Hour), Step: 10, Query: "foo{}"}, 24 * time.Hour, "fake:foo{}:10:0"}, + {"4d", &tripperware.PrometheusRequest{Start: toMs(4 * 24 * time.Hour), Step: 10, Query: "foo{}"}, 24 * time.Hour, 
"fake:foo{}:10:4"}, + {"3d5h", &tripperware.PrometheusRequest{Start: toMs(77 * time.Hour), Step: 10, Query: "foo{}"}, 24 * time.Hour, "fake:foo{}:10:3"}, } for _, tt := range tests { tt := tt @@ -1214,7 +1228,7 @@ func TestResultsCacheShouldCacheFunc(t *testing.T) { { name: "check cache based on request", shouldCache: func(r tripperware.Request) bool { - if v, ok := r.(*PrometheusRequest); ok { + if v, ok := r.(*tripperware.PrometheusRequest); ok { return !v.CachingOptions.Disabled } return false diff --git a/pkg/querier/tripperware/queryrange/split_by_interval_test.go b/pkg/querier/tripperware/queryrange/split_by_interval_test.go index e3feb76b26..4d7e4a9a68 100644 --- a/pkg/querier/tripperware/queryrange/split_by_interval_test.go +++ b/pkg/querier/tripperware/queryrange/split_by_interval_test.go @@ -72,14 +72,14 @@ func TestSplitQuery(t *testing.T) { interval time.Duration }{ { - input: &PrometheusRequest{ + input: &tripperware.PrometheusRequest{ Start: 0, End: 60 * 60 * seconds, Step: 15 * seconds, Query: "foo", }, expected: []tripperware.Request{ - &PrometheusRequest{ + &tripperware.PrometheusRequest{ Start: 0, End: 60 * 60 * seconds, Step: 15 * seconds, @@ -89,14 +89,14 @@ func TestSplitQuery(t *testing.T) { interval: day, }, { - input: &PrometheusRequest{ + input: &tripperware.PrometheusRequest{ Start: 60 * 60 * seconds, End: 60 * 60 * seconds, Step: 15 * seconds, Query: "foo", }, expected: []tripperware.Request{ - &PrometheusRequest{ + &tripperware.PrometheusRequest{ Start: 60 * 60 * seconds, End: 60 * 60 * seconds, Step: 15 * seconds, @@ -106,14 +106,14 @@ func TestSplitQuery(t *testing.T) { interval: day, }, { - input: &PrometheusRequest{ + input: &tripperware.PrometheusRequest{ Start: 0, End: 60 * 60 * seconds, Step: 15 * seconds, Query: "foo", }, expected: []tripperware.Request{ - &PrometheusRequest{ + &tripperware.PrometheusRequest{ Start: 0, End: 60 * 60 * seconds, Step: 15 * seconds, @@ -123,14 +123,14 @@ func TestSplitQuery(t *testing.T) { interval: 3 * time.Hour, }, { - input: &PrometheusRequest{ + input: &tripperware.PrometheusRequest{ Start: 0, End: 24 * 3600 * seconds, Step: 15 * seconds, Query: "foo", }, expected: []tripperware.Request{ - &PrometheusRequest{ + &tripperware.PrometheusRequest{ Start: 0, End: 24 * 3600 * seconds, Step: 15 * seconds, @@ -140,14 +140,14 @@ func TestSplitQuery(t *testing.T) { interval: day, }, { - input: &PrometheusRequest{ + input: &tripperware.PrometheusRequest{ Start: 0, End: 3 * 3600 * seconds, Step: 15 * seconds, Query: "foo", }, expected: []tripperware.Request{ - &PrometheusRequest{ + &tripperware.PrometheusRequest{ Start: 0, End: 3 * 3600 * seconds, Step: 15 * seconds, @@ -157,20 +157,20 @@ func TestSplitQuery(t *testing.T) { interval: 3 * time.Hour, }, { - input: &PrometheusRequest{ + input: &tripperware.PrometheusRequest{ Start: 0, End: 2 * 24 * 3600 * seconds, Step: 15 * seconds, Query: "foo @ start()", }, expected: []tripperware.Request{ - &PrometheusRequest{ + &tripperware.PrometheusRequest{ Start: 0, End: (24 * 3600 * seconds) - (15 * seconds), Step: 15 * seconds, Query: "foo @ 0.000", }, - &PrometheusRequest{ + &tripperware.PrometheusRequest{ Start: 24 * 3600 * seconds, End: 2 * 24 * 3600 * seconds, Step: 15 * seconds, @@ -180,20 +180,20 @@ func TestSplitQuery(t *testing.T) { interval: day, }, { - input: &PrometheusRequest{ + input: &tripperware.PrometheusRequest{ Start: 0, End: 2 * 3 * 3600 * seconds, Step: 15 * seconds, Query: "foo", }, expected: []tripperware.Request{ - &PrometheusRequest{ + &tripperware.PrometheusRequest{ 
Start: 0, End: (3 * 3600 * seconds) - (15 * seconds), Step: 15 * seconds, Query: "foo", }, - &PrometheusRequest{ + &tripperware.PrometheusRequest{ Start: 3 * 3600 * seconds, End: 2 * 3 * 3600 * seconds, Step: 15 * seconds, @@ -203,26 +203,26 @@ func TestSplitQuery(t *testing.T) { interval: 3 * time.Hour, }, { - input: &PrometheusRequest{ + input: &tripperware.PrometheusRequest{ Start: 3 * 3600 * seconds, End: 3 * 24 * 3600 * seconds, Step: 15 * seconds, Query: "foo", }, expected: []tripperware.Request{ - &PrometheusRequest{ + &tripperware.PrometheusRequest{ Start: 3 * 3600 * seconds, End: (24 * 3600 * seconds) - (15 * seconds), Step: 15 * seconds, Query: "foo", }, - &PrometheusRequest{ + &tripperware.PrometheusRequest{ Start: 24 * 3600 * seconds, End: (2 * 24 * 3600 * seconds) - (15 * seconds), Step: 15 * seconds, Query: "foo", }, - &PrometheusRequest{ + &tripperware.PrometheusRequest{ Start: 2 * 24 * 3600 * seconds, End: 3 * 24 * 3600 * seconds, Step: 15 * seconds, @@ -232,26 +232,26 @@ func TestSplitQuery(t *testing.T) { interval: day, }, { - input: &PrometheusRequest{ + input: &tripperware.PrometheusRequest{ Start: 2 * 3600 * seconds, End: 3 * 3 * 3600 * seconds, Step: 15 * seconds, Query: "foo", }, expected: []tripperware.Request{ - &PrometheusRequest{ + &tripperware.PrometheusRequest{ Start: 2 * 3600 * seconds, End: (3 * 3600 * seconds) - (15 * seconds), Step: 15 * seconds, Query: "foo", }, - &PrometheusRequest{ + &tripperware.PrometheusRequest{ Start: 3 * 3600 * seconds, End: (2 * 3 * 3600 * seconds) - (15 * seconds), Step: 15 * seconds, Query: "foo", }, - &PrometheusRequest{ + &tripperware.PrometheusRequest{ Start: 2 * 3 * 3600 * seconds, End: 3 * 3 * 3600 * seconds, Step: 15 * seconds, diff --git a/pkg/querier/tripperware/queryrange/step_align_test.go b/pkg/querier/tripperware/queryrange/step_align_test.go index b5e4ce5a98..ac197b5b46 100644 --- a/pkg/querier/tripperware/queryrange/step_align_test.go +++ b/pkg/querier/tripperware/queryrange/step_align_test.go @@ -12,15 +12,15 @@ import ( func TestStepAlign(t *testing.T) { for i, tc := range []struct { - input, expected *PrometheusRequest + input, expected *tripperware.PrometheusRequest }{ { - input: &PrometheusRequest{ + input: &tripperware.PrometheusRequest{ Start: 0, End: 100, Step: 10, }, - expected: &PrometheusRequest{ + expected: &tripperware.PrometheusRequest{ Start: 0, End: 100, Step: 10, @@ -28,12 +28,12 @@ func TestStepAlign(t *testing.T) { }, { - input: &PrometheusRequest{ + input: &tripperware.PrometheusRequest{ Start: 2, End: 102, Step: 10, }, - expected: &PrometheusRequest{ + expected: &tripperware.PrometheusRequest{ Start: 0, End: 100, Step: 10, @@ -43,10 +43,10 @@ func TestStepAlign(t *testing.T) { tc := tc t.Run(strconv.Itoa(i), func(t *testing.T) { t.Parallel() - var result *PrometheusRequest + var result *tripperware.PrometheusRequest s := stepAlign{ next: tripperware.HandlerFunc(func(_ context.Context, req tripperware.Request) (tripperware.Response, error) { - result = req.(*PrometheusRequest) + result = req.(*tripperware.PrometheusRequest) return nil, nil }), } diff --git a/pkg/querier/worker/worker.go b/pkg/querier/worker/worker.go index 3fc87ee4c6..90e32b7aff 100644 --- a/pkg/querier/worker/worker.go +++ b/pkg/querier/worker/worker.go @@ -45,7 +45,7 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { f.BoolVar(&cfg.MatchMaxConcurrency, "querier.worker-match-max-concurrent", false, "Force worker concurrency to match the -querier.max-concurrent option. 
Overrides querier.worker-parallelism.") f.StringVar(&cfg.QuerierID, "querier.id", "", "Querier ID, sent to frontend service to identify requests from the same querier. Defaults to hostname.") - cfg.GRPCClientConfig.RegisterFlagsWithPrefix("querier.frontend-client", f) + cfg.GRPCClientConfig.RegisterFlagsWithPrefix("querier.frontend-client", "", f) } func (cfg *Config) Validate(log log.Logger) error { diff --git a/pkg/ring/lifecycler.go b/pkg/ring/lifecycler.go index d4f1e5735b..6e55aba13e 100644 --- a/pkg/ring/lifecycler.go +++ b/pkg/ring/lifecycler.go @@ -910,7 +910,10 @@ func (i *Lifecycler) changeState(ctx context.Context, state InstanceState) error (currState == JOINING && state == PENDING) || // triggered by TransferChunks on failure (currState == JOINING && state == ACTIVE) || // triggered by TransferChunks on success (currState == PENDING && state == ACTIVE) || // triggered by autoJoin - (currState == ACTIVE && state == LEAVING)) { // triggered by shutdown + (currState == ACTIVE && state == LEAVING) || // triggered by shutdown + (currState == ACTIVE && state == READONLY) || // triggered by ingester mode + (currState == READONLY && state == ACTIVE) || // triggered by ingester mode + (currState == READONLY && state == LEAVING)) { // triggered by shutdown return fmt.Errorf("Changing instance state from %v -> %v is disallowed", currState, state) } diff --git a/pkg/ring/ring.go b/pkg/ring/ring.go index 38b1d48489..7377cbcccd 100644 --- a/pkg/ring/ring.go +++ b/pkg/ring/ring.go @@ -106,13 +106,17 @@ var ( }) // WriteNoExtend is like Write, but with no replicaset extension. - WriteNoExtend = NewOp([]InstanceState{ACTIVE}, nil) + WriteNoExtend = NewOp([]InstanceState{ACTIVE}, func(s InstanceState) bool { + // Skip READONLY instances by extending the replica set for the key, so the + // write lands on the next healthy instance instead. + return s == READONLY + }) - // Read operation that extends the replica set if an instance is not ACTIVE, LEAVING OR JOINING - Read = NewOp([]InstanceState{ACTIVE, PENDING, LEAVING, JOINING}, func(s InstanceState) bool { + // Read operation that extends the replica set if an instance is not ACTIVE, LEAVING, JOINING OR READONLY + Read = NewOp([]InstanceState{ACTIVE, PENDING, LEAVING, JOINING, READONLY}, func(s InstanceState) bool { // To match Write with extended replica set we have to also increase the // size of the replica set for Read, but we can read from LEAVING ingesters. - return s != ACTIVE && s != LEAVING && s != JOINING + return s != ACTIVE && s != LEAVING && s != JOINING && s != READONLY }) // Reporting is a special value for inquiring about health.
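The net effect of the two Operation changes above: a READONLY ingester stays readable but is routed around on the write path by growing the replica set past it. A self-contained sketch of those semantics (illustrative only; op, healthy, and extend are hypothetical names, not this package's API — NewOp itself packs the same information into bits, setting 0x10000 << s for each state that extends the set):

package main

import "fmt"

type InstanceState int

const (
	ACTIVE InstanceState = iota
	LEAVING
	PENDING
	JOINING
	LEFT
	READONLY // value 5, matching ring.proto below
)

// op models what NewOp encodes: the states an operation may use, plus a
// predicate deciding when the replica set must be extended past an instance.
type op struct {
	healthy map[InstanceState]bool
	extend  func(InstanceState) bool
}

func main() {
	// WriteNoExtend, as redefined above: only ACTIVE is usable, and a
	// READONLY instance is skipped by extending the replica set.
	writeNoExtend := op{
		healthy: map[InstanceState]bool{ACTIVE: true},
		extend:  func(s InstanceState) bool { return s == READONLY },
	}
	// Read, as redefined above: READONLY instances stay readable and do not
	// trigger extension (PENDING and LEFT still do).
	read := op{
		healthy: map[InstanceState]bool{ACTIVE: true, PENDING: true, LEAVING: true, JOINING: true, READONLY: true},
		extend:  func(s InstanceState) bool { return s != ACTIVE && s != LEAVING && s != JOINING && s != READONLY },
	}
	s := READONLY
	fmt.Println(writeNoExtend.healthy[s], writeNoExtend.extend(s)) // false true
	fmt.Println(read.healthy[s], read.extend(s))                   // true false
}

The ring_test.go cases further down exercise exactly this: with READONLY instances present, Write and WriteNoExtend return only ACTIVE instances, while Read still includes the READONLY ones.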
@@ -661,7 +665,7 @@ func (r *Ring) updateRingMetrics(compareResult CompareResult) { oldestTimestampByState := map[string]int64{} // Initialized to zero so we emit zero-metrics (instead of not emitting anything) - for _, s := range []string{unhealthy, ACTIVE.String(), LEAVING.String(), PENDING.String(), JOINING.String()} { + for _, s := range []string{unhealthy, ACTIVE.String(), LEAVING.String(), PENDING.String(), JOINING.String(), READONLY.String()} { numByState[s] = 0 oldestTimestampByState[s] = 0 } @@ -995,7 +999,7 @@ func NewOp(healthyStates []InstanceState, shouldExtendReplicaSet func(s Instance } if shouldExtendReplicaSet != nil { - for _, s := range []InstanceState{ACTIVE, LEAVING, PENDING, JOINING, LEFT} { + for _, s := range []InstanceState{ACTIVE, LEAVING, PENDING, JOINING, LEFT, READONLY} { if shouldExtendReplicaSet(s) { op |= (0x10000 << s) } diff --git a/pkg/ring/ring.pb.go b/pkg/ring/ring.pb.go index bba74142c5..87a4c59b54 100644 --- a/pkg/ring/ring.pb.go +++ b/pkg/ring/ring.pb.go @@ -36,7 +36,8 @@ const ( JOINING InstanceState = 3 // This state is only used by gossiping code to distribute information about // instances that have been removed from the ring. Ring users should not use it directly. - LEFT InstanceState = 4 + LEFT InstanceState = 4 + READONLY InstanceState = 5 ) var InstanceState_name = map[int32]string{ @@ -45,14 +46,16 @@ var InstanceState_name = map[int32]string{ 2: "PENDING", 3: "JOINING", 4: "LEFT", + 5: "READONLY", } var InstanceState_value = map[string]int32{ - "ACTIVE": 0, - "LEAVING": 1, - "PENDING": 2, - "JOINING": 3, - "LEFT": 4, + "ACTIVE": 0, + "LEAVING": 1, + "PENDING": 2, + "JOINING": 3, + "LEFT": 4, + "READONLY": 5, } func (InstanceState) EnumDescriptor() ([]byte, []int) { @@ -210,33 +213,34 @@ func init() { func init() { proto.RegisterFile("ring.proto", fileDescriptor_26381ed67e202a6e) } var fileDescriptor_26381ed67e202a6e = []byte{ - // 409 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0x92, 0xc1, 0x8a, 0xd3, 0x50, - 0x18, 0x85, 0xef, 0x9f, 0xdc, 0x64, 0xd2, 0xbf, 0xce, 0x10, 0xee, 0x0c, 0x12, 0x07, 0xb9, 0x86, - 0x59, 0x45, 0x17, 0x15, 0xab, 0x0b, 0x11, 0x5c, 0xcc, 0x38, 0x51, 0x12, 0x4a, 0x1d, 0x62, 0x99, - 0xad, 0xc4, 0xe9, 0x25, 0x84, 0xb1, 0x49, 0x49, 0xae, 0x42, 0x5d, 0xf9, 0x08, 0xbe, 0x80, 0x7b, - 0x1f, 0xa5, 0xcb, 0xae, 0xa4, 0x2b, 0xb1, 0xe9, 0xc6, 0x65, 0x1f, 0x41, 0x6e, 0xd2, 0x92, 0xe9, - 0xee, 0x9c, 0xff, 0x9c, 0x9c, 0x2f, 0x81, 0x20, 0x16, 0x69, 0x96, 0xf4, 0xa6, 0x45, 0x2e, 0x73, - 0x46, 0x95, 0x3e, 0x3d, 0x49, 0xf2, 0x24, 0xaf, 0x0f, 0x4f, 0x95, 0x6a, 0xb2, 0xb3, 0x9f, 0x80, - 0xf4, 0x52, 0x94, 0x37, 0xec, 0x35, 0x76, 0xd2, 0x2c, 0x11, 0xa5, 0x14, 0x45, 0xe9, 0x80, 0xab, - 0x7b, 0xdd, 0xfe, 0x83, 0x5e, 0x3d, 0xa2, 0xe2, 0x5e, 0xb0, 0xcb, 0xfc, 0x4c, 0x16, 0xb3, 0x0b, - 0x3a, 0xff, 0xf3, 0x88, 0x44, 0xed, 0x13, 0xa7, 0x57, 0x78, 0xb4, 0x5f, 0x61, 0x36, 0xea, 0xb7, - 0x62, 0xe6, 0x80, 0x0b, 0x5e, 0x27, 0x52, 0x92, 0x79, 0x68, 0x7c, 0x8d, 0x3f, 0x7f, 0x11, 0x8e, - 0xe6, 0x82, 0xd7, 0xed, 0xb3, 0x66, 0x3e, 0xc8, 0x4a, 0x19, 0x67, 0x37, 0x42, 0x61, 0xa2, 0xa6, - 0xf0, 0x4a, 0x7b, 0x09, 0x21, 0xb5, 0x34, 0x5b, 0x3f, 0xfb, 0x0d, 0x78, 0xef, 0x6e, 0x83, 0x31, - 0xa4, 0xf1, 0x78, 0x5c, 0x6c, 0x77, 0x6b, 0xcd, 0x1e, 0x62, 0x47, 0xa6, 0x13, 0x51, 0xca, 0x78, - 0x32, 0xad, 0xc7, 0xf5, 0xa8, 0x3d, 0xb0, 0xc7, 0x68, 0x94, 0x32, 0x96, 0xc2, 0xd1, 0x5d, 0xf0, - 0x8e, 0xfa, 0xc7, 0xfb, 0xd8, 0x0f, 0x2a, 0x8a, 0x9a, 0x06, 0xbb, 0x8f, 0xa6, 0xcc, 0x6f, 0x45, - 0x56, 0x3a, 0xa6, 0xab, 
0x7b, 0x87, 0xd1, 0xd6, 0x29, 0xe8, 0xb7, 0x3c, 0x13, 0xce, 0x41, 0x03, - 0x55, 0x9a, 0x3d, 0xc3, 0x93, 0x42, 0x24, 0xa9, 0xfa, 0x62, 0x31, 0xfe, 0xd8, 0xf2, 0xad, 0x9a, - 0x7f, 0xdc, 0x66, 0xa3, 0x5d, 0x14, 0x52, 0x8b, 0xda, 0x46, 0x48, 0x2d, 0xc3, 0x36, 0x9f, 0x0c, - 0xf0, 0x70, 0xef, 0x15, 0x18, 0xa2, 0x79, 0xfe, 0x66, 0x14, 0x5c, 0xfb, 0x36, 0x61, 0x5d, 0x3c, - 0x18, 0xf8, 0xe7, 0xd7, 0xc1, 0xf0, 0x9d, 0x0d, 0xca, 0x5c, 0xf9, 0xc3, 0x4b, 0x65, 0x34, 0x65, - 0xc2, 0xf7, 0xc1, 0x50, 0x19, 0x9d, 0x59, 0x48, 0x07, 0xfe, 0xdb, 0x91, 0x4d, 0x2f, 0x5e, 0x2c, - 0x56, 0x9c, 0x2c, 0x57, 0x9c, 0x6c, 0x56, 0x1c, 0xbe, 0x57, 0x1c, 0x7e, 0x55, 0x1c, 0xe6, 0x15, - 0x87, 0x45, 0xc5, 0xe1, 0x6f, 0xc5, 0xe1, 0x5f, 0xc5, 0xc9, 0xa6, 0xe2, 0xf0, 0x63, 0xcd, 0xc9, - 0x62, 0xcd, 0xc9, 0x72, 0xcd, 0xc9, 0x27, 0xb3, 0xfe, 0x07, 0x9e, 0xff, 0x0f, 0x00, 0x00, 0xff, - 0xff, 0xd3, 0x1c, 0x09, 0x3a, 0x2d, 0x02, 0x00, 0x00, + // 423 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0x92, 0x41, 0x6b, 0xd4, 0x40, + 0x1c, 0xc5, 0xe7, 0xbf, 0x99, 0xa4, 0xd9, 0xff, 0xb6, 0x65, 0x98, 0x16, 0x89, 0x45, 0xc6, 0xd0, + 0x53, 0xf4, 0xb0, 0xe2, 0xea, 0x41, 0x04, 0x0f, 0x5b, 0x37, 0x4a, 0xc2, 0x92, 0x96, 0xb8, 0x14, + 0xf4, 0x22, 0xb1, 0x3b, 0x84, 0x50, 0x9b, 0x94, 0x64, 0x14, 0xea, 0xc9, 0x8f, 0xe0, 0x17, 0xf0, + 0xee, 0x47, 0xe9, 0x71, 0x4f, 0xd2, 0x93, 0xb8, 0xd9, 0x8b, 0xc7, 0x7e, 0x04, 0x99, 0xa4, 0x25, + 0xdd, 0xdb, 0x7b, 0xff, 0xf7, 0xf2, 0x7e, 0x09, 0x04, 0xb1, 0xcc, 0xf2, 0x74, 0x78, 0x5e, 0x16, + 0xaa, 0xe0, 0x54, 0xeb, 0xbd, 0xdd, 0xb4, 0x48, 0x8b, 0xe6, 0xf0, 0x44, 0xab, 0x36, 0xdb, 0xff, + 0x09, 0x48, 0x27, 0xb2, 0x3a, 0xe1, 0xaf, 0xb0, 0x9f, 0xe5, 0xa9, 0xac, 0x94, 0x2c, 0x2b, 0x07, + 0x5c, 0xc3, 0x1b, 0x8c, 0xee, 0x0f, 0x9b, 0x11, 0x1d, 0x0f, 0x83, 0xdb, 0xcc, 0xcf, 0x55, 0x79, + 0x71, 0x40, 0x2f, 0xff, 0x3c, 0x24, 0x71, 0xf7, 0xc4, 0xde, 0x11, 0x6e, 0xaf, 0x57, 0x38, 0x43, + 0xe3, 0x54, 0x5e, 0x38, 0xe0, 0x82, 0xd7, 0x8f, 0xb5, 0xe4, 0x1e, 0x9a, 0x5f, 0x93, 0xcf, 0x5f, + 0xa4, 0xd3, 0x73, 0xc1, 0x1b, 0x8c, 0x78, 0x3b, 0x1f, 0xe4, 0x95, 0x4a, 0xf2, 0x13, 0xa9, 0x31, + 0x71, 0x5b, 0x78, 0xd9, 0x7b, 0x01, 0x21, 0xb5, 0x7b, 0xcc, 0xd8, 0xff, 0x0d, 0xb8, 0x79, 0xb7, + 0xc1, 0x39, 0xd2, 0x64, 0x3e, 0x2f, 0x6f, 0x76, 0x1b, 0xcd, 0x1f, 0x60, 0x5f, 0x65, 0x67, 0xb2, + 0x52, 0xc9, 0xd9, 0x79, 0x33, 0x6e, 0xc4, 0xdd, 0x81, 0x3f, 0x42, 0xb3, 0x52, 0x89, 0x92, 0x8e, + 0xe1, 0x82, 0xb7, 0x3d, 0xda, 0x59, 0xc7, 0xbe, 0xd3, 0x51, 0xdc, 0x36, 0xf8, 0x3d, 0xb4, 0x54, + 0x71, 0x2a, 0xf3, 0xca, 0xb1, 0x5c, 0xc3, 0xdb, 0x8a, 0x6f, 0x9c, 0x86, 0x7e, 0x2b, 0x72, 0xe9, + 0x6c, 0xb4, 0x50, 0xad, 0xf9, 0x53, 0xdc, 0x2d, 0x65, 0x9a, 0xe9, 0x2f, 0x96, 0xf3, 0x8f, 0x1d, + 0xdf, 0x6e, 0xf8, 0x3b, 0x5d, 0x36, 0xbb, 0x8d, 0x42, 0x6a, 0x53, 0x66, 0x86, 0xd4, 0x36, 0x99, + 0xf5, 0xf8, 0x03, 0x6e, 0xad, 0xbd, 0x02, 0x47, 0xb4, 0xc6, 0xaf, 0x67, 0xc1, 0xb1, 0xcf, 0x08, + 0x1f, 0xe0, 0xc6, 0xd4, 0x1f, 0x1f, 0x07, 0xd1, 0x5b, 0x06, 0xda, 0x1c, 0xf9, 0xd1, 0x44, 0x9b, + 0x9e, 0x36, 0xe1, 0x61, 0x10, 0x69, 0x63, 0x70, 0x1b, 0xe9, 0xd4, 0x7f, 0x33, 0x63, 0x94, 0x6f, + 0xa2, 0x1d, 0xfb, 0xe3, 0xc9, 0x61, 0x34, 0x7d, 0xcf, 0xcc, 0x83, 0xe7, 0x8b, 0xa5, 0x20, 0x57, + 0x4b, 0x41, 0xae, 0x97, 0x02, 0xbe, 0xd7, 0x02, 0x7e, 0xd5, 0x02, 0x2e, 0x6b, 0x01, 0x8b, 0x5a, + 0xc0, 0xdf, 0x5a, 0xc0, 0xbf, 0x5a, 0x90, 0xeb, 0x5a, 0xc0, 0x8f, 0x95, 0x20, 0x8b, 0x95, 0x20, + 0x57, 0x2b, 0x41, 0x3e, 0x59, 0xcd, 0x1f, 0xf1, 0xec, 0x7f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x75, + 0x1d, 0x75, 0xff, 0x3b, 0x02, 0x00, 0x00, } 
func (x InstanceState) String() string { diff --git a/pkg/ring/ring.proto b/pkg/ring/ring.proto index 5dfeea8fad..44a2cad272 100644 --- a/pkg/ring/ring.proto +++ b/pkg/ring/ring.proto @@ -51,4 +51,6 @@ enum InstanceState { // This state is only used by gossiping code to distribute information about // instances that have been removed from the ring. Ring users should not use it directly. LEFT = 4; + + READONLY = 5; } diff --git a/pkg/ring/ring_test.go b/pkg/ring/ring_test.go index 3bba3a98c9..47bb91c676 100644 --- a/pkg/ring/ring_test.go +++ b/pkg/ring/ring_test.go @@ -726,12 +726,14 @@ func TestRing_Get_ExtendedReplicationSet(t *testing.T) { unhealthyTimestamp := time.Now().Add(-2 * time.Minute).Unix() tests := map[string]struct { + operation Operation instances map[string]InstanceDesc numberOfZones int replicationFactor int expectedInstances []InstanceDesc }{ "should return exactly number of replication factor when there is no extended replica set": { + operation: NewOp([]InstanceState{JOINING, ACTIVE}, func(s InstanceState) bool { return s == JOINING }), instances: map[string]InstanceDesc{ "instance-1": {Addr: "127.0.0.1", State: ACTIVE, Tokens: []uint32{1}, Timestamp: healthyTimestamp}, "instance-2": {Addr: "127.0.0.2", State: ACTIVE, Tokens: []uint32{2}, Timestamp: healthyTimestamp}, @@ -747,6 +749,7 @@ func TestRing_Get_ExtendedReplicationSet(t *testing.T) { }, }, "extended replica set should be included in the set": { + operation: NewOp([]InstanceState{JOINING, ACTIVE}, func(s InstanceState) bool { return s == JOINING }), instances: map[string]InstanceDesc{ "instance-1": {Addr: "127.0.0.1", State: JOINING, Tokens: []uint32{1}, Timestamp: healthyTimestamp}, "instance-2": {Addr: "127.0.0.2", State: JOINING, Tokens: []uint32{2}, Timestamp: healthyTimestamp}, @@ -765,6 +768,7 @@ func TestRing_Get_ExtendedReplicationSet(t *testing.T) { }, }, "unhealthy instances should be excluded from the set": { + operation: NewOp([]InstanceState{JOINING, ACTIVE}, func(s InstanceState) bool { return s == JOINING }), instances: map[string]InstanceDesc{ "instance-1": {Addr: "127.0.0.1", State: ACTIVE, Tokens: []uint32{1}, Timestamp: unhealthyTimestamp}, "instance-2": {Addr: "127.0.0.2", State: ACTIVE, Tokens: []uint32{2}, Timestamp: healthyTimestamp}, @@ -779,6 +783,7 @@ func TestRing_Get_ExtendedReplicationSet(t *testing.T) { }, }, "should return exactly number of replication factor when there is no extended replica set, when zone awareness is enabled": { + operation: NewOp([]InstanceState{JOINING, ACTIVE}, func(s InstanceState) bool { return s == JOINING }), instances: map[string]InstanceDesc{ "instance-1": {Addr: "127.0.0.1", State: ACTIVE, Tokens: []uint32{1}, Zone: "zone-1", Timestamp: healthyTimestamp}, "instance-2": {Addr: "127.0.0.2", State: ACTIVE, Tokens: []uint32{2}, Zone: "zone-2", Timestamp: healthyTimestamp}, @@ -794,6 +799,7 @@ func TestRing_Get_ExtendedReplicationSet(t *testing.T) { }, }, "extended replica set should be included in the set, when zone awareness is enabled": { + operation: NewOp([]InstanceState{JOINING, ACTIVE}, func(s InstanceState) bool { return s == JOINING }), instances: map[string]InstanceDesc{ "instance-1": {Addr: "127.0.0.1", State: JOINING, Tokens: []uint32{1}, Zone: "zone-1", Timestamp: healthyTimestamp}, "instance-2": {Addr: "127.0.0.2", State: JOINING, Tokens: []uint32{2}, Zone: "zone-2", Timestamp: healthyTimestamp}, @@ -813,6 +819,7 @@ func TestRing_Get_ExtendedReplicationSet(t *testing.T) { }, }, "extended replica set should be included in the set, when zone
awareness is enabled and RF is greater than zones": { + operation: NewOp([]InstanceState{JOINING, ACTIVE}, func(s InstanceState) bool { return s == JOINING }), instances: map[string]InstanceDesc{ "instance-1": {Addr: "127.0.0.1", State: JOINING, Tokens: []uint32{1}, Zone: "zone-1", Timestamp: healthyTimestamp}, "instance-2": {Addr: "127.0.0.2", State: JOINING, Tokens: []uint32{2}, Zone: "zone-2", Timestamp: healthyTimestamp}, @@ -834,6 +841,56 @@ func TestRing_Get_ExtendedReplicationSet(t *testing.T) { {Addr: "127.0.0.7", State: ACTIVE, Tokens: []uint32{7}, Zone: "zone-3", Timestamp: healthyTimestamp}, }, }, + "extended replica set should be included in the set, when readonly using WriteNoExtend operation": { + operation: WriteNoExtend, + instances: map[string]InstanceDesc{ + "instance-1": {Addr: "127.0.0.1", State: ACTIVE, Tokens: []uint32{1}, Zone: "zone-1", Timestamp: healthyTimestamp}, + "instance-2": {Addr: "127.0.0.2", State: READONLY, Tokens: []uint32{2}, Zone: "zone-2", Timestamp: healthyTimestamp}, + "instance-3": {Addr: "127.0.0.3", State: READONLY, Tokens: []uint32{3}, Zone: "zone-1", Timestamp: healthyTimestamp}, + "instance-4": {Addr: "127.0.0.4", State: ACTIVE, Tokens: []uint32{4}, Zone: "zone-2", Timestamp: healthyTimestamp}, + "instance-5": {Addr: "127.0.0.5", State: ACTIVE, Tokens: []uint32{5}, Zone: "zone-2", Timestamp: healthyTimestamp}, + "instance-6": {Addr: "127.0.0.6", State: ACTIVE, Tokens: []uint32{6}, Zone: "zone-3", Timestamp: healthyTimestamp}, + "instance-7": {Addr: "127.0.0.7", State: ACTIVE, Tokens: []uint32{7}, Zone: "zone-3", Timestamp: healthyTimestamp}, + }, + numberOfZones: 3, + replicationFactor: 3, + expectedInstances: []InstanceDesc{ + {Addr: "127.0.0.1", State: ACTIVE, Tokens: []uint32{1}, Zone: "zone-1", Timestamp: healthyTimestamp}, + {Addr: "127.0.0.4", State: ACTIVE, Tokens: []uint32{4}, Zone: "zone-2", Timestamp: healthyTimestamp}, + {Addr: "127.0.0.6", State: ACTIVE, Tokens: []uint32{6}, Zone: "zone-3", Timestamp: healthyTimestamp}, + }, + }, + "extended replica set should be included in the set, when readonly using Write operation": { + operation: Write, + instances: map[string]InstanceDesc{ + "instance-1": {Addr: "127.0.0.1", State: ACTIVE, Tokens: []uint32{1}, Zone: "zone-1", Timestamp: healthyTimestamp}, + "instance-2": {Addr: "127.0.0.2", State: READONLY, Tokens: []uint32{2}, Zone: "zone-2", Timestamp: healthyTimestamp}, + "instance-3": {Addr: "127.0.0.3", State: READONLY, Tokens: []uint32{3}, Zone: "zone-1", Timestamp: healthyTimestamp}, + "instance-4": {Addr: "127.0.0.4", State: ACTIVE, Tokens: []uint32{4}, Zone: "zone-2", Timestamp: healthyTimestamp}, + "instance-5": {Addr: "127.0.0.5", State: ACTIVE, Tokens: []uint32{5}, Zone: "zone-2", Timestamp: healthyTimestamp}, + "instance-6": {Addr: "127.0.0.6", State: LEAVING, Tokens: []uint32{6}, Zone: "zone-3", Timestamp: healthyTimestamp}, + "instance-7": {Addr: "127.0.0.7", State: ACTIVE, Tokens: []uint32{7}, Zone: "zone-3", Timestamp: healthyTimestamp}, + }, + numberOfZones: 3, + replicationFactor: 3, + expectedInstances: []InstanceDesc{ + {Addr: "127.0.0.1", State: ACTIVE, Tokens: []uint32{1}, Zone: "zone-1", Timestamp: healthyTimestamp}, + {Addr: "127.0.0.4", State: ACTIVE, Tokens: []uint32{4}, Zone: "zone-2", Timestamp: healthyTimestamp}, + {Addr: "127.0.0.7", State: ACTIVE, Tokens: []uint32{7}, Zone: "zone-3", Timestamp: healthyTimestamp}, + }, + }, + "extended replica set should be included in the set, when readonly using Write operation and 1 zone": { + operation: Write, + instances: 
map[string]InstanceDesc{ + "instance-1": {Addr: "127.0.0.1", State: READONLY, Tokens: []uint32{1}, Zone: "", Timestamp: healthyTimestamp}, + "instance-2": {Addr: "127.0.0.2", State: ACTIVE, Tokens: []uint32{2}, Zone: "", Timestamp: healthyTimestamp}, + }, + numberOfZones: 1, + replicationFactor: 1, + expectedInstances: []InstanceDesc{ + {Addr: "127.0.0.2", State: ACTIVE, Tokens: []uint32{2}, Zone: "", Timestamp: healthyTimestamp}, + }, + }, } for testName, testData := range tests { @@ -854,8 +911,7 @@ func TestRing_Get_ExtendedReplicationSet(t *testing.T) { KVClient: &MockClient{}, } - testOperation := NewOp([]InstanceState{JOINING, ACTIVE}, func(s InstanceState) bool { return s == JOINING }) - set, err := ring.Get(0, testOperation, nil, nil, nil) + set, err := ring.Get(0, testData.operation, nil, nil, nil) assert.NoError(t, err) assert.Equal(t, testData.expectedInstances, set.Instances) }) @@ -887,11 +943,12 @@ func TestRing_GetAllHealthy(t *testing.T) { "instance-2": {Addr: "127.0.0.2", State: PENDING, Timestamp: now.Add(-10 * time.Second).Unix()}, "instance-3": {Addr: "127.0.0.3", State: JOINING, Timestamp: now.Add(-20 * time.Second).Unix()}, "instance-4": {Addr: "127.0.0.4", State: LEAVING, Timestamp: now.Add(-30 * time.Second).Unix()}, - "instance-5": {Addr: "127.0.0.5", State: ACTIVE, Timestamp: now.Add(-2 * time.Minute).Unix()}, + "instance-5": {Addr: "127.0.0.5", State: READONLY, Timestamp: now.Add(-40 * time.Second).Unix()}, + "instance-6": {Addr: "127.0.0.6", State: ACTIVE, Timestamp: now.Add(-2 * time.Minute).Unix()}, }, - expectedSetForRead: []string{"127.0.0.1", "127.0.0.2", "127.0.0.3", "127.0.0.4"}, + expectedSetForRead: []string{"127.0.0.1", "127.0.0.2", "127.0.0.3", "127.0.0.4", "127.0.0.5"}, expectedSetForWrite: []string{"127.0.0.1"}, - expectedSetForReporting: []string{"127.0.0.1", "127.0.0.2", "127.0.0.3", "127.0.0.4"}, + expectedSetForReporting: []string{"127.0.0.1", "127.0.0.2", "127.0.0.3", "127.0.0.4", "127.0.0.5"}, }, } @@ -1087,6 +1144,34 @@ func TestRing_GetReplicationSetForOperation(t *testing.T) { expectedErrForWrite: ErrTooManyUnhealthyInstances, expectedErrForReporting: ErrTooManyUnhealthyInstances, }, + "should succeed with READONLY instance": { + ringInstances: map[string]InstanceDesc{ + "instance-1": {Addr: "127.0.0.1", State: ACTIVE, Timestamp: now.Unix(), Tokens: g.GenerateTokens(NewDesc(), "instance-1", "", 128, true)}, + "instance-2": {Addr: "127.0.0.2", State: ACTIVE, Timestamp: now.Add(-10 * time.Second).Unix(), Tokens: g.GenerateTokens(NewDesc(), "instance-2", "", 128, true)}, + "instance-3": {Addr: "127.0.0.3", State: ACTIVE, Timestamp: now.Add(-20 * time.Second).Unix(), Tokens: g.GenerateTokens(NewDesc(), "instance-3", "", 128, true)}, + "instance-4": {Addr: "127.0.0.4", State: ACTIVE, Timestamp: now.Add(-30 * time.Second).Unix(), Tokens: g.GenerateTokens(NewDesc(), "instance-4", "", 128, true)}, + "instance-5": {Addr: "127.0.0.5", State: READONLY, Timestamp: now.Add(-40 * time.Second).Unix(), Tokens: g.GenerateTokens(NewDesc(), "instance-5", "", 128, true)}, + }, + ringHeartbeatTimeout: time.Minute, + ringReplicationFactor: 1, + expectedSetForRead: []string{"127.0.0.1", "127.0.0.2", "127.0.0.3", "127.0.0.4", "127.0.0.5"}, + expectedErrForWrite: ErrTooManyUnhealthyInstances, + expectedSetForReporting: []string{"127.0.0.1", "127.0.0.2", "127.0.0.3", "127.0.0.4", "127.0.0.5"}, + }, + "should succeed with READONLY instance and RF = 3": { + ringInstances: map[string]InstanceDesc{ + "instance-1": {Addr: "127.0.0.1", State: ACTIVE, Timestamp: 
now.Unix(), Tokens: g.GenerateTokens(NewDesc(), "instance-1", "", 128, true)}, + "instance-2": {Addr: "127.0.0.2", State: READONLY, Timestamp: now.Add(-10 * time.Second).Unix(), Tokens: g.GenerateTokens(NewDesc(), "instance-2", "", 128, true)}, + "instance-3": {Addr: "127.0.0.3", State: ACTIVE, Timestamp: now.Add(-20 * time.Second).Unix(), Tokens: g.GenerateTokens(NewDesc(), "instance-3", "", 128, true)}, + "instance-4": {Addr: "127.0.0.4", State: ACTIVE, Timestamp: now.Add(-30 * time.Second).Unix(), Tokens: g.GenerateTokens(NewDesc(), "instance-4", "", 128, true)}, + "instance-5": {Addr: "127.0.0.5", State: ACTIVE, Timestamp: now.Add(-40 * time.Second).Unix(), Tokens: g.GenerateTokens(NewDesc(), "instance-5", "", 128, true)}, + }, + ringHeartbeatTimeout: time.Minute, + ringReplicationFactor: 3, + expectedSetForRead: []string{"127.0.0.1", "127.0.0.2", "127.0.0.3", "127.0.0.4", "127.0.0.5"}, + expectedSetForWrite: []string{"127.0.0.1", "127.0.0.3", "127.0.0.4", "127.0.0.5"}, + expectedSetForReporting: []string{"127.0.0.1", "127.0.0.2", "127.0.0.3", "127.0.0.4", "127.0.0.5"}, + }, } for testName, testData := range tests { @@ -3001,6 +3086,7 @@ func TestUpdateMetrics(t *testing.T) { ring_members{name="test",state="JOINING"} 0 ring_members{name="test",state="LEAVING"} 0 ring_members{name="test",state="PENDING"} 0 + ring_members{name="test",state="READONLY"} 0 ring_members{name="test",state="Unhealthy"} 0 # HELP ring_oldest_member_timestamp Timestamp of the oldest member in the ring. # TYPE ring_oldest_member_timestamp gauge @@ -3008,6 +3094,7 @@ func TestUpdateMetrics(t *testing.T) { ring_oldest_member_timestamp{name="test",state="JOINING"} 0 ring_oldest_member_timestamp{name="test",state="LEAVING"} 0 ring_oldest_member_timestamp{name="test",state="PENDING"} 0 + ring_oldest_member_timestamp{name="test",state="READONLY"} 0 ring_oldest_member_timestamp{name="test",state="Unhealthy"} 0 # HELP ring_tokens_owned The number of tokens in the ring owned by the member # TYPE ring_tokens_owned gauge @@ -3027,6 +3114,7 @@ func TestUpdateMetrics(t *testing.T) { ring_members{name="test",state="JOINING"} 0 ring_members{name="test",state="LEAVING"} 0 ring_members{name="test",state="PENDING"} 0 + ring_members{name="test",state="READONLY"} 0 ring_members{name="test",state="Unhealthy"} 0 # HELP ring_oldest_member_timestamp Timestamp of the oldest member in the ring. # TYPE ring_oldest_member_timestamp gauge @@ -3034,6 +3122,7 @@ func TestUpdateMetrics(t *testing.T) { ring_oldest_member_timestamp{name="test",state="JOINING"} 0 ring_oldest_member_timestamp{name="test",state="LEAVING"} 0 ring_oldest_member_timestamp{name="test",state="PENDING"} 0 + ring_oldest_member_timestamp{name="test",state="READONLY"} 0 ring_oldest_member_timestamp{name="test",state="Unhealthy"} 0 # HELP ring_tokens_total Number of tokens in the ring # TYPE ring_tokens_total gauge @@ -3105,6 +3194,7 @@ func TestUpdateMetricsWithRemoval(t *testing.T) { ring_members{name="test",state="JOINING"} 0 ring_members{name="test",state="LEAVING"} 0 ring_members{name="test",state="PENDING"} 0 + ring_members{name="test",state="READONLY"} 0 ring_members{name="test",state="Unhealthy"} 0 # HELP ring_oldest_member_timestamp Timestamp of the oldest member in the ring. 
# TYPE ring_oldest_member_timestamp gauge @@ -3112,6 +3202,7 @@ func TestUpdateMetricsWithRemoval(t *testing.T) { ring_oldest_member_timestamp{name="test",state="JOINING"} 0 ring_oldest_member_timestamp{name="test",state="LEAVING"} 0 ring_oldest_member_timestamp{name="test",state="PENDING"} 0 + ring_oldest_member_timestamp{name="test",state="READONLY"} 0 ring_oldest_member_timestamp{name="test",state="Unhealthy"} 0 # HELP ring_tokens_owned The number of tokens in the ring owned by the member # TYPE ring_tokens_owned gauge @@ -3140,6 +3231,7 @@ func TestUpdateMetricsWithRemoval(t *testing.T) { ring_members{name="test",state="JOINING"} 0 ring_members{name="test",state="LEAVING"} 0 ring_members{name="test",state="PENDING"} 0 + ring_members{name="test",state="READONLY"} 0 ring_members{name="test",state="Unhealthy"} 0 # HELP ring_oldest_member_timestamp Timestamp of the oldest member in the ring. # TYPE ring_oldest_member_timestamp gauge @@ -3147,6 +3239,7 @@ func TestUpdateMetricsWithRemoval(t *testing.T) { ring_oldest_member_timestamp{name="test",state="JOINING"} 0 ring_oldest_member_timestamp{name="test",state="LEAVING"} 0 ring_oldest_member_timestamp{name="test",state="PENDING"} 0 + ring_oldest_member_timestamp{name="test",state="READONLY"} 0 ring_oldest_member_timestamp{name="test",state="Unhealthy"} 0 # HELP ring_tokens_owned The number of tokens in the ring owned by the member # TYPE ring_tokens_owned gauge diff --git a/pkg/ruler/client_pool_test.go b/pkg/ruler/client_pool_test.go index 11c2ce4c2b..66fe273a68 100644 --- a/pkg/ruler/client_pool_test.go +++ b/pkg/ruler/client_pool_test.go @@ -14,6 +14,7 @@ import ( "github.com/cortexproject/cortex/pkg/util/flagext" "github.com/cortexproject/cortex/pkg/util/grpcclient" + "github.com/cortexproject/cortex/pkg/util/services" ) func Test_newRulerClientFactory(t *testing.T) { @@ -63,6 +64,12 @@ func Test_newRulerClientFactory(t *testing.T) { type mockRulerServer struct{} +func (m *mockRulerServer) LivenessCheck(ctx context.Context, request *LivenessCheckRequest) (*LivenessCheckResponse, error) { + return &LivenessCheckResponse{ + State: int32(services.Running), + }, nil +} + func (m *mockRulerServer) Rules(context.Context, *RulesRequest) (*RulesResponse, error) { return &RulesResponse{}, nil } diff --git a/pkg/ruler/compat.go b/pkg/ruler/compat.go index 9c3fd2f0f9..6ed72c9831 100644 --- a/pkg/ruler/compat.go +++ b/pkg/ruler/compat.go @@ -13,7 +13,6 @@ import ( "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/metadata" - "github.com/prometheus/prometheus/model/value" "github.com/prometheus/prometheus/notifier" "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/promql/parser" @@ -46,27 +45,15 @@ type PusherAppender struct { histogramLabels []labels.Labels histograms []cortexpb.Histogram userID string - evaluationDelay time.Duration } func (a *PusherAppender) AppendHistogram(_ storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) { if h == nil && fh == nil { return 0, errors.New("no histogram") } - if h != nil { - // A histogram sample is considered stale if its sum is set to NaN. 
- // https://github.com/prometheus/prometheus/blob/b6ef745016fa9472fdd0ae20f75a9682e01d1e5c/tsdb/head_append.go#L339-L346 - if a.evaluationDelay > 0 && (value.IsStaleNaN(h.Sum)) { - t -= a.evaluationDelay.Milliseconds() - } a.histograms = append(a.histograms, cortexpb.HistogramToHistogramProto(t, h)) } else { - // A histogram sample is considered stale if its sum is set to NaN. - // https://github.com/prometheus/prometheus/blob/b6ef745016fa9472fdd0ae20f75a9682e01d1e5c/tsdb/head_append.go#L339-L346 - if a.evaluationDelay > 0 && (value.IsStaleNaN(fh.Sum)) { - t -= a.evaluationDelay.Milliseconds() - } a.histograms = append(a.histograms, cortexpb.FloatHistogramToHistogramProto(t, fh)) } a.histogramLabels = append(a.histogramLabels, l) @@ -75,19 +62,6 @@ func (a *PusherAppender) Append(_ storage.SeriesRef, l labels.Labels, t int64, v float64) (storage.SeriesRef, error) { a.labels = append(a.labels, l) - - // Adapt staleness markers for ruler evaluation delay. As the upstream code - // is using the actual time, when there is a no longer available series. - // This then causes 'out of order' append failures once the series is - // becoming available again. - // see https://github.com/prometheus/prometheus/blob/6c56a1faaaad07317ff585bda75b99bdba0517ad/rules/manager.go#L647-L660 - // Similar to staleness markers, the rule manager also appends actual time to the ALERTS and ALERTS_FOR_STATE series. - // See: https://github.com/prometheus/prometheus/blob/ae086c73cb4d6db9e8b67d5038d3704fea6aec4a/rules/alerting.go#L414-L417 - metricName := l.Get(labels.MetricName) - if a.evaluationDelay > 0 && (value.IsStaleNaN(v) || metricName == "ALERTS" || metricName == "ALERTS_FOR_STATE") { - t -= a.evaluationDelay.Milliseconds() - } - a.samples = append(a.samples, cortexpb.Sample{ TimestampMs: t, Value: v, @@ -164,16 +138,14 @@ func (t *PusherAppendable) Appender(ctx context.Context) storage.Appender { failedWrites: t.failedWrites, totalWrites: t.totalWrites, - ctx: ctx, - pusher: t.pusher, - userID: t.userID, - evaluationDelay: t.rulesLimits.EvaluationDelay(t.userID), + ctx: ctx, + pusher: t.pusher, + userID: t.userID, } } // RulesLimits defines limits used by Ruler. type RulesLimits interface { - EvaluationDelay(userID string) time.Duration MaxQueryLength(userID string) time.Duration RulerTenantShardSize(userID string) int RulerMaxRuleGroupsPerTenant(userID string) int @@ -182,7 +154,7 @@ type RulesLimits interface { DisabledRuleGroups(userID string) validation.DisabledRuleGroups } -// EngineQueryFunc returns a new engine query function by passing an altered timestamp. +// EngineQueryFunc returns a new engine query function that validates the max query length. // Modified from Prometheus rules.EngineQueryFunc // https://github.com/prometheus/prometheus/blob/v2.39.1/rules/manager.go#L189.
func EngineQueryFunc(engine promql.QueryEngine, q storage.Queryable, overrides RulesLimits, userID string, lookbackDelta time.Duration) rules.QueryFunc { @@ -202,8 +174,7 @@ func EngineQueryFunc(engine promql.QueryEngine, q storage.Queryable, overrides R } } - evaluationDelay := overrides.EvaluationDelay(userID) - q, err := engine.NewInstantQuery(ctx, q, nil, qs, t.Add(-evaluationDelay)) + q, err := engine.NewInstantQuery(ctx, q, nil, qs, t) if err != nil { return nil, err } @@ -258,19 +229,25 @@ func MetricsQueryFunc(qf rules.QueryFunc, queries, failedQueries prometheus.Coun } } -func RecordAndReportRuleQueryMetrics(qf rules.QueryFunc, queryTime prometheus.Counter, logger log.Logger) rules.QueryFunc { - if queryTime == nil { - return qf - } +func RecordAndReportRuleQueryMetrics(qf rules.QueryFunc, userID string, evalMetrics *RuleEvalMetrics, logger log.Logger) rules.QueryFunc { + queryTime := evalMetrics.RulerQuerySeconds.WithLabelValues(userID) + querySeries := evalMetrics.RulerQuerySeries.WithLabelValues(userID) + querySample := evalMetrics.RulerQuerySamples.WithLabelValues(userID) + queryChunkBytes := evalMetrics.RulerQueryChunkBytes.WithLabelValues(userID) + queryDataBytes := evalMetrics.RulerQueryDataBytes.WithLabelValues(userID) return func(ctx context.Context, qs string, t time.Time) (promql.Vector, error) { queryStats, ctx := stats.ContextWithEmptyStats(ctx) // If we've been passed a counter we want to record the wall time spent executing this request. timer := prometheus.NewTimer(nil) + defer func() { querySeconds := timer.ObserveDuration().Seconds() queryTime.Add(querySeconds) - + querySeries.Add(float64(queryStats.FetchedSeriesCount)) + querySample.Add(float64(queryStats.FetchedSamplesCount)) + queryChunkBytes.Add(float64(queryStats.FetchedChunkBytes)) + queryDataBytes.Add(float64(queryStats.FetchedDataBytes)) // Log ruler query stats. 
logMessage := []interface{}{ "msg", "query stats", @@ -332,23 +309,24 @@ func DefaultTenantManagerFactory(cfg Config, p Pusher, q storage.Queryable, engi q = querier.NewErrorTranslateQueryableWithFn(q, WrapQueryableErrors) return func(ctx context.Context, userID string, notifier *notifier.Manager, logger log.Logger, reg prometheus.Registerer) RulesManager { - var queryTime prometheus.Counter - if evalMetrics.RulerQuerySeconds != nil { - queryTime = evalMetrics.RulerQuerySeconds.WithLabelValues(userID) - } - failedQueries := evalMetrics.FailedQueriesVec.WithLabelValues(userID) totalQueries := evalMetrics.TotalQueriesVec.WithLabelValues(userID) totalWrites := evalMetrics.TotalWritesVec.WithLabelValues(userID) failedWrites := evalMetrics.FailedWritesVec.WithLabelValues(userID) + var queryFunc rules.QueryFunc engineQueryFunc := EngineQueryFunc(engine, q, overrides, userID, cfg.LookbackDelta) metricsQueryFunc := MetricsQueryFunc(engineQueryFunc, totalQueries, failedQueries) + if cfg.EnableQueryStats { + queryFunc = RecordAndReportRuleQueryMetrics(metricsQueryFunc, userID, evalMetrics, logger) + } else { + queryFunc = metricsQueryFunc + } return rules.NewManager(&rules.ManagerOptions{ Appendable: NewPusherAppendable(p, userID, overrides, totalWrites, failedWrites), Queryable: q, - QueryFunc: RecordAndReportRuleQueryMetrics(metricsQueryFunc, queryTime, logger), + QueryFunc: queryFunc, Context: user.InjectOrgID(ctx, userID), ExternalURL: cfg.ExternalURL.URL, NotifyFunc: SendAlerts(notifier, cfg.ExternalURL.URL.String()), diff --git a/pkg/ruler/compat_test.go b/pkg/ruler/compat_test.go index cf4dc238d6..3a3d663350 100644 --- a/pkg/ruler/compat_test.go +++ b/pkg/ruler/compat_test.go @@ -23,6 +23,7 @@ import ( "github.com/weaveworks/common/httpgrpc" "github.com/cortexproject/cortex/pkg/cortexpb" + "github.com/cortexproject/cortex/pkg/querier/stats" "github.com/cortexproject/cortex/pkg/util/validation" ) @@ -43,7 +44,6 @@ func TestPusherAppendable(t *testing.T) { lbls1 := cortexpb.FromLabelsToLabelAdapters(labels.FromMap(map[string]string{labels.MetricName: "foo_bar"})) lbls2 := cortexpb.FromLabelsToLabelAdapters(labels.FromMap(map[string]string{labels.MetricName: "ALERTS", labels.AlertName: "boop"})) - lbls3 := cortexpb.FromLabelsToLabelAdapters(labels.FromMap(map[string]string{labels.MetricName: "ALERTS_FOR_STATE", labels.AlertName: "boop"})) testHistogram := tsdbutil.GenerateTestHistogram(1) testFloatHistogram := tsdbutil.GenerateTestFloatHistogram(2) @@ -55,14 +55,13 @@ func TestPusherAppendable(t *testing.T) { for _, tc := range []struct { name string series string - evalDelay time.Duration value float64 histogram *histogram.Histogram floatHistogram *histogram.FloatHistogram expectedReq *cortexpb.WriteRequest }{ { - name: "tenant without delay, normal value", + name: "tenant, normal value", series: "foo_bar", value: 1.234, expectedReq: &cortexpb.WriteRequest{ @@ -80,7 +79,7 @@ func TestPusherAppendable(t *testing.T) { }, }, { - name: "tenant without delay, stale nan value", + name: "tenant, stale nan value", series: "foo_bar", value: math.Float64frombits(value.StaleNaN), expectedReq: &cortexpb.WriteRequest{ @@ -98,45 +97,7 @@ func TestPusherAppendable(t *testing.T) { }, }, { - name: "tenant with delay, normal value", - series: "foo_bar", - value: 1.234, - evalDelay: time.Minute, - expectedReq: &cortexpb.WriteRequest{ - Timeseries: []cortexpb.PreallocTimeseries{ - { - TimeSeries: &cortexpb.TimeSeries{ - Labels: lbls1, - Samples: []cortexpb.Sample{ - {Value: 1.234, TimestampMs: 120_000}, - }, - }, 
- }, - }, - Source: cortexpb.RULE, - }, - }, - { - name: "tenant with delay, stale nan value", - series: "foo_bar", - value: math.Float64frombits(value.StaleNaN), - evalDelay: time.Minute, - expectedReq: &cortexpb.WriteRequest{ - Timeseries: []cortexpb.PreallocTimeseries{ - { - TimeSeries: &cortexpb.TimeSeries{ - Labels: lbls1, - Samples: []cortexpb.Sample{ - {Value: math.Float64frombits(value.StaleNaN), TimestampMs: 60_000}, - }, - }, - }, - }, - Source: cortexpb.RULE, - }, - }, - { - name: "ALERTS without delay, normal value", + name: "ALERTS, normal value", series: `ALERTS{alertname="boop"}`, value: 1.234, expectedReq: &cortexpb.WriteRequest{ @@ -154,7 +115,7 @@ func TestPusherAppendable(t *testing.T) { }, }, { - name: "ALERTS without delay, stale nan value", + name: "ALERTS, stale nan value", series: `ALERTS{alertname="boop"}`, value: math.Float64frombits(value.StaleNaN), expectedReq: &cortexpb.WriteRequest{ @@ -172,45 +133,7 @@ func TestPusherAppendable(t *testing.T) { }, }, { - name: "ALERTS with delay, normal value", - series: `ALERTS{alertname="boop"}`, - value: 1.234, - evalDelay: time.Minute, - expectedReq: &cortexpb.WriteRequest{ - Timeseries: []cortexpb.PreallocTimeseries{ - { - TimeSeries: &cortexpb.TimeSeries{ - Labels: lbls2, - Samples: []cortexpb.Sample{ - {Value: 1.234, TimestampMs: 60_000}, - }, - }, - }, - }, - Source: cortexpb.RULE, - }, - }, - { - name: "ALERTS with delay, stale nan value", - series: `ALERTS_FOR_STATE{alertname="boop"}`, - value: math.Float64frombits(value.StaleNaN), - evalDelay: time.Minute, - expectedReq: &cortexpb.WriteRequest{ - Timeseries: []cortexpb.PreallocTimeseries{ - { - TimeSeries: &cortexpb.TimeSeries{ - Labels: lbls3, - Samples: []cortexpb.Sample{ - {Value: math.Float64frombits(value.StaleNaN), TimestampMs: 60_000}, - }, - }, - }, - }, - Source: cortexpb.RULE, - }, - }, - { - name: "tenant without delay, normal histogram", + name: "tenant, normal histogram", series: "foo_bar", histogram: testHistogram, expectedReq: &cortexpb.WriteRequest{ @@ -228,7 +151,7 @@ func TestPusherAppendable(t *testing.T) { }, }, { - name: "tenant without delay, float histogram", + name: "tenant, float histogram", series: "foo_bar", floatHistogram: testFloatHistogram, expectedReq: &cortexpb.WriteRequest{ @@ -246,7 +169,7 @@ func TestPusherAppendable(t *testing.T) { }, }, { - name: "tenant without delay, both sample and histogram", + name: "tenant, both sample and histogram", series: "foo_bar", value: 1.234, histogram: testHistogram, @@ -273,7 +196,7 @@ func TestPusherAppendable(t *testing.T) { }, }, { - name: "tenant without delay, both sample and float histogram", + name: "tenant, both sample and float histogram", series: "foo_bar", value: 1.234, floatHistogram: testFloatHistogram, @@ -299,106 +222,9 @@ func TestPusherAppendable(t *testing.T) { Source: cortexpb.RULE, }, }, - { - name: "tenant with delay and NaN sample, normal histogram", - series: "foo_bar", - value: math.Float64frombits(value.StaleNaN), - evalDelay: time.Minute, - histogram: testHistogram, - expectedReq: &cortexpb.WriteRequest{ - Timeseries: []cortexpb.PreallocTimeseries{ - { - TimeSeries: &cortexpb.TimeSeries{ - Labels: lbls1, - Samples: []cortexpb.Sample{ - {Value: math.Float64frombits(value.StaleNaN), TimestampMs: 60_000}, - }, - }, - }, - { - TimeSeries: &cortexpb.TimeSeries{ - Labels: lbls1, - Histograms: []cortexpb.Histogram{ - cortexpb.HistogramToHistogramProto(120_000, testHistogram), - }, - }, - }, - }, - Source: cortexpb.RULE, - }, - }, - { - name: "tenant with delay and NaN sample, 
float histogram", - series: "foo_bar", - value: math.Float64frombits(value.StaleNaN), - evalDelay: time.Minute, - floatHistogram: testFloatHistogram, - expectedReq: &cortexpb.WriteRequest{ - Timeseries: []cortexpb.PreallocTimeseries{ - { - TimeSeries: &cortexpb.TimeSeries{ - Labels: lbls1, - Samples: []cortexpb.Sample{ - {Value: math.Float64frombits(value.StaleNaN), TimestampMs: 60_000}, - }, - }, - }, - { - TimeSeries: &cortexpb.TimeSeries{ - Labels: lbls1, - Histograms: []cortexpb.Histogram{ - cortexpb.FloatHistogramToHistogramProto(120_000, testFloatHistogram), - }, - }, - }, - }, - Source: cortexpb.RULE, - }, - }, - { - name: "tenant with delay, NaN histogram", - series: "foo_bar", - histogram: testHistogramWithNaN, - evalDelay: time.Minute, - expectedReq: &cortexpb.WriteRequest{ - Timeseries: []cortexpb.PreallocTimeseries{ - { - TimeSeries: &cortexpb.TimeSeries{ - Labels: lbls1, - Histograms: []cortexpb.Histogram{ - cortexpb.HistogramToHistogramProto(60_000, testHistogramWithNaN), - }, - }, - }, - }, - Source: cortexpb.RULE, - }, - }, - { - name: "tenant with delay, NaN float histogram", - series: "foo_bar", - floatHistogram: testFloatHistogramWithNaN, - evalDelay: time.Minute, - expectedReq: &cortexpb.WriteRequest{ - Timeseries: []cortexpb.PreallocTimeseries{ - { - TimeSeries: &cortexpb.TimeSeries{ - Labels: lbls1, - Histograms: []cortexpb.Histogram{ - cortexpb.FloatHistogramToHistogramProto(60_000, testFloatHistogramWithNaN), - }, - }, - }, - }, - Source: cortexpb.RULE, - }, - }, } { t.Run(tc.name, func(t *testing.T) { ctx := context.Background() - pa.rulesLimits = &ruleLimits{ - evalDelay: tc.evalDelay, - } lbls, err := parser.ParseMetric(tc.series) require.NoError(t, err) @@ -461,7 +287,7 @@ func TestPusherErrors(t *testing.T) { writes := prometheus.NewCounter(prometheus.CounterOpts{}) failures := prometheus.NewCounter(prometheus.CounterOpts{}) - pa := NewPusherAppendable(pusher, "user-1", ruleLimits{evalDelay: 10 * time.Second}, writes, failures) + pa := NewPusherAppendable(pusher, "user-1", ruleLimits{}, writes, failures) lbls, err := parser.ParseMetric("foo_bar") require.NoError(t, err) @@ -567,14 +393,23 @@ func TestMetricsQueryFuncErrors(t *testing.T) { } func TestRecordAndReportRuleQueryMetrics(t *testing.T) { - queryTime := prometheus.NewCounterVec(prometheus.CounterOpts{}, []string{"user"}) + metrics := NewRuleEvalMetrics(Config{EnableQueryStats: true}, prometheus.DefaultRegisterer) mockFunc := func(ctx context.Context, q string, t time.Time) (promql.Vector, error) { + queryStats := stats.FromContext(ctx) + queryStats.AddFetchedSeries(2) + queryStats.AddFetchedSamples(2) + queryStats.AddFetchedChunkBytes(10) + queryStats.AddFetchedDataBytes(14) time.Sleep(1 * time.Second) return promql.Vector{}, nil } - qf := RecordAndReportRuleQueryMetrics(mockFunc, queryTime.WithLabelValues("userID"), log.NewNopLogger()) + qf := RecordAndReportRuleQueryMetrics(mockFunc, "userID", metrics, log.NewNopLogger()) _, _ = qf(context.Background(), "test", time.Now()) - require.GreaterOrEqual(t, testutil.ToFloat64(queryTime.WithLabelValues("userID")), float64(1)) + require.GreaterOrEqual(t, testutil.ToFloat64(metrics.RulerQuerySeconds.WithLabelValues("userID")), float64(1)) + require.Equal(t, testutil.ToFloat64(metrics.RulerQuerySeries.WithLabelValues("userID")), float64(2)) + require.Equal(t, testutil.ToFloat64(metrics.RulerQuerySamples.WithLabelValues("userID")), float64(2)) + require.Equal(t, testutil.ToFloat64(metrics.RulerQueryChunkBytes.WithLabelValues("userID")), float64(10)) + 
require.Equal(t, testutil.ToFloat64(metrics.RulerQueryDataBytes.WithLabelValues("userID")), float64(14))
 }
diff --git a/pkg/ruler/manager_metrics.go b/pkg/ruler/manager_metrics.go
index 130387407c..93acdc26b1 100644
--- a/pkg/ruler/manager_metrics.go
+++ b/pkg/ruler/manager_metrics.go
@@ -225,11 +225,15 @@ func (m *ManagerMetrics) Collect(out chan<- prometheus.Metric) {
 }
 
 type RuleEvalMetrics struct {
-	TotalWritesVec    *prometheus.CounterVec
-	FailedWritesVec   *prometheus.CounterVec
-	TotalQueriesVec   *prometheus.CounterVec
-	FailedQueriesVec  *prometheus.CounterVec
-	RulerQuerySeconds *prometheus.CounterVec
+	TotalWritesVec       *prometheus.CounterVec
+	FailedWritesVec      *prometheus.CounterVec
+	TotalQueriesVec      *prometheus.CounterVec
+	FailedQueriesVec     *prometheus.CounterVec
+	RulerQuerySeconds    *prometheus.CounterVec
+	RulerQuerySeries     *prometheus.CounterVec
+	RulerQuerySamples    *prometheus.CounterVec
+	RulerQueryChunkBytes *prometheus.CounterVec
+	RulerQueryDataBytes  *prometheus.CounterVec
 }
 
 func NewRuleEvalMetrics(cfg Config, reg prometheus.Registerer) *RuleEvalMetrics {
@@ -256,6 +260,22 @@ func NewRuleEvalMetrics(cfg Config, reg prometheus.Registerer) *RuleEvalMetrics
 			Name: "cortex_ruler_query_seconds_total",
 			Help: "Total amount of wall clock time spent processing queries by the ruler.",
 		}, []string{"user"})
+		m.RulerQuerySeries = promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
+			Name: "cortex_ruler_fetched_series_total",
+			Help: "Number of series fetched to execute a query by the ruler.",
+		}, []string{"user"})
+		m.RulerQuerySamples = promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
+			Name: "cortex_ruler_samples_total",
+			Help: "Number of samples fetched to execute a query by the ruler.",
+		}, []string{"user"})
+		m.RulerQueryChunkBytes = promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
+			Name: "cortex_ruler_fetched_chunks_bytes_total",
+			Help: "Size of all chunks fetched to execute a query in bytes by the ruler.",
+		}, []string{"user"})
+		m.RulerQueryDataBytes = promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
+			Name: "cortex_ruler_fetched_data_bytes_total",
+			Help: "Size of all data fetched to execute a query in bytes by the ruler.",
+		}, []string{"user"})
 	}
 
 	return m
@@ -270,4 +290,52 @@ func (m *RuleEvalMetrics) deletePerUserMetrics(userID string) {
 	if m.RulerQuerySeconds != nil {
 		m.RulerQuerySeconds.DeleteLabelValues(userID)
 	}
+	if m.RulerQuerySeries != nil {
+		m.RulerQuerySeries.DeleteLabelValues(userID)
+	}
+	if m.RulerQuerySamples != nil {
+		m.RulerQuerySamples.DeleteLabelValues(userID)
+	}
+	if m.RulerQueryChunkBytes != nil {
+		m.RulerQueryChunkBytes.DeleteLabelValues(userID)
+	}
+	if m.RulerQueryDataBytes != nil {
+		m.RulerQueryDataBytes.DeleteLabelValues(userID)
+	}
+}
+
+type RuleGroupMetrics struct {
+	RuleGroupsInStore *prometheus.GaugeVec
+	tenants           map[string]struct{}
+	allowedTenants    *util.AllowedTenants
+}
+
+func NewRuleGroupMetrics(reg prometheus.Registerer, allowedTenants *util.AllowedTenants) *RuleGroupMetrics {
+	m := &RuleGroupMetrics{
+		RuleGroupsInStore: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
+			Name: "cortex_ruler_rule_groups_in_store",
+			Help: "The number of rule groups a tenant has in store.",
+		}, []string{"user"}),
+		allowedTenants: allowedTenants,
+	}
+	return m
+}
+
+// UpdateRuleGroupsInStore updates the cortex_ruler_rule_groups_in_store metric with the provided number of rule
+// groups per tenant and removes the metrics for tenants that are no longer present.
+func (r *RuleGroupMetrics)
UpdateRuleGroupsInStore(ruleGroupsCount map[string]int) { + tenants := make(map[string]struct{}, len(ruleGroupsCount)) + for userID, count := range ruleGroupsCount { + if !r.allowedTenants.IsAllowed(userID) { // if the tenant is disabled just ignore its rule groups + continue + } + tenants[userID] = struct{}{} + r.RuleGroupsInStore.WithLabelValues(userID).Set(float64(count)) + } + for userID := range r.tenants { + if _, ok := tenants[userID]; !ok { + r.RuleGroupsInStore.DeleteLabelValues(userID) + } + } + r.tenants = tenants } diff --git a/pkg/ruler/manager_metrics_test.go b/pkg/ruler/manager_metrics_test.go index 4b851ef192..dfc9800ad5 100644 --- a/pkg/ruler/manager_metrics_test.go +++ b/pkg/ruler/manager_metrics_test.go @@ -574,8 +574,16 @@ func TestRuleEvalMetricsDeletePerUserMetrics(t *testing.T) { m.FailedQueriesVec.WithLabelValues("fake2").Add(10) m.RulerQuerySeconds.WithLabelValues("fake1").Add(10) m.RulerQuerySeconds.WithLabelValues("fake2").Add(10) - - metricNames := []string{"cortex_ruler_write_requests_total", "cortex_ruler_write_requests_failed_total", "cortex_ruler_queries_total", "cortex_ruler_queries_failed_total", "cortex_ruler_query_seconds_total"} + m.RulerQuerySeries.WithLabelValues("fake1").Add(10) + m.RulerQuerySeries.WithLabelValues("fake2").Add(10) + m.RulerQuerySamples.WithLabelValues("fake1").Add(10) + m.RulerQuerySamples.WithLabelValues("fake2").Add(10) + m.RulerQueryChunkBytes.WithLabelValues("fake1").Add(10) + m.RulerQueryChunkBytes.WithLabelValues("fake2").Add(10) + m.RulerQueryDataBytes.WithLabelValues("fake1").Add(10) + m.RulerQueryDataBytes.WithLabelValues("fake2").Add(10) + + metricNames := []string{"cortex_ruler_write_requests_total", "cortex_ruler_write_requests_failed_total", "cortex_ruler_queries_total", "cortex_ruler_queries_failed_total", "cortex_ruler_query_seconds_total", "cortex_ruler_fetched_series_total", "cortex_ruler_samples_total", "cortex_ruler_fetched_chunks_bytes_total", "cortex_ruler_fetched_data_bytes_total"} gm, err := reg.Gather() require.NoError(t, err) mfm, err := util.NewMetricFamilyMap(gm) @@ -595,3 +603,41 @@ func TestRuleEvalMetricsDeletePerUserMetrics(t *testing.T) { require.Contains(t, mfm[name].String(), "value:\"fake2\"") } } + +func TestRuleGroupMetrics(t *testing.T) { + reg := prometheus.NewPedanticRegistry() + m := NewRuleGroupMetrics(reg, util.NewAllowedTenants(nil, []string{"fake3"})) + m.UpdateRuleGroupsInStore(map[string]int{ + "fake1": 10, + "fake2": 20, + "fake3": 30, + }) + gm, err := reg.Gather() + require.NoError(t, err) + mfm, err := util.NewMetricFamilyMap(gm) + require.NoError(t, err) + require.Equal(t, 2, len(mfm["cortex_ruler_rule_groups_in_store"].Metric)) + requireMetricEqual(t, mfm["cortex_ruler_rule_groups_in_store"].Metric[0], map[string]string{ + "user": "fake1", + }, float64(10)) + requireMetricEqual(t, mfm["cortex_ruler_rule_groups_in_store"].Metric[1], map[string]string{ + "user": "fake2", + }, float64(20)) + m.UpdateRuleGroupsInStore(map[string]int{ + "fake2": 30, + }) + gm, err = reg.Gather() + require.NoError(t, err) + mfm, err = util.NewMetricFamilyMap(gm) + require.NoError(t, err) + require.Equal(t, 1, len(mfm["cortex_ruler_rule_groups_in_store"].Metric)) + requireMetricEqual(t, mfm["cortex_ruler_rule_groups_in_store"].Metric[0], map[string]string{ + "user": "fake2", + }, float64(30)) + m.UpdateRuleGroupsInStore(make(map[string]int)) + gm, err = reg.Gather() + require.NoError(t, err) + mfm, err = util.NewMetricFamilyMap(gm) + require.NoError(t, err) + require.Nil(t, 
mfm["cortex_ruler_rule_groups_in_store"])
+}
diff --git a/pkg/ruler/ruler.go b/pkg/ruler/ruler.go
index 56c4a63e59..fa405babcb 100644
--- a/pkg/ruler/ruler.go
+++ b/pkg/ruler/ruler.go
@@ -37,7 +37,6 @@ import (
 	util_api "github.com/cortexproject/cortex/pkg/util/api"
 	"github.com/cortexproject/cortex/pkg/util/concurrency"
 	"github.com/cortexproject/cortex/pkg/util/flagext"
-	"github.com/cortexproject/cortex/pkg/util/grpcclient"
 	util_log "github.com/cortexproject/cortex/pkg/util/log"
 	"github.com/cortexproject/cortex/pkg/util/services"
 	"github.com/cortexproject/cortex/pkg/util/validation"
@@ -81,6 +80,8 @@ const (
 	unknownHealthFilter string = "unknown"
 	okHealthFilter      string = "ok"
 	errHealthFilter     string = "err"
+
+	livenessCheckTimeout = 100 * time.Millisecond
 )
 
 type DisabledRuleGroupErr struct {
@@ -98,7 +99,7 @@ type Config struct {
 	// Labels to add to all alerts
 	ExternalLabels labels.Labels `yaml:"external_labels,omitempty" doc:"nocli|description=Labels to add to all alerts."`
 	// GRPC Client configuration.
-	ClientTLSConfig grpcclient.Config `yaml:"ruler_client"`
+	ClientTLSConfig ClientConfig `yaml:"ruler_client"`
 	// How frequently to evaluate rules by default.
 	EvaluationInterval time.Duration `yaml:"evaluation_interval"`
 	// How frequently to poll for updated rules.
@@ -151,6 +152,8 @@ type Config struct {
 
 	EnableQueryStats      bool `yaml:"query_stats_enabled"`
 	DisableRuleGroupLabel bool `yaml:"disable_rule_group_label"`
+
+	EnableHAEvaluation bool `yaml:"enable_ha_evaluation"`
 }
 
 // Validate config and returns error on failure
@@ -217,9 +220,11 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
 	f.Var(&cfg.EnabledTenants, "ruler.enabled-tenants", "Comma separated list of tenants whose rules this ruler can evaluate. If specified, only these tenants will be handled by ruler, otherwise this ruler can process rules from all tenants. Subject to sharding.")
 	f.Var(&cfg.DisabledTenants, "ruler.disabled-tenants", "Comma separated list of tenants whose rules this ruler cannot evaluate. If specified, a ruler that would normally pick the specified tenant(s) for processing will ignore them instead. Subject to sharding.")
-	f.BoolVar(&cfg.EnableQueryStats, "ruler.query-stats-enabled", false, "Report the wall time for ruler queries to complete as a per user metric and as an info level log message.")
+	f.BoolVar(&cfg.EnableQueryStats, "ruler.query-stats-enabled", false, "Report query statistics for ruler queries as a per-user metric and as an info level log message.")
 	f.BoolVar(&cfg.DisableRuleGroupLabel, "ruler.disable-rule-group-label", false, "Disable the rule_group label on exported metrics")
 
+	f.BoolVar(&cfg.EnableHAEvaluation, "ruler.enable-ha-evaluation", false, "Enable high availability evaluation of rule groups.")
+
 	cfg.RingCheckPeriod = 5 * time.Second
 }
 
@@ -294,6 +299,7 @@ type Ruler struct {
 	ruleGroupStoreLoadDuration prometheus.Gauge
 	ruleGroupSyncDuration      prometheus.Gauge
 	rulerGetRulesFailures      *prometheus.CounterVec
+	ruleGroupMetrics           *RuleGroupMetrics
 
 	allowedTenants *util.AllowedTenants
 
@@ -303,7 +309,7 @@
 // NewRuler creates a new ruler from a distributor and chunk store.
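// The ruler-to-ruler client options now come from ClientConfig (added later in
// this diff as pkg/ruler/ruler_client_config.go), which embeds grpcclient.Config
// and adds a RemoteTimeout. A rough sketch of how the two halves are consumed:
// the embedded config builds the client pool, while the timeout bounds each
// remote call (the address here is hypothetical):
//
//	pool := newRulerClientPool(cfg.ClientTLSConfig.Config, logger, reg)
//	client, err := pool.GetClientFor("ruler-1:9095")
//	ctx, cancel := context.WithTimeout(ctx, cfg.ClientTLSConfig.RemoteTimeout)
//	defer cancel()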
func NewRuler(cfg Config, manager MultiTenantManager, reg prometheus.Registerer, logger log.Logger, ruleStore rulestore.RuleStore, limits RulesLimits) (*Ruler, error) { - return newRuler(cfg, manager, reg, logger, ruleStore, limits, newRulerClientPool(cfg.ClientTLSConfig, logger, reg)) + return newRuler(cfg, manager, reg, logger, ruleStore, limits, newRulerClientPool(cfg.ClientTLSConfig.Config, logger, reg)) } func newRuler(cfg Config, manager MultiTenantManager, reg prometheus.Registerer, logger log.Logger, ruleStore rulestore.RuleStore, limits RulesLimits, clientPool ClientsPool) (*Ruler, error) { @@ -342,6 +348,7 @@ func newRuler(cfg Config, manager MultiTenantManager, reg prometheus.Registerer, Help: "The total number of failed rules request sent to rulers in getShardedRules.", }, []string{"ruler"}), } + ruler.ruleGroupMetrics = NewRuleGroupMetrics(reg, ruler.allowedTenants) if len(cfg.EnabledTenants) > 0 { level.Info(ruler.logger).Log("msg", "ruler using enabled users", "enabled", strings.Join(cfg.EnabledTenants, ", ")) @@ -379,7 +386,9 @@ func enableSharding(r *Ruler, ringStore kv.Client) error { // Define lifecycler delegates in reverse order (last to be called defined first because they're // chained via "next delegate"). delegate := ring.BasicLifecyclerDelegate(r) - delegate = ring.NewLeaveOnStoppingDelegate(delegate, r.logger) + if !r.Config().Ring.KeepInstanceInTheRingOnShutdown { + delegate = ring.NewLeaveOnStoppingDelegate(delegate, r.logger) + } delegate = ring.NewTokensPersistencyDelegate(r.cfg.Ring.TokensFilePath, ring.JOINING, delegate, r.logger) delegate = ring.NewAutoForgetDelegate(r.cfg.Ring.HeartbeatTimeout*ringAutoForgetUnhealthyPeriods, delegate, r.logger) @@ -397,6 +406,18 @@ func enableSharding(r *Ruler, ringStore kv.Client) error { return nil } +func (r *Ruler) Logger() log.Logger { + return r.logger +} + +func (r *Ruler) GetClientFor(addr string) (RulerClient, error) { + return r.clientsPool.GetClientFor(addr) +} + +func (r *Ruler) Config() Config { + return r.cfg +} + func (r *Ruler) starting(ctx context.Context) error { // If sharding is enabled, start the used subservices. 
if r.cfg.EnableSharding { @@ -488,34 +509,110 @@ func tokenForGroup(g *rulespb.RuleGroupDesc) uint32 { return ringHasher.Sum32() } -func instanceOwnsRuleGroup(r ring.ReadRing, g *rulespb.RuleGroupDesc, disabledRuleGroups validation.DisabledRuleGroups, instanceAddr string, forBackup bool) (bool, error) { +func (r *Ruler) instanceOwnsRuleGroup(rr ring.ReadRing, g *rulespb.RuleGroupDesc, disabledRuleGroups validation.DisabledRuleGroups, forBackup bool) (bool, error) { hash := tokenForGroup(g) - rlrs, err := r.Get(hash, RingOp, nil, nil, nil) + rlrs, err := rr.Get(hash, RingOp, nil, nil, nil) if err != nil { return false, errors.Wrap(err, "error reading ring to verify rule group ownership") } - var ownsRuleGroup bool + instanceAddr := r.lifecycler.GetInstanceAddr() if forBackup { // Only the second up to the last replica are used as backup for i := 1; i < len(rlrs.Instances); i++ { if rlrs.Instances[i].Addr == instanceAddr { - ownsRuleGroup = true - break + return ownsRuleGroupOrDisable(g, disabledRuleGroups) } } - } else { - // Even if the replication factor is set to a number bigger than 1, only the first ruler evaluates the rule group - ownsRuleGroup = rlrs.Instances[0].Addr == instanceAddr + return false, nil + } + if r.Config().EnableHAEvaluation { + for i, ruler := range rlrs.Instances { + if ruler.Addr == instanceAddr && i == 0 { + level.Debug(r.Logger()).Log("msg", "primary taking ownership", "user", g.User, "group", g.Name, "namespace", g.Namespace, "ruler", instanceAddr) + return ownsRuleGroupOrDisable(g, disabledRuleGroups) + } + if ruler.Addr == instanceAddr && r.nonPrimaryInstanceOwnsRuleGroup(g, rlrs.GetAddresses()[:i]) { + level.Info(r.Logger()).Log("msg", "non-primary ruler taking ownership", "user", g.User, "group", g.Name, "namespace", g.Namespace, "ruler", instanceAddr) + return ownsRuleGroupOrDisable(g, disabledRuleGroups) + } + } + return false, nil } + // Even if the replication factor is set to a number bigger than 1, only the first ruler evaluates the rule group + if rlrs.Instances[0].Addr == instanceAddr { + return ownsRuleGroupOrDisable(g, disabledRuleGroups) + } + return false, nil +} - if ownsRuleGroup && ruleGroupDisabled(g, disabledRuleGroups) { +func ownsRuleGroupOrDisable(g *rulespb.RuleGroupDesc, disabledRuleGroups validation.DisabledRuleGroups) (bool, error) { + if ruleGroupDisabled(g, disabledRuleGroups) { return false, &DisabledRuleGroupErr{Message: fmt.Sprintf("rule group %s, namespace %s, user %s is disabled", g.Name, g.Namespace, g.User)} } + return true, nil +} - return ownsRuleGroup, nil +func (r *Ruler) LivenessCheck(_ context.Context, request *LivenessCheckRequest) (*LivenessCheckResponse, error) { + if r.lifecycler.ServiceContext().Err() != nil || r.subservices.IsStopped() { + return nil, errors.New("ruler's context is canceled and might be stopping soon") + } + if !r.subservices.IsHealthy() { + return nil, errors.New("not all subservices are in healthy state") + } + return &LivenessCheckResponse{State: int32(r.State())}, nil +} + +// This function performs a liveness check against the provided replicas. If any one of the replicas responds with a state = Running, then +// this Ruler should not take ownership of the rule group. 
Otherwise, this Ruler must take ownership of the rule group to avoid missing evaluations.
+func (r *Ruler) nonPrimaryInstanceOwnsRuleGroup(g *rulespb.RuleGroupDesc, replicas []string) bool {
+	userID := g.User
+
+	jobs := concurrency.CreateJobsFromStrings(replicas)
+
+	errorChan := make(chan error, len(jobs))
+	responseChan := make(chan *LivenessCheckResponse, len(jobs))
+
+	ctx := user.InjectOrgID(context.Background(), userID)
+	ctx, cancel := context.WithTimeout(ctx, livenessCheckTimeout)
+	defer cancel()
+
+	err := concurrency.ForEach(ctx, jobs, len(jobs), func(ctx context.Context, job interface{}) error {
+		addr := job.(string)
+		rulerClient, err := r.GetClientFor(addr)
+		if err != nil {
+			errorChan <- err
+			level.Error(r.Logger()).Log("msg", "unable to get client for ruler", "ruler addr", addr)
+			return nil
+		}
+		level.Debug(r.Logger()).Log("msg", "performing liveness check against", "addr", addr, "for", g.Name)
+
+		resp, err := rulerClient.LivenessCheck(ctx, &LivenessCheckRequest{})
+		if err != nil {
+			errorChan <- err
+			level.Debug(r.Logger()).Log("msg", "liveness check failed", "addr", addr, "for", g.Name, "err", err.Error())
+			return nil
+		}
+		level.Debug(r.Logger()).Log("msg", "liveness check succeeded", "addr", addr, "for", g.Name, "ruler state", services.State(resp.GetState()))
+		responseChan <- resp
+		return nil
+	})
+
+	close(errorChan)
+	close(responseChan)
+
+	if len(errorChan) == len(jobs) || err != nil {
+		return true
+	}
+
+	for resp := range responseChan {
+		if services.State(resp.GetState()) == services.Running {
+			return false
+		}
+	}
+	return true
+}
 
 func (r *Ruler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
@@ -577,7 +674,7 @@ func (r *Ruler) run(ctx context.Context) error {
 }
 
 func (r *Ruler) syncRules(ctx context.Context, reason string) {
-	level.Debug(r.logger).Log("msg", "syncing rules", "reason", reason)
+	level.Info(r.logger).Log("msg", "syncing rules", "reason", reason)
 	r.rulerSync.WithLabelValues(reason).Inc()
 	timer := prometheus.NewTimer(nil)
 
@@ -591,6 +688,10 @@ func (r *Ruler) syncRules(ctx context.Context, reason string) {
 		return
 	}
 
+	if ctx.Err() != nil {
+		level.Info(r.logger).Log("msg", "context is canceled, not syncing rules")
+		return
+	}
 	// This will also delete local group files for users that are no longer in 'configs' map.
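	// For example, if the previous sync loaded groups for tenants "a" and "b"
	// and the new map only contains "a", the manager for "b" is stopped and its
	// local group files are removed. A sketch of the shape being passed, with
	// illustrative tenant names:
	//
	//	loadedConfigs := map[string]rulespb.RuleGroupList{
	//	    "a": { /* rule groups this ruler owns for tenant "a" */ },
	//	}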
r.manager.SyncRuleGroups(ctx, loadedConfigs) @@ -667,7 +768,9 @@ func (r *Ruler) listRulesNoSharding(ctx context.Context) (map[string]rulespb.Rul if err != nil { return nil, nil, err } + ruleGroupCounts := make(map[string]int, len(allRuleGroups)) for userID, groups := range allRuleGroups { + ruleGroupCounts[userID] = len(groups) disabledRuleGroupsForUser := r.limits.DisabledRuleGroups(userID) if len(disabledRuleGroupsForUser) == 0 { continue @@ -682,6 +785,7 @@ func (r *Ruler) listRulesNoSharding(ctx context.Context) (map[string]rulespb.Rul } allRuleGroups[userID] = filteredGroupsForUser } + r.ruleGroupMetrics.UpdateRuleGroupsInStore(ruleGroupCounts) return allRuleGroups, nil, nil } @@ -691,20 +795,23 @@ func (r *Ruler) listRulesShardingDefault(ctx context.Context) (map[string]rulesp return nil, nil, err } + ruleGroupCounts := make(map[string]int, len(configs)) ownedConfigs := make(map[string]rulespb.RuleGroupList) backedUpConfigs := make(map[string]rulespb.RuleGroupList) for userID, groups := range configs { - owned := filterRuleGroups(userID, groups, r.limits.DisabledRuleGroups(userID), r.ring, r.lifecycler.GetInstanceAddr(), r.logger, r.ringCheckErrors) + ruleGroupCounts[userID] = len(groups) + owned := r.filterRuleGroups(userID, groups, r.ring) if len(owned) > 0 { ownedConfigs[userID] = owned } if r.cfg.RulesBackupEnabled() { - backup := filterBackupRuleGroups(userID, groups, r.limits.DisabledRuleGroups(userID), r.ring, r.lifecycler.GetInstanceAddr(), r.logger, r.ringCheckErrors) + backup := r.filterBackupRuleGroups(userID, groups, owned, r.ring) if len(backup) > 0 { backedUpConfigs[userID] = backup } } } + r.ruleGroupMetrics.UpdateRuleGroupsInStore(ruleGroupCounts) return ownedConfigs, backedUpConfigs, nil } @@ -732,6 +839,7 @@ func (r *Ruler) listRulesShuffleSharding(ctx context.Context) (map[string]rulesp } if len(userRings) == 0 { + r.ruleGroupMetrics.UpdateRuleGroupsInStore(make(map[string]int)) return nil, nil, nil } @@ -744,6 +852,8 @@ func (r *Ruler) listRulesShuffleSharding(ctx context.Context) (map[string]rulesp mu := sync.Mutex{} owned := map[string]rulespb.RuleGroupList{} backedUp := map[string]rulespb.RuleGroupList{} + gLock := sync.Mutex{} + ruleGroupCounts := make(map[string]int, len(userRings)) concurrency := loadRulesConcurrency if len(userRings) < concurrency { @@ -758,11 +868,14 @@ func (r *Ruler) listRulesShuffleSharding(ctx context.Context) (map[string]rulesp if err != nil { return errors.Wrapf(err, "failed to fetch rule groups for user %s", userID) } + gLock.Lock() + ruleGroupCounts[userID] = len(groups) + gLock.Unlock() - filterOwned := filterRuleGroups(userID, groups, r.limits.DisabledRuleGroups(userID), userRings[userID], r.lifecycler.GetInstanceAddr(), r.logger, r.ringCheckErrors) + filterOwned := r.filterRuleGroups(userID, groups, userRings[userID]) var filterBackup []*rulespb.RuleGroupDesc if r.cfg.RulesBackupEnabled() { - filterBackup = filterBackupRuleGroups(userID, groups, r.limits.DisabledRuleGroups(userID), userRings[userID], r.lifecycler.GetInstanceAddr(), r.logger, r.ringCheckErrors) + filterBackup = r.filterBackupRuleGroups(userID, groups, filterOwned, userRings[userID]) } if len(filterOwned) == 0 && len(filterBackup) == 0 { continue @@ -781,36 +894,37 @@ func (r *Ruler) listRulesShuffleSharding(ctx context.Context) (map[string]rulesp } err = g.Wait() + r.ruleGroupMetrics.UpdateRuleGroupsInStore(ruleGroupCounts) return owned, backedUp, err } // filterRuleGroups returns map of rule groups that given instance "owns" based on supplied ring. 
// This function only uses User, Namespace, and Name fields of individual RuleGroups. // -// Reason why this function is not a method on Ruler is to make sure we don't accidentally use r.ring, -// but only ring passed as parameter. -func filterRuleGroups(userID string, ruleGroups []*rulespb.RuleGroupDesc, disabledRuleGroups validation.DisabledRuleGroups, ring ring.ReadRing, instanceAddr string, log log.Logger, ringCheckErrors prometheus.Counter) []*rulespb.RuleGroupDesc { +// This method must not use r.ring, but only ring passed as parameter. +func (r *Ruler) filterRuleGroups(userID string, ruleGroups []*rulespb.RuleGroupDesc, ring ring.ReadRing) []*rulespb.RuleGroupDesc { // Prune the rule group to only contain rules that this ruler is responsible for, based on ring. var result []*rulespb.RuleGroupDesc + for _, g := range ruleGroups { - owned, err := instanceOwnsRuleGroup(ring, g, disabledRuleGroups, instanceAddr, false) + owned, err := r.instanceOwnsRuleGroup(ring, g, r.limits.DisabledRuleGroups(userID), false) if err != nil { switch e := err.(type) { case *DisabledRuleGroupErr: - level.Info(log).Log("msg", e.Message) + level.Info(r.logger).Log("msg", e.Message) continue default: - ringCheckErrors.Inc() - level.Error(log).Log("msg", "failed to check if the ruler replica owns the rule group", "user", userID, "namespace", g.Namespace, "group", g.Name, "err", err) + r.ringCheckErrors.Inc() + level.Error(r.logger).Log("msg", "failed to check if the ruler replica owns the rule group", "user", userID, "namespace", g.Namespace, "group", g.Name, "err", err) continue } } if owned { - level.Debug(log).Log("msg", "rule group owned", "user", g.User, "namespace", g.Namespace, "name", g.Name) + level.Debug(r.logger).Log("msg", "rule group owned", "user", g.User, "namespace", g.Namespace, "name", g.Name) result = append(result, g) } else { - level.Debug(log).Log("msg", "rule group not owned, ignoring", "user", g.User, "namespace", g.Namespace, "name", g.Name) + level.Debug(r.logger).Log("msg", "rule group not owned, ignoring", "user", g.User, "namespace", g.Namespace, "name", g.Name) } } @@ -820,29 +934,38 @@ func filterRuleGroups(userID string, ruleGroups []*rulespb.RuleGroupDesc, disabl // filterBackupRuleGroups returns map of rule groups that given instance backs up based on supplied ring. // This function only uses User, Namespace, and Name fields of individual RuleGroups. // -// Reason why this function is not a method on Ruler is to make sure we don't accidentally use r.ring, -// but only ring passed as parameter. 
-func filterBackupRuleGroups(userID string, ruleGroups []*rulespb.RuleGroupDesc, disabledRuleGroups validation.DisabledRuleGroups, ring ring.ReadRing, instanceAddr string, log log.Logger, ringCheckErrors prometheus.Counter) []*rulespb.RuleGroupDesc { +// This method must not use r.ring, but only ring passed as parameter +func (r *Ruler) filterBackupRuleGroups(userID string, ruleGroups []*rulespb.RuleGroupDesc, owned []*rulespb.RuleGroupDesc, ring ring.ReadRing) []*rulespb.RuleGroupDesc { var result []*rulespb.RuleGroupDesc + ownedMap := map[uint32]struct{}{} + for _, g := range owned { + hash := tokenForGroup(g) + ownedMap[hash] = struct{}{} + } for _, g := range ruleGroups { - backup, err := instanceOwnsRuleGroup(ring, g, disabledRuleGroups, instanceAddr, true) + hash := tokenForGroup(g) + // if already owned for eval, don't take backup ownership + if _, OK := ownedMap[hash]; OK { + continue + } + backup, err := r.instanceOwnsRuleGroup(ring, g, r.limits.DisabledRuleGroups(userID), true) if err != nil { switch e := err.(type) { case *DisabledRuleGroupErr: - level.Info(log).Log("msg", e.Message) + level.Info(r.logger).Log("msg", e.Message) continue default: - ringCheckErrors.Inc() - level.Error(log).Log("msg", "failed to check if the ruler replica backs up the rule group", "user", userID, "namespace", g.Namespace, "group", g.Name, "err", err) + r.ringCheckErrors.Inc() + level.Error(r.logger).Log("msg", "failed to check if the ruler replica backs up the rule group", "user", userID, "namespace", g.Namespace, "group", g.Name, "err", err) continue } } if backup { - level.Debug(log).Log("msg", "rule group backed up", "user", g.User, "namespace", g.Namespace, "name", g.Name) + level.Debug(r.logger).Log("msg", "rule group backed up", "user", g.User, "namespace", g.Namespace, "name", g.Name) result = append(result, g) } else { - level.Debug(log).Log("msg", "rule group not backed up, ignoring", "user", g.User, "namespace", g.Namespace, "name", g.Name) + level.Debug(r.logger).Log("msg", "rule group not backed up, ignoring", "user", g.User, "namespace", g.Namespace, "name", g.Name) } } @@ -1174,6 +1297,8 @@ func (r *Ruler) getShardedRules(ctx context.Context, userID string, rulesRequest return errors.Wrapf(err, "unable to get client for ruler %s", addr) } + ctx, cancel := context.WithTimeout(ctx, r.cfg.ClientTLSConfig.RemoteTimeout) + defer cancel() newGrps, err := rulerClient.Rules(ctx, &RulesRequest{ RuleNames: rulesRequest.GetRuleNames(), RuleGroupNames: rulesRequest.GetRuleGroupNames(), diff --git a/pkg/ruler/ruler.pb.go b/pkg/ruler/ruler.pb.go index 5eb36c7a6a..b0078f4fbf 100644 --- a/pkg/ruler/ruler.pb.go +++ b/pkg/ruler/ruler.pb.go @@ -137,6 +137,84 @@ func (m *RulesRequest) GetExcludeAlerts() bool { return false } +type LivenessCheckRequest struct { +} + +func (m *LivenessCheckRequest) Reset() { *m = LivenessCheckRequest{} } +func (*LivenessCheckRequest) ProtoMessage() {} +func (*LivenessCheckRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_9ecbec0a4cfddea6, []int{1} +} +func (m *LivenessCheckRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LivenessCheckRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_LivenessCheckRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *LivenessCheckRequest) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_LivenessCheckRequest.Merge(m, src) +} +func (m *LivenessCheckRequest) XXX_Size() int { + return m.Size() +} +func (m *LivenessCheckRequest) XXX_DiscardUnknown() { + xxx_messageInfo_LivenessCheckRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_LivenessCheckRequest proto.InternalMessageInfo + +type LivenessCheckResponse struct { + State int32 `protobuf:"varint,1,opt,name=state,proto3" json:"state,omitempty"` +} + +func (m *LivenessCheckResponse) Reset() { *m = LivenessCheckResponse{} } +func (*LivenessCheckResponse) ProtoMessage() {} +func (*LivenessCheckResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_9ecbec0a4cfddea6, []int{2} +} +func (m *LivenessCheckResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LivenessCheckResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_LivenessCheckResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *LivenessCheckResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_LivenessCheckResponse.Merge(m, src) +} +func (m *LivenessCheckResponse) XXX_Size() int { + return m.Size() +} +func (m *LivenessCheckResponse) XXX_DiscardUnknown() { + xxx_messageInfo_LivenessCheckResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_LivenessCheckResponse proto.InternalMessageInfo + +func (m *LivenessCheckResponse) GetState() int32 { + if m != nil { + return m.State + } + return 0 +} + type RulesResponse struct { Groups []*GroupStateDesc `protobuf:"bytes,1,rep,name=groups,proto3" json:"groups,omitempty"` } @@ -144,7 +222,7 @@ type RulesResponse struct { func (m *RulesResponse) Reset() { *m = RulesResponse{} } func (*RulesResponse) ProtoMessage() {} func (*RulesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_9ecbec0a4cfddea6, []int{1} + return fileDescriptor_9ecbec0a4cfddea6, []int{3} } func (m *RulesResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -191,7 +269,7 @@ type GroupStateDesc struct { func (m *GroupStateDesc) Reset() { *m = GroupStateDesc{} } func (*GroupStateDesc) ProtoMessage() {} func (*GroupStateDesc) Descriptor() ([]byte, []int) { - return fileDescriptor_9ecbec0a4cfddea6, []int{2} + return fileDescriptor_9ecbec0a4cfddea6, []int{4} } func (m *GroupStateDesc) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -262,7 +340,7 @@ type RuleStateDesc struct { func (m *RuleStateDesc) Reset() { *m = RuleStateDesc{} } func (*RuleStateDesc) ProtoMessage() {} func (*RuleStateDesc) Descriptor() ([]byte, []int) { - return fileDescriptor_9ecbec0a4cfddea6, []int{3} + return fileDescriptor_9ecbec0a4cfddea6, []int{5} } func (m *RuleStateDesc) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -356,7 +434,7 @@ type AlertStateDesc struct { func (m *AlertStateDesc) Reset() { *m = AlertStateDesc{} } func (*AlertStateDesc) ProtoMessage() {} func (*AlertStateDesc) Descriptor() ([]byte, []int) { - return fileDescriptor_9ecbec0a4cfddea6, []int{4} + return fileDescriptor_9ecbec0a4cfddea6, []int{6} } func (m *AlertStateDesc) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -443,6 +521,8 @@ func (m *AlertStateDesc) GetKeepFiringSince() time.Time { func init() { proto.RegisterType((*RulesRequest)(nil), "ruler.RulesRequest") + proto.RegisterType((*LivenessCheckRequest)(nil), "ruler.LivenessCheckRequest") + proto.RegisterType((*LivenessCheckResponse)(nil), "ruler.LivenessCheckResponse") 
proto.RegisterType((*RulesResponse)(nil), "ruler.RulesResponse") proto.RegisterType((*GroupStateDesc)(nil), "ruler.GroupStateDesc") proto.RegisterType((*RuleStateDesc)(nil), "ruler.RuleStateDesc") @@ -452,57 +532,60 @@ func init() { func init() { proto.RegisterFile("ruler.proto", fileDescriptor_9ecbec0a4cfddea6) } var fileDescriptor_9ecbec0a4cfddea6 = []byte{ - // 793 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x55, 0x4d, 0x6b, 0x13, 0x41, - 0x18, 0xde, 0x49, 0x9a, 0xaf, 0x49, 0x3f, 0x70, 0x1a, 0x65, 0x0d, 0x65, 0x13, 0xa2, 0x48, 0x10, - 0xdc, 0x40, 0x2c, 0x88, 0x87, 0x2a, 0x29, 0x6d, 0xbd, 0x88, 0x94, 0x8d, 0x7a, 0x0d, 0x93, 0xcd, - 0x64, 0xb3, 0x76, 0xb3, 0xbb, 0xce, 0xcc, 0x86, 0x7a, 0xf3, 0x27, 0xf4, 0xe8, 0xd9, 0x93, 0x3f, - 0xa5, 0xc7, 0xe2, 0xa9, 0x88, 0x54, 0x9b, 0x5e, 0x3c, 0x49, 0x7f, 0x82, 0xcc, 0xcc, 0x6e, 0x93, - 0xd4, 0x0a, 0x0d, 0xd2, 0x4b, 0x3b, 0xef, 0xc7, 0xf3, 0xce, 0xfb, 0x3e, 0xf3, 0xec, 0x1b, 0x58, - 0xa4, 0x91, 0x47, 0xa8, 0x19, 0xd2, 0x80, 0x07, 0x28, 0x23, 0x8d, 0x72, 0xc9, 0x09, 0x9c, 0x40, - 0x7a, 0x1a, 0xe2, 0xa4, 0x82, 0x65, 0xc3, 0x09, 0x02, 0xc7, 0x23, 0x0d, 0x69, 0x75, 0xa3, 0x7e, - 0xa3, 0x17, 0x51, 0xcc, 0xdd, 0xc0, 0x8f, 0xe3, 0x95, 0xcb, 0x71, 0xee, 0x0e, 0x09, 0xe3, 0x78, - 0x18, 0xc6, 0x09, 0x4f, 0x1d, 0x97, 0x0f, 0xa2, 0xae, 0x69, 0x07, 0xc3, 0x86, 0x1d, 0x50, 0x4e, - 0xf6, 0x43, 0x1a, 0xbc, 0x23, 0x36, 0x8f, 0xad, 0x46, 0xb8, 0xe7, 0x24, 0x81, 0x6e, 0x7c, 0x88, - 0xa1, 0x1b, 0xd7, 0x81, 0xca, 0xe6, 0xe5, 0x5f, 0x16, 0x76, 0xd5, 0x7f, 0x05, 0xaf, 0xfd, 0x06, - 0x70, 0xd1, 0x12, 0xb6, 0x45, 0xde, 0x47, 0x84, 0x71, 0xb4, 0x06, 0x0b, 0x22, 0xfe, 0x0a, 0x0f, - 0x09, 0xd3, 0x41, 0x35, 0x5d, 0x2f, 0x58, 0x13, 0x07, 0x7a, 0x00, 0x97, 0x85, 0xf1, 0x82, 0x06, - 0x51, 0xa8, 0x52, 0x52, 0x32, 0xe5, 0x92, 0x17, 0x95, 0x60, 0xa6, 0xef, 0x7a, 0x84, 0xe9, 0x69, - 0x19, 0x56, 0x06, 0x42, 0x70, 0x81, 0x7f, 0x08, 0x89, 0xbe, 0x50, 0x05, 0xf5, 0x82, 0x25, 0xcf, - 0x22, 0x93, 0x71, 0xcc, 0x89, 0x9e, 0x91, 0x4e, 0x65, 0xa0, 0x3b, 0x30, 0x3b, 0x20, 0xd8, 0xe3, - 0x03, 0x3d, 0x2b, 0xdd, 0xb1, 0x85, 0xca, 0x30, 0x3f, 0xc4, 0xdc, 0x1e, 0x10, 0xca, 0xf4, 0x9c, - 0x2c, 0x7d, 0x61, 0xa3, 0xfb, 0x70, 0x89, 0xec, 0xdb, 0x5e, 0xd4, 0x23, 0x2d, 0x8f, 0x50, 0xce, - 0xf4, 0x7c, 0x15, 0xd4, 0xf3, 0xd6, 0xac, 0xb3, 0xf6, 0x0c, 0x2e, 0xc5, 0xf3, 0xb2, 0x30, 0xf0, - 0x19, 0x41, 0x8f, 0x60, 0xd6, 0x11, 0x8d, 0xab, 0x69, 0x8b, 0xcd, 0xdb, 0xa6, 0x7a, 0x77, 0x39, - 0x4d, 0x5b, 0x74, 0xb3, 0x45, 0x98, 0x6d, 0xc5, 0x49, 0xb5, 0xcf, 0x29, 0xb8, 0x3c, 0x1b, 0x42, - 0x0f, 0x61, 0x46, 0x06, 0x75, 0x50, 0x05, 0xf5, 0x62, 0xb3, 0x64, 0x2a, 0x82, 0xad, 0x84, 0x12, - 0x89, 0x57, 0x29, 0xe8, 0x09, 0x5c, 0xc4, 0x36, 0x77, 0x47, 0xa4, 0x23, 0x93, 0x24, 0x7d, 0x09, - 0x84, 0x4a, 0xc8, 0xe4, 0xca, 0xa2, 0xca, 0x94, 0xed, 0xa2, 0xb7, 0x70, 0x95, 0x8c, 0xb0, 0x17, - 0x49, 0x5d, 0xbd, 0x4e, 0xf4, 0xa3, 0xa7, 0xe5, 0x95, 0x65, 0x53, 0x29, 0xcc, 0x4c, 0x14, 0x66, - 0x5e, 0x64, 0x6c, 0xe6, 0x0f, 0x4f, 0x2a, 0xda, 0xc1, 0x8f, 0x0a, 0xb0, 0xae, 0x2a, 0x80, 0xda, - 0x10, 0x4d, 0xdc, 0x5b, 0xb1, 0x6e, 0xe5, 0x0b, 0x15, 0x9b, 0x77, 0xff, 0x2a, 0x9b, 0x24, 0xa8, - 0xaa, 0x9f, 0x44, 0xd5, 0x2b, 0xe0, 0xb5, 0xef, 0x29, 0xc5, 0xf2, 0x84, 0xa3, 0x7b, 0x70, 0x41, - 0x8c, 0x18, 0x53, 0xb4, 0x32, 0x45, 0x91, 0x1c, 0x55, 0x06, 0x27, 0x5a, 0x48, 0x5d, 0xad, 0x85, - 0xf4, 0x8c, 0x16, 0xd6, 0x60, 0xc1, 0xc3, 0x8c, 0x6f, 0x53, 0x1a, 0xd0, 0x58, 0x52, 0x13, 0x87, - 0x78, 0x56, 0xac, 0x64, 0x90, 0x99, 0x79, 0x56, 0x29, 0x83, 0xa9, 0x67, 0x55, 0x49, 0xff, 0xa2, - 0x37, 
0x7b, 0x33, 0xf4, 0xe6, 0xfe, 0x8f, 0xde, 0xaf, 0x19, 0xb8, 0x3c, 0x3b, 0xc7, 0x84, 0x3a, - 0x30, 0x4d, 0x9d, 0x0f, 0xb3, 0x1e, 0xee, 0x12, 0x2f, 0xd1, 0xd9, 0xaa, 0x99, 0x2c, 0x11, 0xf3, - 0xa5, 0xf0, 0xef, 0x62, 0x97, 0x6e, 0xb6, 0xc4, 0x5d, 0xdf, 0x4e, 0x2a, 0x73, 0x2d, 0x21, 0x85, - 0x6f, 0xf5, 0x70, 0xc8, 0x09, 0xb5, 0xe2, 0x5b, 0xd0, 0x3e, 0x2c, 0x62, 0xdf, 0x0f, 0xb8, 0x6c, - 0x53, 0x7d, 0xfc, 0x37, 0x77, 0xe9, 0xf4, 0x55, 0x62, 0x7e, 0xc1, 0x93, 0xda, 0x2d, 0xc0, 0x52, - 0x06, 0x6a, 0xc1, 0x42, 0xfc, 0xb5, 0x61, 0x2e, 0x17, 0xcc, 0x75, 0xdf, 0x32, 0xaf, 0x60, 0x2d, - 0x8e, 0x9e, 0xc3, 0x7c, 0xdf, 0xa5, 0xa4, 0x27, 0x2a, 0xcc, 0xa3, 0x86, 0x9c, 0x44, 0xb5, 0x38, - 0xda, 0x86, 0x45, 0x4a, 0x58, 0xe0, 0x8d, 0x54, 0x8d, 0xdc, 0x1c, 0x35, 0x60, 0x02, 0x6c, 0x71, - 0xb4, 0x03, 0x17, 0x85, 0xb8, 0x3b, 0x8c, 0xf8, 0x5c, 0xd4, 0xc9, 0xcf, 0x53, 0x47, 0x20, 0xdb, - 0xc4, 0xe7, 0xaa, 0x9d, 0x11, 0xf6, 0xdc, 0x5e, 0x27, 0xf2, 0xb9, 0xeb, 0xe9, 0x85, 0x79, 0xca, - 0x48, 0xe0, 0x1b, 0x81, 0x43, 0xbb, 0xf0, 0xd6, 0x1e, 0x21, 0x61, 0xa7, 0xef, 0x52, 0xd7, 0x77, - 0x3a, 0xcc, 0xf5, 0x6d, 0xa2, 0xc3, 0x39, 0x8a, 0xad, 0x08, 0xf8, 0x8e, 0x44, 0xb7, 0x05, 0xb8, - 0xb9, 0x01, 0x33, 0x62, 0x1d, 0x50, 0xb4, 0xae, 0x0e, 0x0c, 0xad, 0x4e, 0x6d, 0xc5, 0xe4, 0xf7, - 0xa9, 0x5c, 0x9a, 0x75, 0xaa, 0x25, 0x5e, 0xd3, 0x36, 0xd7, 0x8f, 0x4e, 0x0d, 0xed, 0xf8, 0xd4, - 0xd0, 0xce, 0x4f, 0x0d, 0xf0, 0x71, 0x6c, 0x80, 0x2f, 0x63, 0x03, 0x1c, 0x8e, 0x0d, 0x70, 0x34, - 0x36, 0xc0, 0xcf, 0xb1, 0x01, 0x7e, 0x8d, 0x0d, 0xed, 0x7c, 0x6c, 0x80, 0x83, 0x33, 0x43, 0x3b, - 0x3a, 0x33, 0xb4, 0xe3, 0x33, 0x43, 0xeb, 0x66, 0x65, 0x8f, 0x8f, 0xff, 0x04, 0x00, 0x00, 0xff, - 0xff, 0x3a, 0xeb, 0x6f, 0xc9, 0xec, 0x07, 0x00, 0x00, + // 845 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x55, 0xcf, 0x6f, 0xe3, 0x44, + 0x14, 0xf6, 0xb4, 0x75, 0x9a, 0xbc, 0xb4, 0x5d, 0x31, 0xcd, 0xae, 0x4c, 0xa8, 0x9c, 0x28, 0x20, + 0x14, 0x21, 0xad, 0x23, 0x85, 0x95, 0x10, 0x07, 0x40, 0x29, 0xbb, 0xcb, 0xa5, 0x42, 0x2b, 0x07, + 0xb8, 0x46, 0x13, 0x67, 0xe2, 0x98, 0x3a, 0xb6, 0x99, 0x19, 0x47, 0xe5, 0xc6, 0x9d, 0xcb, 0x1e, + 0x39, 0x73, 0xe2, 0x4f, 0xd9, 0x63, 0xc5, 0x69, 0x85, 0xd0, 0x42, 0xd3, 0x0b, 0x27, 0xb4, 0x7f, + 0x02, 0x9a, 0x1f, 0x6e, 0xe2, 0x12, 0xa4, 0x8d, 0x50, 0x2f, 0xed, 0xbc, 0x1f, 0xdf, 0x9b, 0x79, + 0xdf, 0xfb, 0xf2, 0x0c, 0x75, 0x96, 0xc7, 0x94, 0x79, 0x19, 0x4b, 0x45, 0x8a, 0x6d, 0x65, 0x34, + 0x1b, 0x61, 0x1a, 0xa6, 0xca, 0xd3, 0x93, 0x27, 0x1d, 0x6c, 0xba, 0x61, 0x9a, 0x86, 0x31, 0xed, + 0x29, 0x6b, 0x9c, 0x4f, 0x7b, 0x93, 0x9c, 0x11, 0x11, 0xa5, 0x89, 0x89, 0xb7, 0x6e, 0xc7, 0x45, + 0x34, 0xa7, 0x5c, 0x90, 0x79, 0x66, 0x12, 0x3e, 0x0e, 0x23, 0x31, 0xcb, 0xc7, 0x5e, 0x90, 0xce, + 0x7b, 0x41, 0xca, 0x04, 0xbd, 0xc8, 0x58, 0xfa, 0x2d, 0x0d, 0x84, 0xb1, 0x7a, 0xd9, 0x79, 0x58, + 0x04, 0xc6, 0xe6, 0x60, 0xa0, 0x9f, 0xbc, 0x09, 0x54, 0x3d, 0x5e, 0xfd, 0xe5, 0xd9, 0x58, 0xff, + 0xd7, 0xf0, 0xce, 0xdf, 0x08, 0x0e, 0x7c, 0x69, 0xfb, 0xf4, 0xbb, 0x9c, 0x72, 0x81, 0x4f, 0xa0, + 0x26, 0xe3, 0x5f, 0x92, 0x39, 0xe5, 0x0e, 0x6a, 0xef, 0x76, 0x6b, 0xfe, 0xca, 0x81, 0xdf, 0x87, + 0x23, 0x69, 0x7c, 0xc1, 0xd2, 0x3c, 0xd3, 0x29, 0x3b, 0x2a, 0xe5, 0x96, 0x17, 0x37, 0xc0, 0x9e, + 0x46, 0x31, 0xe5, 0xce, 0xae, 0x0a, 0x6b, 0x03, 0x63, 0xd8, 0x13, 0xdf, 0x67, 0xd4, 0xd9, 0x6b, + 0xa3, 0x6e, 0xcd, 0x57, 0x67, 0x99, 0xc9, 0x05, 0x11, 0xd4, 0xb1, 0x95, 0x53, 0x1b, 0xf8, 0x01, + 0x54, 0x66, 0x94, 0xc4, 0x62, 0xe6, 0x54, 0x94, 0xdb, 0x58, 0xb8, 0x09, 0xd5, 0x39, 0x11, 0xc1, + 0x8c, 0x32, 0xee, 0xec, 
0xab, 0xd2, 0x37, 0x36, 0x7e, 0x0f, 0x0e, 0xe9, 0x45, 0x10, 0xe7, 0x13, + 0x3a, 0x88, 0x29, 0x13, 0xdc, 0xa9, 0xb6, 0x51, 0xb7, 0xea, 0x97, 0x9d, 0x9d, 0x07, 0xd0, 0x38, + 0x8b, 0x16, 0x34, 0xa1, 0x9c, 0x7f, 0x3e, 0xa3, 0xc1, 0xb9, 0xe9, 0xbb, 0xf3, 0x10, 0xee, 0xdf, + 0xf2, 0xf3, 0x2c, 0x4d, 0xf8, 0xda, 0x03, 0x51, 0x1b, 0x75, 0x6d, 0xf3, 0xc0, 0xce, 0xa7, 0x70, + 0x68, 0x68, 0x33, 0x69, 0x0f, 0xa1, 0x12, 0xca, 0xfe, 0x35, 0x69, 0xf5, 0xfe, 0x7d, 0x4f, 0xcb, + 0x47, 0x91, 0x32, 0x94, 0x98, 0xc7, 0x94, 0x07, 0xbe, 0x49, 0xea, 0xfc, 0xbc, 0x03, 0x47, 0xe5, + 0x10, 0xfe, 0x00, 0x6c, 0x15, 0x54, 0x17, 0xd5, 0xfb, 0x0d, 0x4f, 0xcf, 0xc9, 0x2f, 0x98, 0x55, + 0x78, 0x9d, 0x82, 0x3f, 0x82, 0x03, 0x12, 0x88, 0x68, 0x41, 0x47, 0x2a, 0x49, 0x4d, 0xa1, 0x80, + 0x30, 0x05, 0x59, 0x5d, 0x59, 0xd7, 0x99, 0xea, 0xb9, 0xf8, 0x1b, 0x38, 0xa6, 0x0b, 0x12, 0xe7, + 0x4a, 0x9e, 0x5f, 0x15, 0x32, 0x74, 0x76, 0xd5, 0x95, 0x4d, 0x4f, 0x0b, 0xd5, 0x2b, 0x84, 0xea, + 0xdd, 0x64, 0x9c, 0x56, 0x5f, 0xbc, 0x6a, 0x59, 0xcf, 0xff, 0x68, 0x21, 0x7f, 0x53, 0x01, 0x3c, + 0x04, 0xbc, 0x72, 0x3f, 0x36, 0xf2, 0x57, 0x83, 0xae, 0xf7, 0xdf, 0xfe, 0x57, 0xd9, 0x22, 0x41, + 0x57, 0xfd, 0x49, 0x56, 0xdd, 0x00, 0xef, 0xfc, 0xbe, 0xa3, 0x59, 0x5e, 0x71, 0xf4, 0x2e, 0xec, + 0xc9, 0x16, 0x0d, 0x45, 0xf7, 0xd6, 0x28, 0x52, 0xad, 0xaa, 0xe0, 0x6a, 0x62, 0x3b, 0x9b, 0x25, + 0xb5, 0x5b, 0x92, 0xd4, 0x09, 0xd4, 0x62, 0xc2, 0xc5, 0x13, 0xc6, 0x52, 0x66, 0x94, 0xb9, 0x72, + 0xc8, 0xb1, 0x12, 0xad, 0x26, 0xbb, 0x34, 0x56, 0xa5, 0xa6, 0xb5, 0xb1, 0xea, 0xa4, 0xff, 0xa2, + 0xb7, 0x72, 0x37, 0xf4, 0xee, 0xff, 0x3f, 0x7a, 0x7f, 0xb5, 0xe1, 0xa8, 0xdc, 0x47, 0x59, 0xec, + 0x37, 0xd4, 0x25, 0x50, 0x89, 0xc9, 0x98, 0xc6, 0x85, 0xce, 0x8e, 0xbd, 0x62, 0x17, 0x79, 0x67, + 0xd2, 0xff, 0x8c, 0x44, 0xec, 0x74, 0x20, 0xef, 0xfa, 0xed, 0x55, 0x6b, 0xab, 0x5d, 0xa6, 0xf1, + 0x83, 0x09, 0xc9, 0x04, 0x65, 0xbe, 0xb9, 0x05, 0x5f, 0x40, 0x9d, 0x24, 0x49, 0x2a, 0xd4, 0x33, + 0xf5, 0x0e, 0xb9, 0xbb, 0x4b, 0xd7, 0xaf, 0x92, 0xfd, 0x4b, 0x9e, 0xf4, 0x8a, 0x42, 0xbe, 0x36, + 0xf0, 0x00, 0x6a, 0xe6, 0xd7, 0x46, 0x84, 0xda, 0x53, 0x6f, 0x3a, 0xcb, 0xaa, 0x86, 0x0d, 0x04, + 0xfe, 0x0c, 0xaa, 0xd3, 0x88, 0xd1, 0x89, 0xac, 0xb0, 0x8d, 0x1a, 0xf6, 0x15, 0x6a, 0x20, 0xf0, + 0x13, 0xa8, 0x33, 0xca, 0xd3, 0x78, 0xa1, 0x6b, 0xec, 0x6f, 0x51, 0x03, 0x0a, 0xe0, 0x40, 0xe0, + 0xa7, 0x70, 0x20, 0xc5, 0x3d, 0xe2, 0x34, 0x11, 0xb2, 0x4e, 0x75, 0x9b, 0x3a, 0x12, 0x39, 0xa4, + 0x89, 0xd0, 0xcf, 0x59, 0x90, 0x38, 0x9a, 0x8c, 0xf2, 0x44, 0x44, 0xb1, 0x53, 0xdb, 0xa6, 0x8c, + 0x02, 0x7e, 0x2d, 0x71, 0xf8, 0x19, 0xbc, 0x75, 0x4e, 0x69, 0x36, 0x9a, 0x46, 0x2c, 0x4a, 0xc2, + 0x11, 0x8f, 0x92, 0x80, 0x3a, 0xb0, 0x45, 0xb1, 0x7b, 0x12, 0xfe, 0x54, 0xa1, 0x87, 0x12, 0xdc, + 0xff, 0x11, 0x81, 0x2d, 0xf7, 0x01, 0xc3, 0x8f, 0xf4, 0x81, 0xe3, 0xe3, 0xb5, 0xb5, 0x58, 0x7c, + 0xe7, 0x9a, 0x8d, 0xb2, 0x53, 0x6f, 0xf1, 0x8e, 0x85, 0xcf, 0xe0, 0xb0, 0xf4, 0x1d, 0xc0, 0xef, + 0x98, 0xc4, 0x4d, 0x5f, 0x8d, 0xe6, 0xc9, 0xe6, 0x60, 0x51, 0xed, 0xf4, 0xd1, 0xe5, 0x95, 0x6b, + 0xbd, 0xbc, 0x72, 0xad, 0xd7, 0x57, 0x2e, 0xfa, 0x61, 0xe9, 0xa2, 0x5f, 0x96, 0x2e, 0x7a, 0xb1, + 0x74, 0xd1, 0xe5, 0xd2, 0x45, 0x7f, 0x2e, 0x5d, 0xf4, 0xd7, 0xd2, 0xb5, 0x5e, 0x2f, 0x5d, 0xf4, + 0xfc, 0xda, 0xb5, 0x2e, 0xaf, 0x5d, 0xeb, 0xe5, 0xb5, 0x6b, 0x8d, 0x2b, 0xaa, 0xe5, 0x0f, 0xff, + 0x09, 0x00, 0x00, 0xff, 0xff, 0xc4, 0x1f, 0x82, 0x20, 0x82, 0x08, 0x00, 0x00, } func (this *RulesRequest) Equal(that interface{}) bool { @@ -570,6 +653,51 @@ func (this *RulesRequest) Equal(that interface{}) bool { } return true } +func (this 
*LivenessCheckRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*LivenessCheckRequest) + if !ok { + that2, ok := that.(LivenessCheckRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + return true +} +func (this *LivenessCheckResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*LivenessCheckResponse) + if !ok { + that2, ok := that.(LivenessCheckResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.State != that1.State { + return false + } + return true +} func (this *RulesResponse) Equal(that interface{}) bool { if that == nil { return this == nil @@ -762,6 +890,25 @@ func (this *RulesRequest) GoString() string { s = append(s, "}") return strings.Join(s, "") } +func (this *LivenessCheckRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 4) + s = append(s, "&ruler.LivenessCheckRequest{") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *LivenessCheckResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&ruler.LivenessCheckResponse{") + s = append(s, "State: "+fmt.Sprintf("%#v", this.State)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} func (this *RulesResponse) GoString() string { if this == nil { return "nil" @@ -852,6 +999,7 @@ const _ = grpc.SupportPackageIsVersion4 // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type RulerClient interface { Rules(ctx context.Context, in *RulesRequest, opts ...grpc.CallOption) (*RulesResponse, error) + LivenessCheck(ctx context.Context, in *LivenessCheckRequest, opts ...grpc.CallOption) (*LivenessCheckResponse, error) } type rulerClient struct { @@ -871,9 +1019,19 @@ func (c *rulerClient) Rules(ctx context.Context, in *RulesRequest, opts ...grpc. return out, nil } +func (c *rulerClient) LivenessCheck(ctx context.Context, in *LivenessCheckRequest, opts ...grpc.CallOption) (*LivenessCheckResponse, error) { + out := new(LivenessCheckResponse) + err := c.cc.Invoke(ctx, "/ruler.Ruler/LivenessCheck", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + // RulerServer is the server API for Ruler service. type RulerServer interface { Rules(context.Context, *RulesRequest) (*RulesResponse, error) + LivenessCheck(context.Context, *LivenessCheckRequest) (*LivenessCheckResponse, error) } // UnimplementedRulerServer can be embedded to have forward compatible implementations. 
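The generated client above is what gives a non-primary ruler its cheap probe of higher-priority replicas. The following is a condensed sketch of the takeover rule implemented in nonPrimaryInstanceOwnsRuleGroup earlier in this diff; the real code probes all preceding replicas concurrently via concurrency.ForEach, this sequential version and the helper name are illustrative only:

// shouldTakeOver reports whether this ruler must evaluate a rule group whose
// preceding replicas are listed in preceding (addresses are hypothetical).
func shouldTakeOver(ctx context.Context, r *Ruler, preceding []string) bool {
	for _, addr := range preceding {
		client, err := r.GetClientFor(addr)
		if err != nil {
			continue // an unreachable replica counts as down
		}
		probeCtx, cancel := context.WithTimeout(ctx, livenessCheckTimeout)
		resp, err := client.LivenessCheck(probeCtx, &LivenessCheckRequest{})
		cancel()
		if err == nil && services.State(resp.GetState()) == services.Running {
			return false // a higher-priority replica is alive and will evaluate
		}
	}
	return true // no live predecessor: take ownership to avoid missed evaluations
}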
@@ -883,6 +1041,9 @@ type UnimplementedRulerServer struct { func (*UnimplementedRulerServer) Rules(ctx context.Context, req *RulesRequest) (*RulesResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method Rules not implemented") } +func (*UnimplementedRulerServer) LivenessCheck(ctx context.Context, req *LivenessCheckRequest) (*LivenessCheckResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method LivenessCheck not implemented") +} func RegisterRulerServer(s *grpc.Server, srv RulerServer) { s.RegisterService(&_Ruler_serviceDesc, srv) @@ -906,6 +1067,24 @@ func _Ruler_Rules_Handler(srv interface{}, ctx context.Context, dec func(interfa return interceptor(ctx, in, info, handler) } +func _Ruler_LivenessCheck_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(LivenessCheckRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RulerServer).LivenessCheck(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/ruler.Ruler/LivenessCheck", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RulerServer).LivenessCheck(ctx, req.(*LivenessCheckRequest)) + } + return interceptor(ctx, in, info, handler) +} + var _Ruler_serviceDesc = grpc.ServiceDesc{ ServiceName: "ruler.Ruler", HandlerType: (*RulerServer)(nil), @@ -914,6 +1093,10 @@ var _Ruler_serviceDesc = grpc.ServiceDesc{ MethodName: "Rules", Handler: _Ruler_Rules_Handler, }, + { + MethodName: "LivenessCheck", + Handler: _Ruler_LivenessCheck_Handler, + }, }, Streams: []grpc.StreamDesc{}, Metadata: "ruler.proto", @@ -1009,6 +1192,57 @@ func (m *RulesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *LivenessCheckRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LivenessCheckRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LivenessCheckRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *LivenessCheckResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LivenessCheckResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LivenessCheckResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.State != 0 { + i = encodeVarintRuler(dAtA, i, uint64(m.State)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + func (m *RulesResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -1368,6 +1602,27 @@ func (m *RulesRequest) Size() (n int) { return n } +func (m *LivenessCheckRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *LivenessCheckResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.State != 0 { + n += 1 + sovRuler(uint64(m.State)) + } + return n +} + func (m *RulesResponse) Size() (n int) { if m == nil { return 0 @@ -1504,6 
+1759,25 @@ func (this *RulesRequest) String() string { }, "") return s } +func (this *LivenessCheckRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LivenessCheckRequest{`, + `}`, + }, "") + return s +} +func (this *LivenessCheckResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LivenessCheckResponse{`, + `State:` + fmt.Sprintf("%v", this.State) + `,`, + `}`, + }, "") + return s +} func (this *RulesResponse) String() string { if this == nil { return "nil" @@ -1882,6 +2156,131 @@ func (m *RulesRequest) Unmarshal(dAtA []byte) error { } return nil } +func (m *LivenessCheckRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRuler + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LivenessCheckRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LivenessCheckRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipRuler(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRuler + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthRuler + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LivenessCheckResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRuler + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LivenessCheckResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LivenessCheckResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + m.State = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRuler + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.State |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRuler(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRuler + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthRuler + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *RulesResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/pkg/ruler/ruler.proto b/pkg/ruler/ruler.proto index 42828f7352..22745ead97 100644 --- a/pkg/ruler/ruler.proto +++ b/pkg/ruler/ruler.proto @@ -17,6 +17,7 @@ option (gogoproto.unmarshaler_all) = true; service Ruler { rpc Rules(RulesRequest) returns (RulesResponse) {}; + rpc 
LivenessCheck(LivenessCheckRequest) returns (LivenessCheckResponse){}; } message RulesRequest { @@ -30,6 +31,12 @@ message RulesRequest { bool excludeAlerts = 8; } +message LivenessCheckRequest{} + +message LivenessCheckResponse{ + int32 state = 1; +} + message RulesResponse { repeated GroupStateDesc groups = 1; } diff --git a/pkg/ruler/ruler_client_config.go b/pkg/ruler/ruler_client_config.go new file mode 100644 index 0000000000..c4d019b660 --- /dev/null +++ b/pkg/ruler/ruler_client_config.go @@ -0,0 +1,18 @@ +package ruler + +import ( + "flag" + "time" + + "github.com/cortexproject/cortex/pkg/util/grpcclient" +) + +type ClientConfig struct { + grpcclient.Config `yaml:",inline"` + RemoteTimeout time.Duration `yaml:"remote_timeout"` +} + +func (cfg *ClientConfig) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { + cfg.Config.RegisterFlagsWithPrefix(prefix, "", f) + f.DurationVar(&cfg.RemoteTimeout, prefix+".remote-timeout", 2*time.Minute, "Timeout for downstream rulers.") +} diff --git a/pkg/ruler/ruler_ring.go b/pkg/ruler/ruler_ring.go index 534dbb67da..215a711f02 100644 --- a/pkg/ruler/ruler_ring.go +++ b/pkg/ruler/ruler_ring.go @@ -53,8 +53,8 @@ type RingConfig struct { InstanceZone string `yaml:"instance_availability_zone" doc:"hidden"` NumTokens int `yaml:"num_tokens"` - FinalSleep time.Duration `yaml:"final_sleep"` - + FinalSleep time.Duration `yaml:"final_sleep"` + KeepInstanceInTheRingOnShutdown bool `yaml:"keep_instance_in_the_ring_on_shutdown"` // Injected internally ListenPort int `yaml:"-"` @@ -86,6 +86,7 @@ func (cfg *RingConfig) RegisterFlags(f *flag.FlagSet) { f.StringVar(&cfg.InstanceID, "ruler.ring.instance-id", hostname, "Instance ID to register in the ring.") f.StringVar(&cfg.InstanceZone, "ruler.ring.instance-availability-zone", "", "The availability zone where this instance is running. 
Required if zone-awareness is enabled.") f.IntVar(&cfg.NumTokens, "ruler.ring.num-tokens", 128, "Number of tokens for each ruler.") + f.BoolVar(&cfg.KeepInstanceInTheRingOnShutdown, "ruler.ring.keep-instance-in-the-ring-on-shutdown", false, "Keep instance in the ring on shut down.") } // ToLifecyclerConfig returns a LifecyclerConfig based on the ruler @@ -99,13 +100,14 @@ func (cfg *RingConfig) ToLifecyclerConfig(logger log.Logger) (ring.BasicLifecycl instancePort := ring.GetInstancePort(cfg.InstancePort, cfg.ListenPort) return ring.BasicLifecyclerConfig{ - ID: cfg.InstanceID, - Addr: fmt.Sprintf("%s:%d", instanceAddr, instancePort), - Zone: cfg.InstanceZone, - HeartbeatPeriod: cfg.HeartbeatPeriod, - TokensObservePeriod: 0, - NumTokens: cfg.NumTokens, - FinalSleep: cfg.FinalSleep, + ID: cfg.InstanceID, + Addr: fmt.Sprintf("%s:%d", instanceAddr, instancePort), + Zone: cfg.InstanceZone, + HeartbeatPeriod: cfg.HeartbeatPeriod, + TokensObservePeriod: 0, + NumTokens: cfg.NumTokens, + FinalSleep: cfg.FinalSleep, + KeepInstanceInTheRingOnShutdown: cfg.KeepInstanceInTheRingOnShutdown, }, nil } diff --git a/pkg/ruler/ruler_ring_test.go b/pkg/ruler/ruler_ring_test.go index 95f8009912..4b740eea69 100644 --- a/pkg/ruler/ruler_ring_test.go +++ b/pkg/ruler/ruler_ring_test.go @@ -71,6 +71,38 @@ func TestGetReplicationSetForListRule(t *testing.T) { expectedSet: []string{"127.0.0.1", "127.0.0.2", "127.0.0.3"}, expectedMaxError: 1, }, + "max errors must be 0 when RF=3 and healthy instances=1": { + ringInstances: map[string]ring.InstanceDesc{ + "instance-1": {Addr: "127.0.0.1", State: ring.ACTIVE, Timestamp: now.Unix(), Tokens: g.GenerateTokens(ring.NewDesc(), "instance-1", "", 128, true)}, + }, + ringHeartbeatTimeout: time.Minute, + ringReplicationFactor: 3, + expectedSet: []string{"127.0.0.1"}, + expectedMaxError: 0, + }, + "max errors must be 1 when RF=3 and healthy instances=2": { + ringInstances: map[string]ring.InstanceDesc{ + "instance-1": {Addr: "127.0.0.1", State: ring.ACTIVE, Timestamp: now.Unix(), Tokens: g.GenerateTokens(ring.NewDesc(), "instance-1", "", 128, true)}, + "instance-2": {Addr: "127.0.0.2", State: ring.ACTIVE, Timestamp: now.Add(-10 * time.Second).Unix(), Tokens: g.GenerateTokens(ring.NewDesc(), "instance-2", "", 128, true)}, + }, + ringHeartbeatTimeout: time.Minute, + ringReplicationFactor: 3, + expectedSet: []string{"127.0.0.1", "127.0.0.2"}, + expectedMaxError: 1, + }, + "max errors must be 1 when RF=2 and healthy instances=5": { + ringInstances: map[string]ring.InstanceDesc{ + "instance-1": {Addr: "127.0.0.1", State: ring.ACTIVE, Timestamp: now.Unix(), Tokens: g.GenerateTokens(ring.NewDesc(), "instance-1", "", 128, true)}, + "instance-2": {Addr: "127.0.0.2", State: ring.ACTIVE, Timestamp: now.Add(-10 * time.Second).Unix(), Tokens: g.GenerateTokens(ring.NewDesc(), "instance-2", "", 128, true)}, + "instance-3": {Addr: "127.0.0.3", State: ring.ACTIVE, Timestamp: now.Add(-10 * time.Second).Unix(), Tokens: g.GenerateTokens(ring.NewDesc(), "instance-3", "", 128, true)}, + "instance-4": {Addr: "127.0.0.4", State: ring.ACTIVE, Timestamp: now.Add(-10 * time.Second).Unix(), Tokens: g.GenerateTokens(ring.NewDesc(), "instance-4", "", 128, true)}, + "instance-5": {Addr: "127.0.0.5", State: ring.ACTIVE, Timestamp: now.Add(-10 * time.Second).Unix(), Tokens: g.GenerateTokens(ring.NewDesc(), "instance-5", "", 128, true)}, + }, + ringHeartbeatTimeout: time.Minute, + ringReplicationFactor: 2, + expectedSet: []string{"127.0.0.1", "127.0.0.2", "127.0.0.3", "127.0.0.4", "127.0.0.5"}, + 
expectedMaxError: 1, + }, "should succeed on 2 unhealthy instances and RF=3": { ringInstances: map[string]ring.InstanceDesc{ "instance-1": {Addr: "127.0.0.1", State: ring.ACTIVE, Timestamp: now.Unix(), Tokens: g.GenerateTokens(ring.NewDesc(), "instance-1", "", 128, true)}, @@ -144,7 +176,7 @@ func TestGetReplicationSetForListRule(t *testing.T) { }, expectedMaxUnavailableZones: 2, }, - "should fail on 3 unhealthy instances in 3 zonez and RF=3 zone replication enabled": { + "should fail on 3 unhealthy instances in 3 zones and RF=3 zone replication enabled": { ringInstances: map[string]ring.InstanceDesc{ "instance-1": {Addr: "127.0.0.1", State: ring.ACTIVE, Timestamp: now.Unix(), Tokens: g.GenerateTokens(ring.NewDesc(), "instance-1", "z1", 128, true), Zone: "z1"}, "instance-2": {Addr: "127.0.0.2", State: ring.ACTIVE, Timestamp: now.Add(-10 * time.Second).Unix(), Tokens: g.GenerateTokens(ring.NewDesc(), "instance-2", "z2", 128, true), Zone: "z2"}, @@ -223,7 +255,7 @@ func TestGetReplicationSetForListRule(t *testing.T) { } r, _ := buildRuler(t, cfg, nil, store, nil) - r.limits = ruleLimits{evalDelay: 0} + r.limits = ruleLimits{} rulerRing := r.ring // We start ruler's ring, but nothing else (not even lifecycler). diff --git a/pkg/ruler/ruler_test.go b/pkg/ruler/ruler_test.go index befe7d01f4..7262b7179e 100644 --- a/pkg/ruler/ruler_test.go +++ b/pkg/ruler/ruler_test.go @@ -2,6 +2,7 @@ package ruler import ( "context" + "errors" "fmt" "io" "math/rand" @@ -82,7 +83,6 @@ func defaultRulerConfig(t testing.TB) Config { } type ruleLimits struct { - evalDelay time.Duration tenantShard int maxRulesPerRuleGroup int maxRuleGroups int @@ -91,10 +91,6 @@ type ruleLimits struct { queryOffset time.Duration } -func (r ruleLimits) EvaluationDelay(_ string) time.Duration { - return r.evalDelay -} - func (r ruleLimits) RulerTenantShardSize(_ string) int { return r.tenantShard } @@ -178,7 +174,7 @@ func testSetup(t *testing.T, querierTestConfig *querier.TestConfig) (*promql.Eng reg := prometheus.NewRegistry() queryable := testQueryableFunc(querierTestConfig, reg, l) - return engine, queryable, pusher, l, ruleLimits{evalDelay: 0, maxRuleGroups: 20, maxRulesPerRuleGroup: 15}, reg + return engine, queryable, pusher, l, ruleLimits{maxRuleGroups: 20, maxRulesPerRuleGroup: 15}, reg } func newManager(t *testing.T, cfg Config) *DefaultMultiTenantManager { @@ -208,6 +204,16 @@ func (c *mockRulerClient) Rules(ctx context.Context, in *RulesRequest, _ ...grpc return c.ruler.Rules(ctx, in) } +func (c *mockRulerClient) LivenessCheck(ctx context.Context, in *LivenessCheckRequest, opts ...grpc.CallOption) (*LivenessCheckResponse, error) { + + if c.ruler.State() == services.Terminated { + return nil, errors.New("ruler is terminated") + } + return &LivenessCheckResponse{ + State: int32(services.Running), + }, nil +} + func (p *mockRulerClientsPool) GetClientFor(addr string) (RulerClient, error) { for _, r := range p.rulerAddrMap { if r.lifecycler.GetInstanceAddr() == addr { @@ -223,7 +229,7 @@ func (p *mockRulerClientsPool) GetClientFor(addr string) (RulerClient, error) { func newMockClientsPool(cfg Config, logger log.Logger, reg prometheus.Registerer, rulerAddrMap map[string]*Ruler) *mockRulerClientsPool { return &mockRulerClientsPool{ - ClientsPool: newRulerClientPool(cfg.ClientTLSConfig, logger, reg), + ClientsPool: newRulerClientPool(cfg.ClientTLSConfig.Config, logger, reg), cfg: cfg, rulerAddrMap: rulerAddrMap, } @@ -252,10 +258,23 @@ func buildRuler(t *testing.T, rulerConfig Config, querierTestConfig *querier.Tes func 
newTestRuler(t *testing.T, rulerConfig Config, store rulestore.RuleStore, querierTestConfig *querier.TestConfig) *Ruler { ruler, _ := buildRuler(t, rulerConfig, querierTestConfig, store, nil) require.NoError(t, services.StartAndAwaitRunning(context.Background(), ruler)) + rgs, err := store.ListAllRuleGroups(context.Background()) + require.NoError(t, err) - // Ensure all rules are loaded before usage - ruler.syncRules(context.Background(), rulerSyncReasonInitial) - + // Wait to ensure syncRules has finished and all rules are loaded before usage + deadline := time.Now().Add(3 * time.Second) + for { + loaded := true + for tenantId := range rgs { + if len(ruler.manager.GetRules(tenantId)) == 0 { + loaded = false + } + } + if time.Now().After(deadline) || loaded { + break + } + time.Sleep(50 * time.Millisecond) + } return ruler } @@ -538,32 +557,32 @@ func TestGetRules(t *testing.T) { expectedRules := expectedRulesMap{ "ruler1": map[string]rulespb.RuleGroupList{ "user1": { - &rulespb.RuleGroupDesc{User: "user1", Namespace: "namespace", Name: "first", Interval: 10 * time.Second, Rules: ruleMap["ruler1-user1-rule-group1"]}, - &rulespb.RuleGroupDesc{User: "user1", Namespace: "namespace", Name: "second", Interval: 10 * time.Second, Rules: ruleMap["ruler1-user1-rule-group2"]}, + &rulespb.RuleGroupDesc{User: "user1", Namespace: "namespace", Name: "first", Interval: 10 * time.Minute, Rules: ruleMap["ruler1-user1-rule-group1"]}, + &rulespb.RuleGroupDesc{User: "user1", Namespace: "namespace", Name: "second", Interval: 10 * time.Minute, Rules: ruleMap["ruler1-user1-rule-group2"]}, }, "user2": { - &rulespb.RuleGroupDesc{User: "user2", Namespace: "namespace", Name: "third", Interval: 10 * time.Second, Rules: ruleMap["ruler1-user2-rule-group1"]}, + &rulespb.RuleGroupDesc{User: "user2", Namespace: "namespace", Name: "third", Interval: 10 * time.Minute, Rules: ruleMap["ruler1-user2-rule-group1"]}, }, }, "ruler2": map[string]rulespb.RuleGroupList{ "user1": { - &rulespb.RuleGroupDesc{User: "user1", Namespace: "namespace", Name: "third", Interval: 10 * time.Second, Rules: ruleMap["ruler2-user1-rule-group3"]}, + &rulespb.RuleGroupDesc{User: "user1", Namespace: "namespace", Name: "third", Interval: 10 * time.Minute, Rules: ruleMap["ruler2-user1-rule-group3"]}, }, "user2": { - &rulespb.RuleGroupDesc{User: "user2", Namespace: "namespace", Name: "first", Interval: 10 * time.Second, Rules: ruleMap["ruler2-user2-rule-group1"]}, - &rulespb.RuleGroupDesc{User: "user2", Namespace: "namespace", Name: "second", Interval: 10 * time.Second, Rules: ruleMap["ruler2-user2-rule-group2"]}, + &rulespb.RuleGroupDesc{User: "user2", Namespace: "namespace", Name: "first", Interval: 10 * time.Minute, Rules: ruleMap["ruler2-user2-rule-group1"]}, + &rulespb.RuleGroupDesc{User: "user2", Namespace: "namespace", Name: "second", Interval: 10 * time.Minute, Rules: ruleMap["ruler2-user2-rule-group2"]}, }, "user3": { - &rulespb.RuleGroupDesc{User: "user3", Namespace: "latency-test", Name: "first", Interval: 10 * time.Second, Rules: ruleMap["ruler2-user3-rule-group1"]}, + &rulespb.RuleGroupDesc{User: "user3", Namespace: "latency-test", Name: "first", Interval: 10 * time.Minute, Rules: ruleMap["ruler2-user3-rule-group1"]}, }, }, "ruler3": map[string]rulespb.RuleGroupList{ "user3": { - &rulespb.RuleGroupDesc{User: "user3", Namespace: "namespace", Name: "third", Interval: 10 * time.Second, Rules: ruleMap["ruler3-user3-rule-group1"]}, + &rulespb.RuleGroupDesc{User: "user3", Namespace: "namespace", Name: "third", Interval: 10 * time.Minute, Rules: 
ruleMap["ruler3-user3-rule-group1"]}, }, "user2": { - &rulespb.RuleGroupDesc{User: "user2", Namespace: "namespace", Name: "forth", Interval: 10 * time.Second, Rules: ruleMap["ruler3-user2-rule-group1"]}, - &rulespb.RuleGroupDesc{User: "user2", Namespace: "namespace", Name: "fifty", Interval: 10 * time.Second, Rules: ruleMap["ruler3-user2-rule-group2"]}, + &rulespb.RuleGroupDesc{User: "user2", Namespace: "namespace", Name: "forth", Interval: 10 * time.Minute, Rules: ruleMap["ruler3-user2-rule-group1"]}, + &rulespb.RuleGroupDesc{User: "user2", Namespace: "namespace", Name: "fifty", Interval: 10 * time.Minute, Rules: ruleMap["ruler3-user2-rule-group2"]}, }, }, } @@ -971,7 +990,7 @@ func TestGetRules(t *testing.T) { } r, _ := buildRuler(t, cfg, nil, store, rulerAddrMap) - r.limits = ruleLimits{evalDelay: 0, tenantShard: tc.shuffleShardSize} + r.limits = ruleLimits{tenantShard: tc.shuffleShardSize} rulerAddrMap[id] = r if r.ring != nil { require.NoError(t, services.StartAndAwaitRunning(context.Background(), r.ring)) @@ -1208,7 +1227,7 @@ func TestGetRulesFromBackup(t *testing.T) { } r, _ := buildRuler(t, cfg, nil, store, rulerAddrMap) - r.limits = ruleLimits{evalDelay: 0, tenantShard: 3} + r.limits = ruleLimits{tenantShard: 3} rulerAddrMap[id] = r if r.ring != nil { require.NoError(t, services.StartAndAwaitRunning(context.Background(), r.ring)) @@ -1322,6 +1341,233 @@ func TestGetRulesFromBackup(t *testing.T) { require.Equal(t, "rtest_user1_1", ruleStateDescriptions[0].ActiveRules[0].Rule.Record) } +func TestGetRules_HA(t *testing.T) { + t.Run("Test RF = 2", getRulesHATest(2)) + t.Run("Test RF = 3", getRulesHATest(3)) +} + +func getRulesHATest(replicationFactor int) func(t *testing.T) { + return func(t *testing.T) { + // ruler ID -> (user ID -> list of groups). 
+ type expectedRulesMap map[string]map[string]rulespb.RuleGroupList + + rule := []*rulespb.RuleDesc{ + { + Record: "rtest_user1_1", + Expr: "sum(rate(node_cpu_seconds_total[3h:10m]))", + }, + { + Alert: "atest_user1_1", + Expr: "sum(rate(node_cpu_seconds_total[3h:10m]))", + }, + { + Record: "rtest_user1_2", + Expr: "sum(rate(node_cpu_seconds_total[3h:10m]))", + Labels: []cortexpb.LabelAdapter{ + {Name: "key", Value: "val"}, + }, + }, + { + Alert: "atest_user1_2", + Expr: "sum(rate(node_cpu_seconds_total[3h:10m]))", + Labels: []cortexpb.LabelAdapter{ + {Name: "key", Value: "val"}, + }, + Annotations: []cortexpb.LabelAdapter{ + {Name: "aKey", Value: "aVal"}, + }, + For: 10 * time.Second, + KeepFiringFor: 20 * time.Second, + }, + } + + tenantId := "user1" + + rulerStateMapOnePending := map[string]ring.InstanceState{ + "ruler1": ring.PENDING, + "ruler2": ring.ACTIVE, + "ruler3": ring.ACTIVE, + } + + rulerAZEvenSpread := map[string]string{ + "ruler1": "a", + "ruler2": "b", + "ruler3": "c", + } + + expectedRules := expectedRulesMap{ + "ruler1": map[string]rulespb.RuleGroupList{ + tenantId: { + &rulespb.RuleGroupDesc{User: "user1", Namespace: "namespace", Name: "l1", Interval: 10 * time.Minute, Limit: 10, Rules: rule}, + &rulespb.RuleGroupDesc{User: "user1", Namespace: "namespace", Name: "l2", Interval: 0, Rules: rule}, + }, + }, + "ruler2": map[string]rulespb.RuleGroupList{ + tenantId: { + &rulespb.RuleGroupDesc{User: "user1", Namespace: "namespace", Name: "b1", Interval: 10 * time.Minute, Limit: 10, Rules: rule}, + &rulespb.RuleGroupDesc{User: "user1", Namespace: "namespace", Name: "b2", Interval: 0, Rules: rule}, + }, + }, + "ruler3": map[string]rulespb.RuleGroupList{ + tenantId: { + &rulespb.RuleGroupDesc{User: "user1", Namespace: "namespace2", Name: "b3", Interval: 0, Rules: rule}, + }, + }, + } + + kvStore, cleanUp := consul.NewInMemoryClient(ring.GetCodec(), log.NewNopLogger(), nil) + t.Cleanup(func() { assert.NoError(t, cleanUp.Close()) }) + allRulesByUser := map[string]rulespb.RuleGroupList{} + allTokensByRuler := map[string][]uint32{} + rulerAddrMap := map[string]*Ruler{} + + createRuler := func(id string) *Ruler { + store := newMockRuleStore(allRulesByUser, nil) + cfg := defaultRulerConfig(t) + + cfg.ShardingStrategy = util.ShardingStrategyShuffle + cfg.EnableSharding = true + cfg.EnableHAEvaluation = true + cfg.EvaluationInterval = 5 * time.Minute + + cfg.Ring = RingConfig{ + InstanceID: id, + InstanceAddr: id, + KVStore: kv.Config{ + Mock: kvStore, + }, + ReplicationFactor: replicationFactor, + ZoneAwarenessEnabled: true, + InstanceZone: rulerAZEvenSpread[id], + } + + r, _ := buildRuler(t, cfg, nil, store, rulerAddrMap) + r.limits = ruleLimits{tenantShard: 3} + rulerAddrMap[id] = r + if r.ring != nil { + require.NoError(t, services.StartAndAwaitRunning(context.Background(), r.ring)) + t.Cleanup(r.ring.StopAsync) + } + return r + } + + for rID, r := range expectedRules { + createRuler(rID) + for u, rules := range r { + allRulesByUser[u] = append(allRulesByUser[u], rules...) 
+ allTokensByRuler[rID] = generateTokenForGroups(rules, 1) + } + } + + err := kvStore.CAS(context.Background(), ringKey, func(in interface{}) (out interface{}, retry bool, err error) { + d, _ := in.(*ring.Desc) + if d == nil { + d = ring.NewDesc() + } + for rID, tokens := range allTokensByRuler { + d.AddIngester(rID, rulerAddrMap[rID].lifecycler.GetInstanceAddr(), rulerAddrMap[rID].lifecycler.GetInstanceZone(), tokens, ring.ACTIVE, time.Now()) + } + return d, true, nil + }) + require.NoError(t, err) + // Wait a bit to make sure ruler's ring is updated. + time.Sleep(100 * time.Millisecond) + + forEachRuler := func(f func(rID string, r *Ruler)) { + for rID, r := range rulerAddrMap { + f(rID, r) + } + } + + // Sync Rules + forEachRuler(func(_ string, r *Ruler) { + r.syncRules(context.Background(), rulerSyncReasonInitial) + }) + + // Update the state of the rulers in the ring based on rulerStateMapOnePending. + err = kvStore.CAS(context.Background(), ringKey, func(in interface{}) (out interface{}, retry bool, err error) { + d, _ := in.(*ring.Desc) + if d == nil { + d = ring.NewDesc() + } + for rID, tokens := range allTokensByRuler { + d.AddIngester(rID, rulerAddrMap[rID].lifecycler.GetInstanceAddr(), rulerAddrMap[rID].lifecycler.GetInstanceZone(), tokens, rulerStateMapOnePending[rID], time.Now()) + } + return d, true, nil + }) + require.NoError(t, err) + // Wait a bit to make sure ruler's ring is updated. + time.Sleep(100 * time.Millisecond) + + rulerAddrMap["ruler1"].Service.StopAsync() + if err := rulerAddrMap["ruler1"].Service.AwaitTerminated(context.Background()); err != nil { + t.Errorf("ruler %s was not terminated: %s", "ruler1", err.Error()) + } + + rulerAddrMap["ruler2"].syncRules(context.Background(), rulerSyncReasonPeriodic) + rulerAddrMap["ruler3"].syncRules(context.Background(), rulerSyncReasonPeriodic) + + requireGroupStateEqual := func(a *GroupStateDesc, b *GroupStateDesc) { + require.Equal(t, a.Group.Interval, b.Group.Interval) + require.Equal(t, a.Group.User, b.Group.User) + require.Equal(t, a.Group.Limit, b.Group.Limit) + require.Equal(t, a.EvaluationTimestamp, b.EvaluationTimestamp) + require.Equal(t, a.EvaluationDuration, b.EvaluationDuration) + require.Equal(t, len(a.ActiveRules), len(b.ActiveRules)) + for i, aRule := range a.ActiveRules { + bRule := b.ActiveRules[i] + require.Equal(t, aRule.EvaluationTimestamp, bRule.EvaluationTimestamp) + require.Equal(t, aRule.EvaluationDuration, bRule.EvaluationDuration) + require.Equal(t, aRule.Health, bRule.Health) + require.Equal(t, aRule.LastError, bRule.LastError) + require.Equal(t, aRule.Rule.Expr, bRule.Rule.Expr) + require.Equal(t, len(aRule.Rule.Labels), len(bRule.Rule.Labels)) + require.Equal(t, fmt.Sprintf("%+v", aRule.Rule.Labels), fmt.Sprintf("%+v", bRule.Rule.Labels)) + if aRule.Rule.Alert != "" { + require.Equal(t, fmt.Sprintf("%+v", aRule.Rule.Annotations), fmt.Sprintf("%+v", bRule.Rule.Annotations)) + require.Equal(t, aRule.Rule.Alert, bRule.Rule.Alert) + require.Equal(t, aRule.Rule.For, bRule.Rule.For) + require.Equal(t, aRule.Rule.KeepFiringFor, bRule.Rule.KeepFiringFor) + require.Equal(t, aRule.State, bRule.State) + require.Equal(t, aRule.Alerts, bRule.Alerts) + } else { + require.Equal(t, aRule.Rule.Record, bRule.Rule.Record) + } + } + } + + getRules := func(ruler string) { + ctx := user.InjectOrgID(context.Background(), tenantId) + ruleStateDescriptions, err := rulerAddrMap[ruler].GetRules(ctx, RulesRequest{}) + require.NoError(t, err) + require.Equal(t, 5, len(ruleStateDescriptions)) + stateByKey := 
map[string]*GroupStateDesc{} + for _, state := range ruleStateDescriptions { + stateByKey[state.Group.Namespace+";"+state.Group.Name] = state + } + // Rule groups whose names start with b come from the backup, and those whose names start with l are evaluating. Apart from + // the Name, each b group's details should equal those of the corresponding l group, as the config is the same. This test + // confirms that the way we convert rulespb.RuleGroupList to GroupStateDesc is consistent with how we convert + // promRules.Group to GroupStateDesc. + requireGroupStateEqual(stateByKey["namespace;l1"], stateByKey["namespace;b1"]) + requireGroupStateEqual(stateByKey["namespace;l2"], stateByKey["namespace;b2"]) + } + + getRules("ruler3") + getRules("ruler2") + + ctx := user.InjectOrgID(context.Background(), tenantId) + + ruleResponse, err := rulerAddrMap["ruler2"].Rules(ctx, &RulesRequest{}) + require.NoError(t, err) + require.Equal(t, 5, len(ruleResponse.Groups)) + + ruleResponse, err = rulerAddrMap["ruler3"].Rules(ctx, &RulesRequest{}) + require.NoError(t, err) + require.Equal(t, 5, len(ruleResponse.Groups)) + } +} + func TestSharding(t *testing.T) { const ( user1 = "user1" @@ -1792,7 +2038,7 @@ func TestSharding(t *testing.T) { } r, _ := buildRuler(t, cfg, nil, store, nil) - r.limits = ruleLimits{evalDelay: 0, tenantShard: tc.shuffleShardSize} + r.limits = ruleLimits{tenantShard: tc.shuffleShardSize} if forceRing != nil { r.ring = forceRing @@ -1922,6 +2168,7 @@ func Test_LoadPartialGroups(t *testing.T) { store := newMockRuleStore(allRules, map[string]error{user1: fmt.Errorf("test")}) u, _ := url.Parse("") cfg := Config{ + RulePath: t.TempDir(), EnableSharding: true, ExternalURL: flagext.URLValue{URL: u}, PollInterval: time.Millisecond * 100, @@ -1941,7 +2188,7 @@ func Test_LoadPartialGroups(t *testing.T) { } r1, manager := buildRuler(t, cfg, nil, store, nil) - r1.limits = ruleLimits{evalDelay: 0, tenantShard: 1} + r1.limits = ruleLimits{tenantShard: 1} require.NoError(t, services.StartAndAwaitRunning(context.Background(), r1)) t.Cleanup(r1.StopAsync) @@ -2465,7 +2712,7 @@ func TestRulerDisablesRuleGroups(t *testing.T) { } r, _ := buildRuler(t, cfg, nil, store, nil) - r.limits = ruleLimits{evalDelay: 0, tenantShard: 3, disabledRuleGroups: tc.disabledRuleGroups} + r.limits = ruleLimits{tenantShard: 3, disabledRuleGroups: tc.disabledRuleGroups} if forceRing != nil { r.ring = forceRing diff --git a/pkg/scheduler/queue/queue.go b/pkg/scheduler/queue/queue.go index 0d634debed..4f6cc130b1 100644 --- a/pkg/scheduler/queue/queue.go +++ b/pkg/scheduler/queue/queue.go @@ -66,9 +66,9 @@ type RequestQueue struct { discardedRequests *prometheus.CounterVec // Per user and priority. 
} -func NewRequestQueue(maxOutstandingPerTenant int, forgetDelay time.Duration, queueLength *prometheus.GaugeVec, discardedRequests *prometheus.CounterVec, limits Limits, registerer prometheus.Registerer) *RequestQueue { +func NewRequestQueue(forgetDelay time.Duration, queueLength *prometheus.GaugeVec, discardedRequests *prometheus.CounterVec, limits Limits, registerer prometheus.Registerer) *RequestQueue { q := &RequestQueue{ - queues: newUserQueues(maxOutstandingPerTenant, forgetDelay, limits, queueLength), + queues: newUserQueues(forgetDelay, limits, queueLength), connectedQuerierWorkers: atomic.NewInt32(0), totalRequests: promauto.With(registerer).NewCounterVec(prometheus.CounterOpts{ Name: "cortex_request_queue_requests_total", diff --git a/pkg/scheduler/queue/queue_test.go b/pkg/scheduler/queue/queue_test.go index 52abe26270..98a1ea0582 100644 --- a/pkg/scheduler/queue/queue_test.go +++ b/pkg/scheduler/queue/queue_test.go @@ -24,7 +24,7 @@ func BenchmarkGetNextRequest(b *testing.B) { queues := make([]*RequestQueue, 0, b.N) for n := 0; n < b.N; n++ { - queue := NewRequestQueue(maxOutstandingPerTenant, 0, + queue := NewRequestQueue(maxOutstandingPerTenant, prometheus.NewGaugeVec(prometheus.GaugeOpts{}, []string{"user", "priority", "type"}), prometheus.NewCounterVec(prometheus.CounterOpts{}, []string{"user", "priority"}), MockLimits{MaxOutstanding: 100}, @@ -83,7 +83,7 @@ func BenchmarkQueueRequest(b *testing.B) { requests := make([]MockRequest, 0, numTenants) for n := 0; n < b.N; n++ { - q := NewRequestQueue(maxOutstandingPerTenant, 0, + q := NewRequestQueue(maxOutstandingPerTenant, prometheus.NewGaugeVec(prometheus.GaugeOpts{}, []string{"user", "priority", "type"}), prometheus.NewCounterVec(prometheus.CounterOpts{}, []string{"user", "priority"}), MockLimits{MaxOutstanding: 100}, @@ -123,7 +123,7 @@ func BenchmarkGetNextRequestPriorityQueue(b *testing.B) { queues := make([]*RequestQueue, 0, b.N) for n := 0; n < b.N; n++ { - queue := NewRequestQueue(maxOutstandingPerTenant, 0, + queue := NewRequestQueue(maxOutstandingPerTenant, prometheus.NewGaugeVec(prometheus.GaugeOpts{}, []string{"user", "priority", "type"}), prometheus.NewCounterVec(prometheus.CounterOpts{}, []string{"user", "priority"}), MockLimits{MaxOutstanding: 100, QueryPriorityVal: validation.QueryPriority{Enabled: true}}, @@ -182,7 +182,7 @@ func BenchmarkQueueRequestPriorityQueue(b *testing.B) { requests := make([]MockRequest, 0, numTenants) for n := 0; n < b.N; n++ { - q := NewRequestQueue(maxOutstandingPerTenant, 0, + q := NewRequestQueue(maxOutstandingPerTenant, prometheus.NewGaugeVec(prometheus.GaugeOpts{}, []string{"user", "priority", "type"}), prometheus.NewCounterVec(prometheus.CounterOpts{}, []string{"user", "priority"}), MockLimits{MaxOutstanding: 100, QueryPriorityVal: validation.QueryPriority{Enabled: true}}, @@ -217,7 +217,7 @@ func BenchmarkQueueRequestPriorityQueue(b *testing.B) { func TestRequestQueue_GetNextRequestForQuerier_ShouldGetRequestAfterReshardingBecauseQuerierHasBeenForgotten(t *testing.T) { const forgetDelay = 3 * time.Second - queue := NewRequestQueue(1, forgetDelay, + queue := NewRequestQueue(forgetDelay, prometheus.NewGaugeVec(prometheus.GaugeOpts{}, []string{"user", "priority", "type"}), prometheus.NewCounterVec(prometheus.CounterOpts{}, []string{"user", "priority"}), MockLimits{MaxOutstanding: 100}, @@ -260,7 +260,7 @@ func TestRequestQueue_GetNextRequestForQuerier_ShouldGetRequestAfterReshardingBe } func TestQueriersShouldGetHighPriorityQueryFirst(t *testing.T) { - queue := NewRequestQueue(0, 
0, + queue := NewRequestQueue(0, prometheus.NewGaugeVec(prometheus.GaugeOpts{}, []string{"user", "priority", "type"}), prometheus.NewCounterVec(prometheus.CounterOpts{}, []string{"user", "priority"}), MockLimits{MaxOutstanding: 3, QueryPriorityVal: validation.QueryPriority{Enabled: true}}, @@ -290,7 +290,7 @@ func TestQueriersShouldGetHighPriorityQueryFirst(t *testing.T) { } func TestReservedQueriersShouldOnlyGetHighPriorityQueries(t *testing.T) { - queue := NewRequestQueue(0, 0, + queue := NewRequestQueue(0, prometheus.NewGaugeVec(prometheus.GaugeOpts{}, []string{"user", "priority", "type"}), prometheus.NewCounterVec(prometheus.CounterOpts{}, []string{"user", "priority"}), MockLimits{ @@ -356,7 +356,7 @@ func TestExitingRequestsShouldPersistEvenIfTheConfigHasChanged(t *testing.T) { limits := MockLimits{ MaxOutstanding: 3, } - queue := NewRequestQueue(0, 0, + queue := NewRequestQueue(0, prometheus.NewGaugeVec(prometheus.GaugeOpts{}, []string{"user", "priority", "type"}), prometheus.NewCounterVec(prometheus.CounterOpts{}, []string{"user", "priority"}), limits, diff --git a/pkg/scheduler/queue/user_queues.go b/pkg/scheduler/queue/user_queues.go index 159df7810b..ee1ce9e804 100644 --- a/pkg/scheduler/queue/user_queues.go +++ b/pkg/scheduler/queue/user_queues.go @@ -38,15 +38,12 @@ type querier struct { // This struct holds user queues for pending requests. It also keeps track of connected queriers, // and mapping between users and queriers. type queues struct { - userQueues map[string]*userQueue - userQueuesMx sync.RWMutex - // List of all users with queues, used for iteration when searching for next queue to handle. // Users removed from the middle are replaced with "". To avoid skipping users during iteration, we only shrink // this list when there are ""'s at the end of it. - users []string - - maxUserQueueSize int + users []string + userQueues map[string]*userQueue + queuesMx sync.RWMutex // How long to wait before removing a querier which has got disconnected // but hasn't notified about a graceful shutdown. @@ -87,16 +84,15 @@ type userQueue struct { index int } -func newUserQueues(maxUserQueueSize int, forgetDelay time.Duration, limits Limits, queueLength *prometheus.GaugeVec) *queues { +func newUserQueues(forgetDelay time.Duration, limits Limits, queueLength *prometheus.GaugeVec) *queues { return &queues{ - userQueues: map[string]*userQueue{}, - users: nil, - maxUserQueueSize: maxUserQueueSize, - forgetDelay: forgetDelay, - queriers: map[string]*querier{}, - sortedQueriers: nil, - limits: limits, - queueLength: queueLength, + userQueues: map[string]*userQueue{}, + users: nil, + forgetDelay: forgetDelay, + queriers: map[string]*querier{}, + sortedQueriers: nil, + limits: limits, + queueLength: queueLength, } } @@ -105,8 +101,8 @@ func (q *queues) len() int { } func (q *queues) deleteQueue(userID string) { - q.userQueuesMx.Lock() - defer q.userQueuesMx.Unlock() + q.queuesMx.Lock() + defer q.queuesMx.Unlock() uq := q.userQueues[userID] if uq == nil { @@ -137,8 +133,8 @@ func (q *queues) getOrAddQueue(userID string, maxQueriers int) userRequestQueue maxQueriers = 0 } - q.userQueuesMx.Lock() - defer q.userQueuesMx.Unlock() + q.queuesMx.Lock() + defer q.queuesMx.Unlock() uq := q.userQueues[userID] priorityEnabled := q.limits.QueryPriority(userID).Enabled @@ -216,12 +212,6 @@ func (q *queues) createUserRequestQueue(userID string) userRequestQueue { queueSize := q.limits.MaxOutstandingPerTenant(userID) - // 0 is the default value of the flag. 
If the old flag is set - // then we use its value for compatibility reason. - if q.maxUserQueueSize != 0 { - queueSize = q.maxUserQueueSize - } - return NewFIFORequestQueue(make(chan Request, queueSize), userID, q.queueLength) } @@ -231,6 +221,9 @@ func (q *queues) createUserRequestQueue(userID string) userRequestQueue { func (q *queues) getNextQueueForQuerier(lastUserIndex int, querierID string) (userRequestQueue, string, int) { uid := lastUserIndex + q.queuesMx.RLock() + defer q.queuesMx.RUnlock() + for iters := 0; iters < len(q.users); iters++ { uid = uid + 1 @@ -245,9 +238,6 @@ func (q *queues) getNextQueueForQuerier(lastUserIndex int, querierID string) (us continue } - q.userQueuesMx.RLock() - defer q.userQueuesMx.RUnlock() - uq := q.userQueues[u] if uq.queriers != nil { diff --git a/pkg/scheduler/queue/user_queues_test.go b/pkg/scheduler/queue/user_queues_test.go index 4e720de402..0c242eafa7 100644 --- a/pkg/scheduler/queue/user_queues_test.go +++ b/pkg/scheduler/queue/user_queues_test.go @@ -16,7 +16,7 @@ import ( ) func TestQueues(t *testing.T) { - uq := newUserQueues(0, 0, MockLimits{}, nil) + uq := newUserQueues(0, MockLimits{}, nil) assert.NotNil(t, uq) assert.NoError(t, isConsistent(uq)) @@ -71,7 +71,7 @@ func TestQueues(t *testing.T) { } func TestQueuesWithQueriers(t *testing.T) { - uq := newUserQueues(0, 0, MockLimits{}, nil) + uq := newUserQueues(0, MockLimits{}, nil) assert.NotNil(t, uq) assert.NoError(t, isConsistent(uq)) @@ -148,7 +148,7 @@ func TestQueuesConsistency(t *testing.T) { for testName, testData := range tests { t.Run(testName, func(t *testing.T) { - uq := newUserQueues(0, testData.forgetDelay, MockLimits{}, nil) + uq := newUserQueues(testData.forgetDelay, MockLimits{}, nil) assert.NotNil(t, uq) assert.NoError(t, isConsistent(uq)) @@ -197,7 +197,7 @@ func TestQueues_ForgetDelay(t *testing.T) { ) now := time.Now() - uq := newUserQueues(0, forgetDelay, MockLimits{}, nil) + uq := newUserQueues(forgetDelay, MockLimits{}, nil) assert.NotNil(t, uq) assert.NoError(t, isConsistent(uq)) @@ -289,7 +289,7 @@ func TestQueues_ForgetDelay_ShouldCorrectlyHandleQuerierReconnectingBeforeForget ) now := time.Now() - uq := newUserQueues(0, forgetDelay, MockLimits{}, nil) + uq := newUserQueues(forgetDelay, MockLimits{}, nil) assert.NotNil(t, uq) assert.NoError(t, isConsistent(uq)) @@ -358,7 +358,7 @@ func TestGetOrAddQueueShouldUpdateProperties(t *testing.T) { limits := MockLimits{ MaxOutstanding: 3, } - q := newUserQueues(0, 0, limits, nil) + q := newUserQueues(0, limits, nil) q.addQuerierConnection("q-1") q.addQuerierConnection("q-2") q.addQuerierConnection("q-3") @@ -463,7 +463,7 @@ func TestQueueConcurrency(t *testing.T) { limits := MockLimits{ MaxOutstanding: 50, } - q := newUserQueues(0, 0, limits, nil) + q := newUserQueues(0, limits, nil) q.addQuerierConnection("q-1") q.addQuerierConnection("q-2") q.addQuerierConnection("q-3") diff --git a/pkg/scheduler/scheduler.go b/pkg/scheduler/scheduler.go index b3088ccddc..91f25410d6 100644 --- a/pkg/scheduler/scheduler.go +++ b/pkg/scheduler/scheduler.go @@ -27,8 +27,10 @@ import ( "github.com/cortexproject/cortex/pkg/scheduler/schedulerpb" "github.com/cortexproject/cortex/pkg/tenant" "github.com/cortexproject/cortex/pkg/util" + "github.com/cortexproject/cortex/pkg/util/flagext" "github.com/cortexproject/cortex/pkg/util/grpcclient" "github.com/cortexproject/cortex/pkg/util/httpgrpcutil" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/cortexproject/cortex/pkg/util/services" 
"github.com/cortexproject/cortex/pkg/util/validation" ) @@ -82,15 +84,14 @@ type connectedFrontend struct { } type Config struct { - MaxOutstandingPerTenant int `yaml:"max_outstanding_requests_per_tenant"` - QuerierForgetDelay time.Duration `yaml:"querier_forget_delay"` - GRPCClientConfig grpcclient.Config `yaml:"grpc_client_config" doc:"description=This configures the gRPC client used to report errors back to the query-frontend."` + QuerierForgetDelay time.Duration `yaml:"querier_forget_delay"` + GRPCClientConfig grpcclient.Config `yaml:"grpc_client_config" doc:"description=This configures the gRPC client used to report errors back to the query-frontend."` } func (cfg *Config) RegisterFlags(f *flag.FlagSet) { - f.IntVar(&cfg.MaxOutstandingPerTenant, "query-scheduler.max-outstanding-requests-per-tenant", 0, "Deprecated (use frontend.max-outstanding-requests-per-tenant instead) and will be removed in v1.17.0: Maximum number of outstanding requests per tenant per query-scheduler. In-flight requests above this limit will fail with HTTP response status code 429.") + flagext.DeprecatedFlag(f, "query-scheduler.max-outstanding-requests-per-tenant", "Deprecated: Use frontend.max-outstanding-requests-per-tenant instead.", util_log.Logger) f.DurationVar(&cfg.QuerierForgetDelay, "query-scheduler.querier-forget-delay", 0, "If a querier disconnects without sending notification about graceful shutdown, the query-scheduler will keep the querier in the tenant's shard until the forget delay has passed. This feature is useful to reduce the blast radius when shuffle-sharding is enabled.") - cfg.GRPCClientConfig.RegisterFlagsWithPrefix("query-scheduler.grpc-client-config", f) + cfg.GRPCClientConfig.RegisterFlagsWithPrefix("query-scheduler.grpc-client-config", "", f) } // NewScheduler creates a new Scheduler. 
@@ -114,7 +115,7 @@ func NewScheduler(cfg Config, limits Limits, log log.Logger, registerer promethe Help: "Total number of query requests discarded.", }, []string{"user", "priority"}) - s.requestQueue = queue.NewRequestQueue(cfg.MaxOutstandingPerTenant, cfg.QuerierForgetDelay, s.queueLength, s.discardedRequests, s.limits, registerer) + s.requestQueue = queue.NewRequestQueue(cfg.QuerierForgetDelay, s.queueLength, s.discardedRequests, s.limits, registerer) s.queueDuration = promauto.With(registerer).NewHistogram(prometheus.HistogramOpts{ Name: "cortex_query_scheduler_queue_duration_seconds", diff --git a/pkg/scheduler/scheduler_test.go b/pkg/scheduler/scheduler_test.go index cc16448358..f8327aeb28 100644 --- a/pkg/scheduler/scheduler_test.go +++ b/pkg/scheduler/scheduler_test.go @@ -35,8 +35,6 @@ const testMaxOutstandingPerTenant = 5 func setupScheduler(t *testing.T, reg prometheus.Registerer) (*Scheduler, schedulerpb.SchedulerForFrontendClient, schedulerpb.SchedulerForQuerierClient) { cfg := Config{} flagext.DefaultValues(&cfg) - cfg.MaxOutstandingPerTenant = testMaxOutstandingPerTenant - s, err := NewScheduler(cfg, frontendv1.MockLimits{Queriers: 2, MockLimits: queue.MockLimits{MaxOutstanding: testMaxOutstandingPerTenant}}, log.NewNopLogger(), reg) require.NoError(t, err) diff --git a/pkg/storage/tsdb/inmemory_index_cache.go b/pkg/storage/tsdb/inmemory_index_cache.go index 1530bf99f8..95e10803cd 100644 --- a/pkg/storage/tsdb/inmemory_index_cache.go +++ b/pkg/storage/tsdb/inmemory_index_cache.go @@ -16,6 +16,8 @@ import ( storecache "github.com/thanos-io/thanos/pkg/store/cache" "github.com/thanos-io/thanos/pkg/tenancy" + + "github.com/cortexproject/cortex/pkg/util" ) type InMemoryIndexCache struct { @@ -147,8 +149,8 @@ func (c *InMemoryIndexCache) FetchMultiPostings(ctx context.Context, blockID uli blockIDKey := blockID.String() requests := 0 hit := 0 - for _, key := range keys { - if ctx.Err() != nil { + for i, key := range keys { + if (i+1)%util.CheckContextEveryNIterations == 0 && ctx.Err() != nil { c.commonMetrics.RequestTotal.WithLabelValues(storecache.CacheTypePostings, tenant).Add(float64(requests)) c.commonMetrics.HitsTotal.WithLabelValues(storecache.CacheTypePostings, tenant).Add(float64(hit)) return hits, misses @@ -208,8 +210,8 @@ func (c *InMemoryIndexCache) FetchMultiSeries(ctx context.Context, blockID ulid. 
blockIDKey := blockID.String() requests := 0 hit := 0 - for _, id := range ids { - if ctx.Err() != nil { + for i, id := range ids { + if (i+1)%util.CheckContextEveryNIterations == 0 && ctx.Err() != nil { c.commonMetrics.RequestTotal.WithLabelValues(storecache.CacheTypeSeries, tenant).Add(float64(requests)) c.commonMetrics.HitsTotal.WithLabelValues(storecache.CacheTypeSeries, tenant).Add(float64(hit)) return hits, misses diff --git a/pkg/storegateway/bucket_stores_test.go b/pkg/storegateway/bucket_stores_test.go index d3efcfc112..cd6baa146a 100644 --- a/pkg/storegateway/bucket_stores_test.go +++ b/pkg/storegateway/bucket_stores_test.go @@ -699,19 +699,20 @@ func TestBucketStores_deleteLocalFilesForExcludedTenants(t *testing.T) { require.NoError(t, stores.InitialSync(ctx)) require.Equal(t, []string{user1, user2}, getUsersInDir(t, cfg.BucketStore.SyncDir)) - metricNames := []string{"cortex_bucket_store_block_drops_total", "cortex_bucket_store_block_loads_total", "cortex_bucket_store_blocks_loaded"} + metricNamesWithoutLoaded := []string{"cortex_bucket_store_block_drops_total", "cortex_bucket_store_block_loads_total"} + metricNames := append(metricNamesWithoutLoaded, "cortex_bucket_store_blocks_loaded") require.NoError(t, testutil.GatherAndCompare(reg, strings.NewReader(` - # HELP cortex_bucket_store_block_drops_total Total number of local blocks that were dropped. - # TYPE cortex_bucket_store_block_drops_total counter - cortex_bucket_store_block_drops_total 0 - # HELP cortex_bucket_store_block_loads_total Total number of remote block loading attempts. - # TYPE cortex_bucket_store_block_loads_total counter - cortex_bucket_store_block_loads_total 2 - # HELP cortex_bucket_store_blocks_loaded Number of currently loaded blocks. - # TYPE cortex_bucket_store_blocks_loaded gauge - cortex_bucket_store_blocks_loaded{user="user-1"} 1 - cortex_bucket_store_blocks_loaded{user="user-2"} 1 + # HELP cortex_bucket_store_block_drops_total Total number of local blocks that were dropped. + # TYPE cortex_bucket_store_block_drops_total counter + cortex_bucket_store_block_drops_total 0 + # HELP cortex_bucket_store_block_loads_total Total number of remote block loading attempts. + # TYPE cortex_bucket_store_block_loads_total counter + cortex_bucket_store_block_loads_total 2 + # HELP cortex_bucket_store_blocks_loaded Number of currently loaded blocks. + # TYPE cortex_bucket_store_blocks_loaded gauge + cortex_bucket_store_blocks_loaded{user="user-1"} 1 + cortex_bucket_store_blocks_loaded{user="user-2"} 1 `), metricNames...)) // Single user left in shard. @@ -720,15 +721,15 @@ func TestBucketStores_deleteLocalFilesForExcludedTenants(t *testing.T) { require.Equal(t, []string{user1}, getUsersInDir(t, cfg.BucketStore.SyncDir)) require.NoError(t, testutil.GatherAndCompare(reg, strings.NewReader(` - # HELP cortex_bucket_store_block_drops_total Total number of local blocks that were dropped. - # TYPE cortex_bucket_store_block_drops_total counter - cortex_bucket_store_block_drops_total 1 - # HELP cortex_bucket_store_block_loads_total Total number of remote block loading attempts. - # TYPE cortex_bucket_store_block_loads_total counter - cortex_bucket_store_block_loads_total 2 - # HELP cortex_bucket_store_blocks_loaded Number of currently loaded blocks. - # TYPE cortex_bucket_store_blocks_loaded gauge - cortex_bucket_store_blocks_loaded{user="user-1"} 1 + # HELP cortex_bucket_store_block_drops_total Total number of local blocks that were dropped. 
+ # TYPE cortex_bucket_store_block_drops_total counter + cortex_bucket_store_block_drops_total 1 + # HELP cortex_bucket_store_block_loads_total Total number of remote block loading attempts. + # TYPE cortex_bucket_store_block_loads_total counter + cortex_bucket_store_block_loads_total 2 + # HELP cortex_bucket_store_blocks_loaded Number of currently loaded blocks. + # TYPE cortex_bucket_store_blocks_loaded gauge + cortex_bucket_store_blocks_loaded{user="user-1"} 1 `), metricNames...)) // No users left in this shard. @@ -737,13 +738,13 @@ func TestBucketStores_deleteLocalFilesForExcludedTenants(t *testing.T) { require.Equal(t, []string(nil), getUsersInDir(t, cfg.BucketStore.SyncDir)) require.NoError(t, testutil.GatherAndCompare(reg, strings.NewReader(` - # HELP cortex_bucket_store_block_drops_total Total number of local blocks that were dropped. - # TYPE cortex_bucket_store_block_drops_total counter - cortex_bucket_store_block_drops_total 2 - # HELP cortex_bucket_store_block_loads_total Total number of remote block loading attempts. - # TYPE cortex_bucket_store_block_loads_total counter - cortex_bucket_store_block_loads_total 2 - `), metricNames...)) + # HELP cortex_bucket_store_block_drops_total Total number of local blocks that were dropped. + # TYPE cortex_bucket_store_block_drops_total counter + cortex_bucket_store_block_drops_total 2 + # HELP cortex_bucket_store_block_loads_total Total number of remote block loading attempts. + # TYPE cortex_bucket_store_block_loads_total counter + cortex_bucket_store_block_loads_total 2 + `), metricNamesWithoutLoaded...)) // We can always get user back. sharding.users = []string{user1} @@ -751,15 +752,15 @@ func TestBucketStores_deleteLocalFilesForExcludedTenants(t *testing.T) { require.Equal(t, []string{user1}, getUsersInDir(t, cfg.BucketStore.SyncDir)) require.NoError(t, testutil.GatherAndCompare(reg, strings.NewReader(` - # HELP cortex_bucket_store_block_drops_total Total number of local blocks that were dropped. - # TYPE cortex_bucket_store_block_drops_total counter - cortex_bucket_store_block_drops_total 2 - # HELP cortex_bucket_store_block_loads_total Total number of remote block loading attempts. - # TYPE cortex_bucket_store_block_loads_total counter - cortex_bucket_store_block_loads_total 3 - # HELP cortex_bucket_store_blocks_loaded Number of currently loaded blocks. - # TYPE cortex_bucket_store_blocks_loaded gauge - cortex_bucket_store_blocks_loaded{user="user-1"} 1 + # HELP cortex_bucket_store_block_drops_total Total number of local blocks that were dropped. + # TYPE cortex_bucket_store_block_drops_total counter + cortex_bucket_store_block_drops_total 2 + # HELP cortex_bucket_store_block_loads_total Total number of remote block loading attempts. + # TYPE cortex_bucket_store_block_loads_total counter + cortex_bucket_store_block_loads_total 3 + # HELP cortex_bucket_store_blocks_loaded Number of currently loaded blocks. + # TYPE cortex_bucket_store_blocks_loaded gauge + cortex_bucket_store_blocks_loaded{user="user-1"} 1 `), metricNames...)) } diff --git a/pkg/storegateway/gateway_ring.go b/pkg/storegateway/gateway_ring.go index 8965c32f95..fc39f80b42 100644 --- a/pkg/storegateway/gateway_ring.go +++ b/pkg/storegateway/gateway_ring.go @@ -106,7 +106,7 @@ func (cfg *RingConfig) RegisterFlags(f *flag.FlagSet) { f.StringVar(&cfg.TokensFilePath, ringFlagsPrefix+"tokens-file-path", "", "File path where tokens are stored. 
If empty, tokens are not stored at shutdown and restored at startup.") f.BoolVar(&cfg.ZoneAwarenessEnabled, ringFlagsPrefix+"zone-awareness-enabled", false, "True to enable zone-awareness and replicate blocks across different availability zones.") f.BoolVar(&cfg.KeepInstanceInTheRingOnShutdown, ringFlagsPrefix+"keep-instance-in-the-ring-on-shutdown", false, "True to keep the store gateway instance in the ring when it shuts down. The instance will then be auto-forgotten from the ring after 10*heartbeat_timeout.") - f.BoolVar(&cfg.ZoneStableShuffleSharding, ringFlagsPrefix+"zone-stable-shuffle-sharding", false, "If true, use zone stable shuffle sharding algorithm. Otherwise, use the default shuffle sharding algorithm.") + f.BoolVar(&cfg.ZoneStableShuffleSharding, ringFlagsPrefix+"zone-stable-shuffle-sharding", true, "If true, use zone stable shuffle sharding algorithm. Otherwise, use the default shuffle sharding algorithm.") // Wait stability flags. f.DurationVar(&cfg.WaitStabilityMinDuration, ringFlagsPrefix+"wait-stability-min-duration", time.Minute, "Minimum time to wait for ring stability at startup. 0 to disable.") diff --git a/pkg/tracing/tracing.go b/pkg/tracing/tracing.go index 3a27211269..ac3a23bfeb 100644 --- a/pkg/tracing/tracing.go +++ b/pkg/tracing/tracing.go @@ -38,7 +38,6 @@ type Config struct { } type Otel struct { - OltpEndpoint string `yaml:"oltp_endpoint" json:"oltp_endpoint" doc:"hidden"` OtlpEndpoint string `yaml:"otlp_endpoint" json:"otlp_endpoint"` ExporterType string `yaml:"exporter_type" json:"exporter_type"` SampleRatio float64 `yaml:"sample_ratio" json:"sample_ratio"` @@ -53,7 +52,6 @@ func (c *Config) RegisterFlags(f *flag.FlagSet) { p := "tracing" f.StringVar(&c.Type, p+".type", JaegerType, "Tracing type. OTEL and JAEGER are currently supported. For jaeger `JAEGER_AGENT_HOST` environment variable should also be set. See: https://cortexmetrics.io/docs/guides/tracing .") f.Float64Var(&c.Otel.SampleRatio, p+".otel.sample-ratio", 0.001, "Fraction of traces to be sampled. Fractions >= 1 means sampling if off and everything is traced.") - f.StringVar(&c.Otel.OltpEndpoint, p+".otel.oltp-endpoint", "", "DEPRECATED: use otel.otlp-endpoint instead.") f.StringVar(&c.Otel.OtlpEndpoint, p+".otel.otlp-endpoint", "", "otl collector endpoint that the driver will use to send spans.") f.StringVar(&c.Otel.ExporterType, p+".otel.exporter-type", "", "enhance/modify traces/propagators for specific exporter. If empty, OTEL defaults will apply. Supported values are: `awsxray.`") f.BoolVar(&c.Otel.TLSEnabled, p+".otel.tls-enabled", c.Otel.TLSEnabled, "Enable TLS in the GRPC client. This flag needs to be enabled when any other TLS flag is set. If set to false, insecure connection to gRPC server will be used.") @@ -64,12 +62,9 @@ func (c *Config) RegisterFlags(f *flag.FlagSet) { func (c *Config) Validate() error { switch strings.ToLower(c.Type) { case OtelType: - if (c.Otel.OtlpEndpoint == "") && (c.Otel.OltpEndpoint == "") { + if c.Otel.OtlpEndpoint == "" { return errors.New("otlp-endpoint must be defined when using otel exporter") } - if len(c.Otel.OltpEndpoint) > 0 { - level.Warn(util_log.Logger).Log("msg", "DEPRECATED: otel.oltp-endpoint is deprecated. 
Use otel.otlp-endpoint instead.") - } } return nil @@ -90,15 +85,7 @@ func SetupTracing(ctx context.Context, name string, c Config) (func(context.Cont case OtelType: util_log.Logger.Log("msg", "creating otel exporter") - if (len(c.Otel.OtlpEndpoint) > 0) && (len(c.Otel.OltpEndpoint) > 0) { - level.Warn(util_log.Logger).Log("msg", "DEPRECATED: otel.otlp and otel.oltp both set, using otel.otlp because otel.oltp is deprecated") - } - endpoint := c.Otel.OtlpEndpoint - if (c.Otel.OtlpEndpoint == "") && (len(c.Otel.OltpEndpoint) > 0) { - level.Warn(util_log.Logger).Log("msg", "DEPRECATED: otel.oltp is deprecated use otel.otlp") - endpoint = c.Otel.OltpEndpoint - } options := []otlptracegrpc.Option{ otlptracegrpc.WithEndpoint(endpoint), } diff --git a/pkg/util/context.go b/pkg/util/context.go new file mode 100644 index 0000000000..6cd5766fc6 --- /dev/null +++ b/pkg/util/context.go @@ -0,0 +1,6 @@ +package util + +const ( + // CheckContextEveryNIterations is used in some tight loops to check if the context is done. + CheckContextEveryNIterations = 128 +) diff --git a/pkg/util/grpcclient/grpcclient.go b/pkg/util/grpcclient/grpcclient.go index 61f7bfce28..edd767ad4d 100644 --- a/pkg/util/grpcclient/grpcclient.go +++ b/pkg/util/grpcclient/grpcclient.go @@ -36,14 +36,14 @@ type Config struct { // RegisterFlags registers flags. func (cfg *Config) RegisterFlags(f *flag.FlagSet) { - cfg.RegisterFlagsWithPrefix("", f) + cfg.RegisterFlagsWithPrefix("", "", f) } // RegisterFlagsWithPrefix registers flags with prefix. -func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { +func (cfg *Config) RegisterFlagsWithPrefix(prefix, defaultGrpcCompression string, f *flag.FlagSet) { f.IntVar(&cfg.MaxRecvMsgSize, prefix+".grpc-max-recv-msg-size", 100<<20, "gRPC client max receive message size (bytes).") f.IntVar(&cfg.MaxSendMsgSize, prefix+".grpc-max-send-msg-size", 16<<20, "gRPC client max send message size (bytes).") - f.StringVar(&cfg.GRPCCompression, prefix+".grpc-compression", "", "Use compression when sending messages. Supported values are: 'gzip', 'snappy', 'snappy-block' ,'zstd' and '' (disable compression)") + f.StringVar(&cfg.GRPCCompression, prefix+".grpc-compression", defaultGrpcCompression, "Use compression when sending messages. Supported values are: 'gzip', 'snappy', 'snappy-block' ,'zstd' and '' (disable compression)") f.Float64Var(&cfg.RateLimit, prefix+".grpc-client-rate-limit", 0., "Rate limit for gRPC client; 0 means disabled.") f.IntVar(&cfg.RateLimitBurst, prefix+".grpc-client-rate-limit-burst", 0, "Rate limit burst for gRPC client.") f.BoolVar(&cfg.BackoffOnRatelimits, prefix+".backoff-on-ratelimits", false, "Enable backoff and retry when we hit ratelimits.") diff --git a/pkg/util/shard.go b/pkg/util/shard.go index 82392b3a1a..5d3de01cc4 100644 --- a/pkg/util/shard.go +++ b/pkg/util/shard.go @@ -10,6 +10,10 @@ const ( // Sharding strategies. 
ShardingStrategyDefault = "default" ShardingStrategyShuffle = "shuffle-sharding" + + // Compaction strategies. + CompactionStrategyDefault = "default" + CompactionStrategyPartitioning = "partitioning" ) var ( diff --git a/pkg/util/validation/limits.go b/pkg/util/validation/limits.go index 2e885a6c40..87173abb13 100644 --- a/pkg/util/validation/limits.go +++ b/pkg/util/validation/limits.go @@ -11,6 +11,7 @@ import ( "time" "github.com/cespare/xxhash/v2" + "github.com/go-kit/log/level" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/relabel" @@ -265,7 +266,7 @@ func (l *Limits) RegisterFlags(f *flag.FlagSet) { f.IntVar(&l.MaxOutstandingPerTenant, "frontend.max-outstanding-requests-per-tenant", 100, "Maximum number of outstanding requests per tenant per request queue (either query frontend or query scheduler); requests beyond this error with HTTP 429.") - f.Var(&l.RulerEvaluationDelay, "ruler.evaluation-delay-duration", "Duration to delay the evaluation of rules to ensure the underlying metrics have been pushed to Cortex.") + f.Var(&l.RulerEvaluationDelay, "ruler.evaluation-delay-duration", "Deprecated (use ruler.query-offset instead) and will be removed in v1.19.0: Duration to delay the evaluation of rules to ensure the underlying metrics have been pushed to Cortex.") f.IntVar(&l.RulerTenantShardSize, "ruler.tenant-shard-size", 0, "The default tenant's shard size when the shuffle-sharding strategy is used by ruler. When this setting is specified in the per-tenant overrides, a value of 0 disables shuffle sharding for the tenant.") f.IntVar(&l.RulerMaxRulesPerRuleGroup, "ruler.max-rules-per-rule-group", 0, "Maximum number of rules per rule group per-tenant. 0 to disable.") f.IntVar(&l.RulerMaxRuleGroupsPerTenant, "ruler.max-rule-groups-per-tenant", 0, "Maximum number of rule groups per-tenant. 0 to disable.") @@ -758,11 +759,6 @@ func (o *Overrides) IngestionTenantShardSize(userID string) int { return o.GetOverridesForUser(userID).IngestionTenantShardSize } -// EvaluationDelay returns the rules evaluation delay for a given user. -func (o *Overrides) EvaluationDelay(userID string) time.Duration { - return time.Duration(o.GetOverridesForUser(userID).RulerEvaluationDelay) -} - // CompactorBlocksRetentionPeriod returns the retention period for a given user. func (o *Overrides) CompactorBlocksRetentionPeriod(userID string) time.Duration { return time.Duration(o.GetOverridesForUser(userID).CompactorBlocksRetentionPeriod) } @@ -795,7 +791,13 @@ func (o *Overrides) RulerMaxRuleGroupsPerTenant(userID string) int { // RulerQueryOffset returns the rule query offset for a given user. func (o *Overrides) RulerQueryOffset(userID string) time.Duration { - return time.Duration(o.GetOverridesForUser(userID).RulerQueryOffset) + ruleOffset := time.Duration(o.GetOverridesForUser(userID).RulerQueryOffset) + evaluationDelay := time.Duration(o.GetOverridesForUser(userID).RulerEvaluationDelay) + if evaluationDelay > ruleOffset { + level.Warn(util_log.Logger).Log("msg", "ruler.query-offset was overridden by the larger value of the deprecated ruler.evaluation-delay-duration", "ruler.query-offset", ruleOffset, "ruler.evaluation-delay-duration", evaluationDelay) + return evaluationDelay + } + return ruleOffset } // StoreGatewayTenantShardSize returns the store-gateway shard size for a given user. 
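The new RulerQueryOffset override above effectively takes the maximum of ruler.query-offset and the deprecated ruler.evaluation-delay-duration. A standalone sketch of just that resolution rule (resolveQueryOffset is a hypothetical helper for illustration, not the Overrides method itself):

package main

import (
	"fmt"
	"time"
)

// resolveQueryOffset mirrors the precedence rule above: the deprecated
// evaluation delay wins only while it is larger than the configured offset.
func resolveQueryOffset(queryOffset, deprecatedEvalDelay time.Duration) time.Duration {
	if deprecatedEvalDelay > queryOffset {
		return deprecatedEvalDelay
	}
	return queryOffset
}

func main() {
	fmt.Println(resolveQueryOffset(30*time.Second, time.Minute)) // 1m0s: the deprecated delay wins
	fmt.Println(resolveQueryOffset(2*time.Minute, time.Minute))  // 2m0s: the offset wins
}

Taking the larger value keeps old configurations safe: a tenant that only ever set the deprecated delay keeps at least that much offset until the override is migrated.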
diff --git a/pkg/util/validation/limits_test.go b/pkg/util/validation/limits_test.go index 05807f63c9..997988ada9 100644 --- a/pkg/util/validation/limits_test.go +++ b/pkg/util/validation/limits_test.go @@ -764,3 +764,21 @@ func TestCompileQueryPriorityRegex(t *testing.T) { require.NoError(t, err) require.Nil(t, l.QueryPriority.Priorities[0].QueryAttributes[0].CompiledRegex) } + +func TestEvaluationDelayHigherThanRulerQueryOffset(t *testing.T) { + tenant := "tenant" + evaluationDelay := time.Duration(10) + tenantLimits := map[string]*Limits{ + tenant: { + RulerQueryOffset: 5, + RulerEvaluationDelay: model.Duration(evaluationDelay), + }, + } + + defaults := Limits{} + ov, err := NewOverrides(defaults, newMockTenantLimits(tenantLimits)) + require.NoError(t, err) + + rulerQueryOffset := ov.RulerQueryOffset(tenant) + assert.Equal(t, evaluationDelay, rulerQueryOffset) +} diff --git a/pkg/util/validation/validate.go b/pkg/util/validation/validate.go index c13e2cb72f..be94cfa2f1 100644 --- a/pkg/util/validation/validate.go +++ b/pkg/util/validation/validate.go @@ -74,9 +74,10 @@ const ( ) type ValidateMetrics struct { - DiscardedSamples *prometheus.CounterVec - DiscardedExemplars *prometheus.CounterVec - DiscardedMetadata *prometheus.CounterVec + DiscardedSamples *prometheus.CounterVec + DiscardedExemplars *prometheus.CounterVec + DiscardedMetadata *prometheus.CounterVec + HistogramSamplesReducedResolution *prometheus.CounterVec } func registerCollector(r prometheus.Registerer, c prometheus.Collector) { @@ -111,10 +112,19 @@ func NewValidateMetrics(r prometheus.Registerer) *ValidateMetrics { []string{discardReasonLabel, "user"}, ) registerCollector(r, discardedMetadata) + histogramSamplesReducedResolution := prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "cortex_reduced_resolution_histogram_samples_total", + Help: "The total number of histogram samples that had the resolution reduced.", + }, + []string{"user"}, + ) + registerCollector(r, histogramSamplesReducedResolution) m := &ValidateMetrics{ - DiscardedSamples: discardedSamples, - DiscardedExemplars: discardedExemplars, - DiscardedMetadata: discardedMetadata, + DiscardedSamples: discardedSamples, + DiscardedExemplars: discardedExemplars, + DiscardedMetadata: discardedMetadata, + HistogramSamplesReducedResolution: histogramSamplesReducedResolution, } return m @@ -286,6 +296,7 @@ func ValidateNativeHistogram(validateMetrics *ValidateMetrics, limits *Limits, u return cortexpb.Histogram{}, newHistogramBucketLimitExceededError(ls, limits.MaxNativeHistogramBuckets) } fh := cortexpb.FloatHistogramProtoToFloatHistogram(histogramSample) + oBuckets := len(fh.PositiveBuckets) + len(fh.NegativeBuckets) for len(fh.PositiveBuckets)+len(fh.NegativeBuckets) > limits.MaxNativeHistogramBuckets { if fh.Schema <= histogram.ExponentialSchemaMin { validateMetrics.DiscardedSamples.WithLabelValues(nativeHistogramBucketCountLimitExceeded, userID).Inc() @@ -293,6 +304,9 @@ func ValidateNativeHistogram(validateMetrics *ValidateMetrics, limits *Limits, u } fh = fh.ReduceResolution(fh.Schema - 1) } + if oBuckets != len(fh.PositiveBuckets)+len(fh.NegativeBuckets) { + validateMetrics.HistogramSamplesReducedResolution.WithLabelValues(userID).Inc() + } // If resolution reduced, convert new float histogram to protobuf type again. 
return cortexpb.FloatHistogramToHistogramProto(histogramSample.TimestampMs, fh), nil } @@ -308,6 +322,7 @@ func ValidateNativeHistogram(validateMetrics *ValidateMetrics, limits *Limits, u return cortexpb.Histogram{}, newHistogramBucketLimitExceededError(ls, limits.MaxNativeHistogramBuckets) } h := cortexpb.HistogramProtoToHistogram(histogramSample) + oBuckets := len(h.PositiveBuckets) + len(h.NegativeBuckets) for len(h.PositiveBuckets)+len(h.NegativeBuckets) > limits.MaxNativeHistogramBuckets { if h.Schema <= histogram.ExponentialSchemaMin { validateMetrics.DiscardedSamples.WithLabelValues(nativeHistogramBucketCountLimitExceeded, userID).Inc() @@ -315,6 +330,9 @@ func ValidateNativeHistogram(validateMetrics *ValidateMetrics, limits *Limits, u } h = h.ReduceResolution(h.Schema - 1) } + if oBuckets != len(h.PositiveBuckets)+len(h.NegativeBuckets) { + validateMetrics.HistogramSamplesReducedResolution.WithLabelValues(userID).Inc() + } // If resolution reduced, convert new histogram to protobuf type again. return cortexpb.HistogramToHistogramProto(histogramSample.TimestampMs, h), nil } @@ -331,4 +349,7 @@ func DeletePerUserValidationMetrics(validateMetrics *ValidateMetrics, userID str if err := util.DeleteMatchingLabels(validateMetrics.DiscardedMetadata, filter); err != nil { level.Warn(log).Log("msg", "failed to remove cortex_discarded_metadata_total metric for user", "user", userID, "err", err) } + if err := util.DeleteMatchingLabels(validateMetrics.HistogramSamplesReducedResolution, filter); err != nil { + level.Warn(log).Log("msg", "failed to remove cortex_reduced_resolution_histogram_samples_total metric for user", "user", userID, "err", err) + } } diff --git a/pkg/util/validation/validate_test.go b/pkg/util/validation/validate_test.go index 7957c89a11..7d7cf45930 100644 --- a/pkg/util/validation/validate_test.go +++ b/pkg/util/validation/validate_test.go @@ -310,6 +310,7 @@ func TestValidateNativeHistogram(t *testing.T) { for _, tc := range []struct { name string bucketLimit int + resolutionReduced bool histogram cortexpb.Histogram expectedHistogram cortexpb.Histogram expectedErr error @@ -341,12 +342,14 @@ func TestValidateNativeHistogram(t *testing.T) { bucketLimit: 6, histogram: cortexpb.HistogramToHistogramProto(0, h.Copy()), expectedHistogram: cortexpb.HistogramToHistogramProto(0, h.Copy().ReduceResolution(0)), + resolutionReduced: true, }, { name: "exceed limit and reduce resolution for 1 level, float histogram", bucketLimit: 6, histogram: cortexpb.FloatHistogramToHistogramProto(0, fh.Copy()), expectedHistogram: cortexpb.FloatHistogramToHistogramProto(0, fh.Copy().ReduceResolution(0)), + resolutionReduced: true, }, { name: "exceed limit and reduce resolution for 2 levels, histogram", @@ -394,7 +397,13 @@ func TestValidateNativeHistogram(t *testing.T) { if tc.expectedErr != nil { require.Equal(t, tc.expectedErr, actualErr) require.Equal(t, float64(1), testutil.ToFloat64(validateMetrics.DiscardedSamples.WithLabelValues(nativeHistogramBucketCountLimitExceeded, userID))) + // Should never increment if error was returned + require.Equal(t, float64(0), testutil.ToFloat64(validateMetrics.HistogramSamplesReducedResolution.WithLabelValues(userID))) + } else { + if tc.resolutionReduced { + require.Equal(t, float64(1), testutil.ToFloat64(validateMetrics.HistogramSamplesReducedResolution.WithLabelValues(userID))) + } require.NoError(t, actualErr) require.Equal(t, tc.expectedHistogram, actualHistogram) } diff --git a/vendor/cloud.google.com/go/auth/CHANGES.md 
b/vendor/cloud.google.com/go/auth/CHANGES.md index 7ef5fc0def..73d8ea9450 100644 --- a/vendor/cloud.google.com/go/auth/CHANGES.md +++ b/vendor/cloud.google.com/go/auth/CHANGES.md @@ -1,5 +1,46 @@ # Changelog +## [0.7.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.6.1...auth/v0.7.0) (2024-07-09) + + +### Features + +* **auth:** Add workload X509 cert provider as a default cert provider ([#10479](https://github.com/googleapis/google-cloud-go/issues/10479)) ([c51ee6c](https://github.com/googleapis/google-cloud-go/commit/c51ee6cf65ce05b4d501083e49d468c75ac1ea63)) + + +### Bug Fixes + +* **auth/oauth2adapt:** Bump google.golang.org/api@v0.187.0 ([8fa9e39](https://github.com/googleapis/google-cloud-go/commit/8fa9e398e512fd8533fd49060371e61b5725a85b)) +* **auth:** Bump google.golang.org/api@v0.187.0 ([8fa9e39](https://github.com/googleapis/google-cloud-go/commit/8fa9e398e512fd8533fd49060371e61b5725a85b)) +* **auth:** Check len of slices, not non-nil ([#10483](https://github.com/googleapis/google-cloud-go/issues/10483)) ([0a966a1](https://github.com/googleapis/google-cloud-go/commit/0a966a183e5f0e811977216d736d875b7233e942)) + +## [0.6.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.6.0...auth/v0.6.1) (2024-07-01) + + +### Bug Fixes + +* **auth:** Support gRPC API keys ([#10460](https://github.com/googleapis/google-cloud-go/issues/10460)) ([daa6646](https://github.com/googleapis/google-cloud-go/commit/daa6646d2af5d7fb5b30489f4934c7db89868c7c)) +* **auth:** Update http and grpc transports to support token exchange over mTLS ([#10397](https://github.com/googleapis/google-cloud-go/issues/10397)) ([c6dfdcf](https://github.com/googleapis/google-cloud-go/commit/c6dfdcf893c3f971eba15026c12db0a960ae81f2)) + +## [0.6.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.5.2...auth/v0.6.0) (2024-06-25) + + +### Features + +* **auth:** Add non-blocking token refresh for compute MDS ([#10263](https://github.com/googleapis/google-cloud-go/issues/10263)) ([9ac350d](https://github.com/googleapis/google-cloud-go/commit/9ac350da11a49b8e2174d3fc5b1a5070fec78b4e)) + + +### Bug Fixes + +* **auth:** Return error if envvar detected file returns an error ([#10431](https://github.com/googleapis/google-cloud-go/issues/10431)) ([e52b9a7](https://github.com/googleapis/google-cloud-go/commit/e52b9a7c45468827f5d220ab00965191faeb9d05)) + +## [0.5.2](https://github.com/googleapis/google-cloud-go/compare/auth/v0.5.1...auth/v0.5.2) (2024-06-24) + + +### Bug Fixes + +* **auth:** Fetch initial token when CachedTokenProviderOptions.DisableAutoRefresh is true ([#10415](https://github.com/googleapis/google-cloud-go/issues/10415)) ([3266763](https://github.com/googleapis/google-cloud-go/commit/32667635ca2efad05cd8c087c004ca07d7406913)), refs [#10414](https://github.com/googleapis/google-cloud-go/issues/10414) + ## [0.5.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.5.0...auth/v0.5.1) (2024-05-31) diff --git a/vendor/cloud.google.com/go/auth/auth.go b/vendor/cloud.google.com/go/auth/auth.go index d579e482e8..58af931887 100644 --- a/vendor/cloud.google.com/go/auth/auth.go +++ b/vendor/cloud.google.com/go/auth/auth.go @@ -44,6 +44,21 @@ const ( universeDomainDefault = "googleapis.com" ) +// tokenState represents different states for a [Token]. +type tokenState int + +const ( + // fresh indicates that the [Token] is valid. It is not expired or close to + // expired, or the token has no expiry. 
+ fresh tokenState = iota + // stale indicates that the [Token] is close to expired, and should be + // refreshed. The token can be used normally. + stale + // invalid indicates that the [Token] is expired or invalid. The token + // cannot be used for a normal operation. + invalid +) + var ( defaultGrantType = "urn:ietf:params:oauth:grant-type:jwt-bearer" defaultHeader = &jwt.Header{Algorithm: jwt.HeaderAlgRSA256, Type: jwt.HeaderType} @@ -81,13 +96,13 @@ type Token struct { // IsValid reports that a [Token] is non-nil, has a [Token.Value], and has not // expired. A token is considered expired if [Token.Expiry] has passed or will -// pass in the next 10 seconds. +// pass in the next 225 seconds. func (t *Token) IsValid() bool { return t.isValidWithEarlyExpiry(defaultExpiryDelta) } func (t *Token) isValidWithEarlyExpiry(earlyExpiry time.Duration) bool { - if t == nil || t.Value == "" { + if t.isEmpty() { return false } if t.Expiry.IsZero() { @@ -96,6 +111,10 @@ func (t *Token) isValidWithEarlyExpiry(earlyExpiry time.Duration) bool { return !t.Expiry.Round(0).Add(-earlyExpiry).Before(timeNow()) } +func (t *Token) isEmpty() bool { + return t == nil || t.Value == "" +} + // Credentials holds Google credentials, including // [Application Default Credentials](https://developers.google.com/accounts/docs/application-default-credentials). type Credentials struct { @@ -206,11 +225,15 @@ func NewCredentials(opts *CredentialsOptions) *Credentials { // CachedTokenProvider. type CachedTokenProviderOptions struct { // DisableAutoRefresh makes the TokenProvider always return the same token, - // even if it is expired. + // even if it is expired. The default is false. Optional. DisableAutoRefresh bool // ExpireEarly configures the amount of time before a token expires, that it - // should be refreshed. If unset, the default value is 10 seconds. + // should be refreshed. If unset, the default value is 3 minutes and 45 + // seconds. Optional. ExpireEarly time.Duration + // DisableAsyncRefresh configures a synchronous workflow that refreshes + // stale tokens while blocking. The default is false. Optional. + DisableAsyncRefresh bool } func (ctpo *CachedTokenProviderOptions) autoRefresh() bool { @@ -227,34 +250,126 @@ func (ctpo *CachedTokenProviderOptions) expireEarly() time.Duration { return ctpo.ExpireEarly } +func (ctpo *CachedTokenProviderOptions) blockingRefresh() bool { + if ctpo == nil { + return false + } + return ctpo.DisableAsyncRefresh +} + // NewCachedTokenProvider wraps a [TokenProvider] to cache the tokens returned -// by the underlying provider. By default it will refresh tokens ten seconds -// before they expire, but this time can be configured with the optional -// options. +// by the underlying provider. By default it will refresh tokens asynchronously +// (non-blocking mode) within a window that starts 3 minutes and 45 seconds +// before they expire. The asynchronous (non-blocking) refresh can be changed to +// a synchronous (blocking) refresh using the +// CachedTokenProviderOptions.DisableAsyncRefresh option. The time-before-expiry +// duration can be configured using the CachedTokenProviderOptions.ExpireEarly +// option. 
func NewCachedTokenProvider(tp TokenProvider, opts *CachedTokenProviderOptions) TokenProvider { if ctp, ok := tp.(*cachedTokenProvider); ok { return ctp } return &cachedTokenProvider{ - tp: tp, - autoRefresh: opts.autoRefresh(), - expireEarly: opts.expireEarly(), + tp: tp, + autoRefresh: opts.autoRefresh(), + expireEarly: opts.expireEarly(), + blockingRefresh: opts.blockingRefresh(), } } type cachedTokenProvider struct { - tp TokenProvider - autoRefresh bool - expireEarly time.Duration + tp TokenProvider + autoRefresh bool + expireEarly time.Duration + blockingRefresh bool mu sync.Mutex cachedToken *Token + // isRefreshRunning ensures that the non-blocking refresh will only be + // attempted once, even if multiple callers enter the Token method. + isRefreshRunning bool + // isRefreshErr ensures that the non-blocking refresh will only be attempted + // once per refresh window if an error is encountered. + isRefreshErr bool } func (c *cachedTokenProvider) Token(ctx context.Context) (*Token, error) { + if c.blockingRefresh { + return c.tokenBlocking(ctx) + } + return c.tokenNonBlocking(ctx) +} + +func (c *cachedTokenProvider) tokenNonBlocking(ctx context.Context) (*Token, error) { + switch c.tokenState() { + case fresh: + c.mu.Lock() + defer c.mu.Unlock() + return c.cachedToken, nil + case stale: + c.tokenAsync(ctx) + // Return the stale token immediately to not block customer requests to Cloud services. + c.mu.Lock() + defer c.mu.Unlock() + return c.cachedToken, nil + default: // invalid + return c.tokenBlocking(ctx) + } +} + +// tokenState reports the token's validity. +func (c *cachedTokenProvider) tokenState() tokenState { c.mu.Lock() defer c.mu.Unlock() - if c.cachedToken.IsValid() || !c.autoRefresh { + t := c.cachedToken + if t == nil || t.Value == "" { + return invalid + } else if t.Expiry.IsZero() { + return fresh + } else if timeNow().After(t.Expiry.Round(0)) { + return invalid + } else if timeNow().After(t.Expiry.Round(0).Add(-c.expireEarly)) { + return stale + } + return fresh +} + +// tokenAsync uses a bool to ensure that only one non-blocking token refresh +// happens at a time, even if multiple callers have entered this function +// concurrently. This avoids creating an arbitrary number of concurrent +// goroutines. Retries should be attempted and managed within the Token method. +// If the refresh attempt fails, no further attempts are made until the refresh +// window expires and the token enters the invalid state, at which point the +// blocking call to Token should likely return the same error on the main goroutine. +func (c *cachedTokenProvider) tokenAsync(ctx context.Context) { + fn := func() { + c.mu.Lock() + c.isRefreshRunning = true + c.mu.Unlock() + t, err := c.tp.Token(ctx) + c.mu.Lock() + defer c.mu.Unlock() + c.isRefreshRunning = false + if err != nil { + // Discard errors from the non-blocking refresh, but prevent further + // attempts. 
+ c.isRefreshErr = true + return + } + c.cachedToken = t + } + c.mu.Lock() + defer c.mu.Unlock() + if !c.isRefreshRunning && !c.isRefreshErr { + go fn() + } +} + +func (c *cachedTokenProvider) tokenBlocking(ctx context.Context) (*Token, error) { + c.mu.Lock() + defer c.mu.Unlock() + c.isRefreshErr = false + if c.cachedToken.IsValid() || (!c.autoRefresh && !c.cachedToken.isEmpty()) { return c.cachedToken, nil } t, err := c.tp.Token(ctx) @@ -423,12 +538,12 @@ func (tp tokenProvider2LO) Token(ctx context.Context) (*Token, error) { v := url.Values{} v.Set("grant_type", defaultGrantType) v.Set("assertion", payload) - resp, err := tp.Client.PostForm(tp.opts.TokenURL, v) + req, err := http.NewRequestWithContext(ctx, "POST", tp.opts.TokenURL, strings.NewReader(v.Encode())) if err != nil { - return nil, fmt.Errorf("auth: cannot fetch token: %w", err) + return nil, err } - defer resp.Body.Close() - body, err := internal.ReadAll(resp.Body) + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + resp, body, err := internal.DoRequest(tp.Client, req) if err != nil { return nil, fmt.Errorf("auth: cannot fetch token: %w", err) } diff --git a/vendor/cloud.google.com/go/auth/credentials/compute.go b/vendor/cloud.google.com/go/auth/credentials/compute.go index f3ec888242..6f70fa353b 100644 --- a/vendor/cloud.google.com/go/auth/credentials/compute.go +++ b/vendor/cloud.google.com/go/auth/credentials/compute.go @@ -37,9 +37,10 @@ var ( // computeTokenProvider creates a [cloud.google.com/go/auth.TokenProvider] that // uses the metadata service to retrieve tokens. -func computeTokenProvider(earlyExpiry time.Duration, scope ...string) auth.TokenProvider { - return auth.NewCachedTokenProvider(computeProvider{scopes: scope}, &auth.CachedTokenProviderOptions{ - ExpireEarly: earlyExpiry, +func computeTokenProvider(opts *DetectOptions) auth.TokenProvider { + return auth.NewCachedTokenProvider(computeProvider{scopes: opts.Scopes}, &auth.CachedTokenProviderOptions{ + ExpireEarly: opts.EarlyTokenRefresh, + DisableAsyncRefresh: opts.DisableAsyncRefresh, }) } diff --git a/vendor/cloud.google.com/go/auth/credentials/detect.go b/vendor/cloud.google.com/go/auth/credentials/detect.go index cb3f44f587..2d9a73edf3 100644 --- a/vendor/cloud.google.com/go/auth/credentials/detect.go +++ b/vendor/cloud.google.com/go/auth/credentials/detect.go @@ -37,6 +37,9 @@ const ( googleAuthURL = "https://accounts.google.com/o/oauth2/auth" googleTokenURL = "https://oauth2.googleapis.com/token" + // GoogleMTLSTokenURL is Google's default OAuth2.0 mTLS endpoint. 
+ GoogleMTLSTokenURL = "https://oauth2.mtls.googleapis.com/token" + // Help on default credentials adcSetupURL = "https://cloud.google.com/docs/authentication/external/set-up-adc" ) @@ -73,16 +76,18 @@ func DetectDefault(opts *DetectOptions) (*auth.Credentials, error) { if err := opts.validate(); err != nil { return nil, err } - if opts.CredentialsJSON != nil { + if len(opts.CredentialsJSON) > 0 { return readCredentialsFileJSON(opts.CredentialsJSON, opts) } if opts.CredentialsFile != "" { return readCredentialsFile(opts.CredentialsFile, opts) } if filename := os.Getenv(credsfile.GoogleAppCredsEnvVar); filename != "" { - if creds, err := readCredentialsFile(filename, opts); err == nil { - return creds, err + creds, err := readCredentialsFile(filename, opts) + if err != nil { + return nil, err } + return creds, nil } fileName := credsfile.GetWellKnownFileName() @@ -92,7 +97,7 @@ func DetectDefault(opts *DetectOptions) (*auth.Credentials, error) { if OnGCE() { return auth.NewCredentials(&auth.CredentialsOptions{ - TokenProvider: computeTokenProvider(opts.EarlyTokenRefresh, opts.Scopes...), + TokenProvider: computeTokenProvider(opts), ProjectIDProvider: auth.CredentialsPropertyFunc(func(context.Context) (string, error) { return metadata.ProjectID() }), @@ -116,8 +121,13 @@ type DetectOptions struct { // Optional. Subject string // EarlyTokenRefresh configures how early before a token expires that it - // should be refreshed. + // should be refreshed. Once the token’s time until expiration has entered + // this refresh window the token is considered valid but stale. If unset, + // the default value is 3 minutes and 45 seconds. Optional. EarlyTokenRefresh time.Duration + // DisableAsyncRefresh configures a synchronous workflow that refreshes + // stale tokens while blocking. The default is false. Optional. + DisableAsyncRefresh bool // AuthHandlerOptions configures an authorization handler and other options // for 3LO flows. It is required, and only used, for client credential // flows. diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/aws_provider.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/aws_provider.go index d9e1dcddf6..a34f6b06f8 100644 --- a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/aws_provider.go +++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/aws_provider.go @@ -122,7 +122,7 @@ func (sp *awsSubjectProvider) subjectToken(ctx context.Context) (string, error) // Generate the signed request to AWS STS GetCallerIdentity API. // Use the required regional endpoint. Otherwise, the request will fail. 
- req, err := http.NewRequest("POST", strings.Replace(sp.RegionalCredVerificationURL, "{region}", sp.region, 1), nil) + req, err := http.NewRequestWithContext(ctx, "POST", strings.Replace(sp.RegionalCredVerificationURL, "{region}", sp.region, 1), nil) if err != nil { return "", err } @@ -194,20 +194,14 @@ func (sp *awsSubjectProvider) getAWSSessionToken(ctx context.Context) (string, e } req.Header.Set(awsIMDSv2SessionTTLHeader, awsIMDSv2SessionTTL) - resp, err := sp.Client.Do(req) - if err != nil { - return "", err - } - defer resp.Body.Close() - - respBody, err := internal.ReadAll(resp.Body) + resp, body, err := internal.DoRequest(sp.Client, req) if err != nil { return "", err } if resp.StatusCode != http.StatusOK { - return "", fmt.Errorf("credentials: unable to retrieve AWS session token: %s", respBody) + return "", fmt.Errorf("credentials: unable to retrieve AWS session token: %s", body) } - return string(respBody), nil + return string(body), nil } func (sp *awsSubjectProvider) getRegion(ctx context.Context, headers map[string]string) (string, error) { @@ -233,29 +227,21 @@ func (sp *awsSubjectProvider) getRegion(ctx context.Context, headers map[string] for name, value := range headers { req.Header.Add(name, value) } - - resp, err := sp.Client.Do(req) + resp, body, err := internal.DoRequest(sp.Client, req) if err != nil { return "", err } - defer resp.Body.Close() - - respBody, err := internal.ReadAll(resp.Body) - if err != nil { - return "", err - } - if resp.StatusCode != http.StatusOK { - return "", fmt.Errorf("credentials: unable to retrieve AWS region - %s", respBody) + return "", fmt.Errorf("credentials: unable to retrieve AWS region - %s", body) } // This endpoint will return the region in format: us-east-2b. // Only the us-east-2 part should be used. 
- bodyLen := len(respBody) + bodyLen := len(body) if bodyLen == 0 { return "", nil } - return string(respBody[:bodyLen-1]), nil + return string(body[:bodyLen-1]), nil } func (sp *awsSubjectProvider) getSecurityCredentials(ctx context.Context, headers map[string]string) (result *AwsSecurityCredentials, err error) { @@ -299,22 +285,17 @@ func (sp *awsSubjectProvider) getMetadataSecurityCredentials(ctx context.Context for name, value := range headers { req.Header.Add(name, value) } - - resp, err := sp.Client.Do(req) - if err != nil { - return result, err - } - defer resp.Body.Close() - - respBody, err := internal.ReadAll(resp.Body) + resp, body, err := internal.DoRequest(sp.Client, req) if err != nil { return result, err } if resp.StatusCode != http.StatusOK { - return result, fmt.Errorf("credentials: unable to retrieve AWS security credentials - %s", respBody) + return result, fmt.Errorf("credentials: unable to retrieve AWS security credentials - %s", body) + } + if err := json.Unmarshal(body, &result); err != nil { + return nil, err } - err = json.Unmarshal(respBody, &result) - return result, err + return result, nil } func (sp *awsSubjectProvider) getMetadataRoleName(ctx context.Context, headers map[string]string) (string, error) { @@ -329,20 +310,14 @@ func (sp *awsSubjectProvider) getMetadataRoleName(ctx context.Context, headers m req.Header.Add(name, value) } - resp, err := sp.Client.Do(req) - if err != nil { - return "", err - } - defer resp.Body.Close() - - respBody, err := internal.ReadAll(resp.Body) + resp, body, err := internal.DoRequest(sp.Client, req) if err != nil { return "", err } if resp.StatusCode != http.StatusOK { - return "", fmt.Errorf("credentials: unable to retrieve AWS role name - %s", respBody) + return "", fmt.Errorf("credentials: unable to retrieve AWS role name - %s", body) } - return string(respBody), nil + return string(body), nil } // awsRequestSigner is a utility class to sign http requests using an AWS V4 signature. 
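The hunks above and below all repeat one refactor: each provider previously called client.Do, deferred the body close, and read the body by hand; they now delegate to a shared internal.DoRequest helper (added to internal/internal.go later in this diff). A hedged, self-contained sketch of that helper's shape, with io.ReadAll and an assumed 1 MiB cap standing in for the package's own ReadAll and maxBodySize:

package main

import (
	"context"
	"fmt"
	"io"
	"net/http"
	"time"
)

// doRequest mirrors the internal.DoRequest pattern from this diff: execute the
// request, close the body exactly once, and hand back the response together
// with the fully read body so call sites only branch on errors and status codes.
func doRequest(client *http.Client, req *http.Request) (*http.Response, []byte, error) {
	resp, err := client.Do(req)
	if err != nil {
		return nil, nil, err
	}
	defer resp.Body.Close()
	// The vendored helper caps the read with its internal maxBodySize; the
	// 1 MiB limit here is an assumption for illustration only.
	body, err := io.ReadAll(io.LimitReader(resp.Body, 1<<20))
	if err != nil {
		return nil, nil, err
	}
	return resp, body, nil
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	req, _ := http.NewRequestWithContext(ctx, http.MethodGet, "https://example.com", nil)
	resp, body, err := doRequest(http.DefaultClient, req)
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	fmt.Println(resp.StatusCode, len(body))
}

Centralizing the close-and-read also removes the leaked-body and missing-size-cap failure modes that the deleted lines above had to guard against one call site at a time.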
diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/url_provider.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/url_provider.go index 22b8af1c11..e33d35a268 100644 --- a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/url_provider.go +++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/url_provider.go @@ -48,27 +48,21 @@ func (sp *urlSubjectProvider) subjectToken(ctx context.Context) (string, error) for key, val := range sp.Headers { req.Header.Add(key, val) } - resp, err := sp.Client.Do(req) + resp, body, err := internal.DoRequest(sp.Client, req) if err != nil { return "", fmt.Errorf("credentials: invalid response when retrieving subject token: %w", err) } - defer resp.Body.Close() - - respBody, err := internal.ReadAll(resp.Body) - if err != nil { - return "", fmt.Errorf("credentials: invalid body in subject token URL query: %w", err) - } if c := resp.StatusCode; c < http.StatusOK || c >= http.StatusMultipleChoices { - return "", fmt.Errorf("credentials: status code %d: %s", c, respBody) + return "", fmt.Errorf("credentials: status code %d: %s", c, body) } if sp.Format == nil { - return string(respBody), nil + return string(body), nil } switch sp.Format.Type { case "json": jsonData := make(map[string]interface{}) - err = json.Unmarshal(respBody, &jsonData) + err = json.Unmarshal(body, &jsonData) if err != nil { return "", fmt.Errorf("credentials: failed to unmarshal subject token file: %w", err) } @@ -82,7 +76,7 @@ func (sp *urlSubjectProvider) subjectToken(ctx context.Context) (string, error) } return token, nil case fileTypeText: - return string(respBody), nil + return string(body), nil default: return "", errors.New("credentials: invalid credential_source file format type: " + sp.Format.Type) } diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/gdch/gdch.go b/vendor/cloud.google.com/go/auth/credentials/internal/gdch/gdch.go index 467edb9088..720045d3b0 100644 --- a/vendor/cloud.google.com/go/auth/credentials/internal/gdch/gdch.go +++ b/vendor/cloud.google.com/go/auth/credentials/internal/gdch/gdch.go @@ -25,6 +25,7 @@ import ( "net/http" "net/url" "os" + "strings" "time" "cloud.google.com/go/auth" @@ -129,12 +130,13 @@ func (g gdchProvider) Token(ctx context.Context) (*auth.Token, error) { v.Set("requested_token_type", requestTokenType) v.Set("subject_token", payload) v.Set("subject_token_type", subjectTokenType) - resp, err := g.client.PostForm(g.tokenURL, v) + + req, err := http.NewRequestWithContext(ctx, "POST", g.tokenURL, strings.NewReader(v.Encode())) if err != nil { - return nil, fmt.Errorf("credentials: cannot fetch token: %w", err) + return nil, err } - defer resp.Body.Close() - body, err := internal.ReadAll(resp.Body) + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + resp, body, err := internal.DoRequest(g.client, req) if err != nil { return nil, fmt.Errorf("credentials: cannot fetch token: %w", err) } diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/impersonate.go b/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/impersonate.go index 3ceab873b8..ed53afa519 100644 --- a/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/impersonate.go +++ b/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/impersonate.go @@ -109,15 +109,10 @@ func (o *Options) Token(ctx context.Context) (*auth.Token, error) { if err := setAuthHeader(ctx, o.Tp, req); err != nil { return nil, err } - 
resp, err := o.Client.Do(req) + resp, body, err := internal.DoRequest(o.Client, req) if err != nil { return nil, fmt.Errorf("credentials: unable to generate access token: %w", err) } - defer resp.Body.Close() - body, err := internal.ReadAll(resp.Body) - if err != nil { - return nil, fmt.Errorf("credentials: unable to read body: %w", err) - } if c := resp.StatusCode; c < http.StatusOK || c >= http.StatusMultipleChoices { return nil, fmt.Errorf("credentials: status code %d: %s", c, body) } diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/stsexchange/sts_exchange.go b/vendor/cloud.google.com/go/auth/credentials/internal/stsexchange/sts_exchange.go index f70e0aef48..768a9dafc1 100644 --- a/vendor/cloud.google.com/go/auth/credentials/internal/stsexchange/sts_exchange.go +++ b/vendor/cloud.google.com/go/auth/credentials/internal/stsexchange/sts_exchange.go @@ -93,16 +93,10 @@ func doRequest(ctx context.Context, opts *Options, data url.Values) (*TokenRespo } req.Header.Set("Content-Length", strconv.Itoa(len(encodedData))) - resp, err := opts.Client.Do(req) + resp, body, err := internal.DoRequest(opts.Client, req) if err != nil { return nil, fmt.Errorf("credentials: invalid response from Secure Token Server: %w", err) } - defer resp.Body.Close() - - body, err := internal.ReadAll(resp.Body) - if err != nil { - return nil, err - } if c := resp.StatusCode; c < http.StatusOK || c > http.StatusMultipleChoices { return nil, fmt.Errorf("credentials: status code %d: %s", c, body) } diff --git a/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go b/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go index 75bda4c638..5c3bc66f99 100644 --- a/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go +++ b/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go @@ -16,6 +16,7 @@ package grpctransport import ( "context" + "crypto/tls" "errors" "fmt" "net/http" @@ -45,6 +46,11 @@ var ( timeoutDialerOption grpc.DialOption ) +// ClientCertProvider is a function that returns a TLS client certificate to be +// used when opening TLS connections. It follows the same semantics as +// [crypto/tls.Config.GetClientCertificate]. +type ClientCertProvider = func(*tls.CertificateRequestInfo) (*tls.Certificate, error) + // Options used to configure a [GRPCClientConnPool] from [Dial]. type Options struct { // DisableTelemetry disables default telemetry (OpenTelemetry). An example @@ -69,6 +75,10 @@ type Options struct { // Credentials used to add Authorization metadata to all requests. If set // DetectOpts are ignored. Credentials *auth.Credentials + // ClientCertProvider is a function that returns a TLS client certificate to + // be used when opening TLS connections. It follows the same semantics as + // crypto/tls.Config.GetClientCertificate. + ClientCertProvider ClientCertProvider // DetectOpts configures settings for detect Application Default // Credentials. DetectOpts *credentials.DetectOptions @@ -77,6 +87,9 @@ type Options struct { // configured for the client, which will be compared to the universe domain // that is separately configured for the credentials. UniverseDomain string + // APIKey specifies an API key to be used as the basis for authentication. + // If set DetectOpts are ignored. + APIKey string // InternalOptions are NOT meant to be set directly by consumers of this // package, they should only be set by generated client code. 
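A note on the APIKey option introduced above: when it is set, dial() (in the hunks below) skips credential detection and instead attaches a per-RPC credential, grpcKeyProvider, that sends the key as "X-goog-api-key" request metadata. A hedged, self-contained sketch of that shape, structurally matching grpc's PerRPCCredentials interface without importing it; the demo values are invented:

package main

import (
	"context"
	"fmt"
)

// grpcKeyProvider mirrors the per-RPC credential added further down in this
// file: the API key travels as "X-goog-api-key" metadata, merged with any
// extra caller-supplied metadata.
type grpcKeyProvider struct {
	apiKey   string
	metadata map[string]string
	secure   bool
}

func (g *grpcKeyProvider) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
	md := make(map[string]string, len(g.metadata)+1)
	md["X-goog-api-key"] = g.apiKey
	for k, v := range g.metadata {
		md[k] = v
	}
	return md, nil
}

// RequireTransportSecurity reports whether the key may only be sent over a
// secure connection, matching how dial() threads its secure flag through.
func (g *grpcKeyProvider) RequireTransportSecurity() bool { return g.secure }

func main() {
	// Invented demo values; the real flow builds this from Options.APIKey
	// and Options.Metadata inside dial().
	p := &grpcKeyProvider{
		apiKey:   "example-api-key",
		metadata: map[string]string{"x-goog-user-project": "example-project"},
		secure:   true,
	}
	md, _ := p.GetRequestMetadata(context.Background())
	fmt.Println(md, p.RequireTransportSecurity())
}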
@@ -99,7 +112,8 @@ func (o *Options) validate() error { if o.InternalOptions != nil && o.InternalOptions.SkipValidation { return nil } - hasCreds := o.Credentials != nil || + hasCreds := o.APIKey != "" || + o.Credentials != nil || (o.DetectOpts != nil && len(o.DetectOpts.CredentialsJSON) > 0) || (o.DetectOpts != nil && o.DetectOpts.CredentialsFile != "") if o.DisableAuthentication && hasCreds { @@ -125,6 +139,13 @@ func (o *Options) resolveDetectOptions() *credentials.DetectOptions { if len(do.Scopes) == 0 && do.Audience == "" && io != nil { do.Audience = o.InternalOptions.DefaultAudience } + if o.ClientCertProvider != nil { + tlsConfig := &tls.Config{ + GetClientCertificate: o.ClientCertProvider, + } + do.Client = transport.DefaultHTTPClientWithTLS(tlsConfig) + do.TokenURL = credentials.GoogleMTLSTokenURL + } return do } @@ -189,9 +210,10 @@ func Dial(ctx context.Context, secure bool, opts *Options) (GRPCClientConnPool, // return a GRPCClientConnPool if pool == 1 or else a pool of them if >1 func dial(ctx context.Context, secure bool, opts *Options) (*grpc.ClientConn, error) { tOpts := &transport.Options{ - Endpoint: opts.Endpoint, - Client: opts.client(), - UniverseDomain: opts.UniverseDomain, + Endpoint: opts.Endpoint, + ClientCertProvider: opts.ClientCertProvider, + Client: opts.client(), + UniverseDomain: opts.UniverseDomain, } if io := opts.InternalOptions; io != nil { tOpts.DefaultEndpointTemplate = io.DefaultEndpointTemplate @@ -213,8 +235,21 @@ func dial(ctx context.Context, secure bool, opts *Options) (*grpc.ClientConn, er grpc.WithTransportCredentials(transportCreds), } - // Authentication can only be sent when communicating over a secure connection. - if !opts.DisableAuthentication { + // Ensure the token exchange HTTP transport uses the same ClientCertProvider as the GRPC API transport. + opts.ClientCertProvider, err = transport.GetClientCertificateProvider(tOpts) + if err != nil { + return nil, err + } + + if opts.APIKey != "" { + grpcOpts = append(grpcOpts, + grpc.WithPerRPCCredentials(&grpcKeyProvider{ + apiKey: opts.APIKey, + metadata: opts.Metadata, + secure: secure, + }), + ) + } else if !opts.DisableAuthentication { metadata := opts.Metadata var creds *auth.Credentials @@ -259,6 +294,26 @@ func dial(ctx context.Context, secure bool, opts *Options) (*grpc.ClientConn, er return grpc.DialContext(ctx, endpoint, grpcOpts...) } +// grpcKeyProvider satisfies https://pkg.go.dev/google.golang.org/grpc/credentials#PerRPCCredentials. +type grpcKeyProvider struct { + apiKey string + metadata map[string]string + secure bool +} + +func (g *grpcKeyProvider) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { + metadata := make(map[string]string, len(g.metadata)+1) + metadata["X-goog-api-key"] = g.apiKey + for k, v := range g.metadata { + metadata[k] = v + } + return metadata, nil +} + +func (g *grpcKeyProvider) RequireTransportSecurity() bool { + return g.secure +} + // grpcCredentialsProvider satisfies https://pkg.go.dev/google.golang.org/grpc/credentials#PerRPCCredentials. 
type grpcCredentialsProvider struct { creds *auth.Credentials diff --git a/vendor/cloud.google.com/go/auth/httptransport/httptransport.go b/vendor/cloud.google.com/go/auth/httptransport/httptransport.go index ef09c1b752..969c8d4d20 100644 --- a/vendor/cloud.google.com/go/auth/httptransport/httptransport.go +++ b/vendor/cloud.google.com/go/auth/httptransport/httptransport.go @@ -116,6 +116,13 @@ func (o *Options) resolveDetectOptions() *detect.DetectOptions { if len(do.Scopes) == 0 && do.Audience == "" && io != nil { do.Audience = o.InternalOptions.DefaultAudience } + if o.ClientCertProvider != nil { + tlsConfig := &tls.Config{ + GetClientCertificate: o.ClientCertProvider, + } + do.Client = transport.DefaultHTTPClientWithTLS(tlsConfig) + do.TokenURL = detect.GoogleMTLSTokenURL + } return do } @@ -195,6 +202,8 @@ func NewClient(opts *Options) (*http.Client, error) { if baseRoundTripper == nil { baseRoundTripper = defaultBaseTransport(clientCertProvider, dialTLSContext) } + // Ensure the token exchange transport uses the same ClientCertProvider as the API transport. + opts.ClientCertProvider = clientCertProvider trans, err := newTransport(baseRoundTripper, opts) if err != nil { return nil, err diff --git a/vendor/cloud.google.com/go/auth/internal/internal.go b/vendor/cloud.google.com/go/auth/internal/internal.go index 70534e809a..8c328e2fbd 100644 --- a/vendor/cloud.google.com/go/auth/internal/internal.go +++ b/vendor/cloud.google.com/go/auth/internal/internal.go @@ -124,6 +124,21 @@ func GetProjectID(b []byte, override string) string { return v.Project } +// DoRequest executes the provided req with the client. It reads the response +// body, closes it, and returns it. +func DoRequest(client *http.Client, req *http.Request) (*http.Response, []byte, error) { + resp, err := client.Do(req) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + body, err := ReadAll(io.LimitReader(resp.Body, maxBodySize)) + if err != nil { + return nil, nil, err + } + return resp, body, nil +} + // ReadAll consumes the whole reader and safely reads the content of its body // with some overflow protection. func ReadAll(r io.Reader) ([]byte, error) { @@ -167,8 +182,7 @@ func (c *ComputeUniverseDomainProvider) GetProperty(ctx context.Context) (string // httpGetMetadataUniverseDomain is a package var for unit test substitution. 
var httpGetMetadataUniverseDomain = func(ctx context.Context) (string, error) { client := metadata.NewClient(&http.Client{Timeout: time.Second}) - // TODO(quartzmo): set ctx on request - return client.Get("universe/universe_domain") + return client.GetWithContext(ctx, "universe/universe_domain") } func getMetadataUniverseDomain(ctx context.Context) (string, error) { diff --git a/vendor/cloud.google.com/go/auth/internal/transport/cba.go b/vendor/cloud.google.com/go/auth/internal/transport/cba.go index 6ef88311a2..d94e0af08a 100644 --- a/vendor/cloud.google.com/go/auth/internal/transport/cba.go +++ b/vendor/cloud.google.com/go/auth/internal/transport/cba.go @@ -176,7 +176,7 @@ func GetHTTPTransportConfig(opts *Options) (cert.Provider, func(context.Context, } func getTransportConfig(opts *Options) (*transportConfig, error) { - clientCertSource, err := getClientCertificateSource(opts) + clientCertSource, err := GetClientCertificateProvider(opts) if err != nil { return nil, err } @@ -210,13 +210,13 @@ func getTransportConfig(opts *Options) (*transportConfig, error) { }, nil } -// getClientCertificateSource returns a default client certificate source, if +// GetClientCertificateProvider returns a default client certificate source, if // not provided by the user. // // A nil default source can be returned if the source does not exist. Any exceptions // encountered while initializing the default source will be reported as client // error (ex. corrupt metadata file). -func getClientCertificateSource(opts *Options) (cert.Provider, error) { +func GetClientCertificateProvider(opts *Options) (cert.Provider, error) { if !isClientCertificateEnabled(opts) { return nil, nil } else if opts.ClientCertProvider != nil { diff --git a/vendor/cloud.google.com/go/auth/internal/transport/cert/default_cert.go b/vendor/cloud.google.com/go/auth/internal/transport/cert/default_cert.go index 96582ce7b6..5cedc50f1e 100644 --- a/vendor/cloud.google.com/go/auth/internal/transport/cert/default_cert.go +++ b/vendor/cloud.google.com/go/auth/internal/transport/cert/default_cert.go @@ -50,11 +50,14 @@ var errSourceUnavailable = errors.New("certificate source is unavailable") // returned to indicate that a default certificate source is unavailable. 
func DefaultProvider() (Provider, error) { defaultCert.once.Do(func() { - defaultCert.provider, defaultCert.err = NewEnterpriseCertificateProxyProvider("") + defaultCert.provider, defaultCert.err = NewWorkloadX509CertProvider("") if errors.Is(defaultCert.err, errSourceUnavailable) { - defaultCert.provider, defaultCert.err = NewSecureConnectProvider("") + defaultCert.provider, defaultCert.err = NewEnterpriseCertificateProxyProvider("") if errors.Is(defaultCert.err, errSourceUnavailable) { - defaultCert.provider, defaultCert.err = nil, nil + defaultCert.provider, defaultCert.err = NewSecureConnectProvider("") + if errors.Is(defaultCert.err, errSourceUnavailable) { + defaultCert.provider, defaultCert.err = nil, nil + } } } }) diff --git a/vendor/cloud.google.com/go/auth/internal/transport/cert/workload_cert.go b/vendor/cloud.google.com/go/auth/internal/transport/cert/workload_cert.go index ea1e1febbc..e8675bf824 100644 --- a/vendor/cloud.google.com/go/auth/internal/transport/cert/workload_cert.go +++ b/vendor/cloud.google.com/go/auth/internal/transport/cert/workload_cert.go @@ -99,7 +99,7 @@ func getCertAndKeyFiles(configFilePath string) (string, string, error) { } if config.CertConfigs.Workload == nil { - return "", "", errors.New("no Workload Identity Federation certificate information found in the certificate configuration file") + return "", "", errSourceUnavailable } certFile := config.CertConfigs.Workload.CertPath diff --git a/vendor/cloud.google.com/go/auth/internal/transport/transport.go b/vendor/cloud.google.com/go/auth/internal/transport/transport.go index b76386d3c0..718a6b1714 100644 --- a/vendor/cloud.google.com/go/auth/internal/transport/transport.go +++ b/vendor/cloud.google.com/go/auth/internal/transport/transport.go @@ -17,7 +17,11 @@ package transport import ( + "crypto/tls" "fmt" + "net" + "net/http" + "time" "cloud.google.com/go/auth/credentials" ) @@ -49,11 +53,11 @@ func CloneDetectOptions(oldDo *credentials.DetectOptions) *credentials.DetectOpt } // Smartly size this memory and copy below. - if oldDo.CredentialsJSON != nil { + if len(oldDo.CredentialsJSON) > 0 { newDo.CredentialsJSON = make([]byte, len(oldDo.CredentialsJSON)) copy(newDo.CredentialsJSON, oldDo.CredentialsJSON) } - if oldDo.Scopes != nil { + if len(oldDo.Scopes) > 0 { newDo.Scopes = make([]string, len(oldDo.Scopes)) copy(newDo.Scopes, oldDo.Scopes) } @@ -74,3 +78,26 @@ func ValidateUniverseDomain(clientUniverseDomain, credentialsUniverseDomain stri } return nil } + +// DefaultHTTPClientWithTLS constructs an HTTPClient using the provided tlsConfig, to support mTLS. +func DefaultHTTPClientWithTLS(tlsConfig *tls.Config) *http.Client { + trans := baseTransport() + trans.TLSClientConfig = tlsConfig + return &http.Client{Transport: trans} +} + +func baseTransport() *http.Transport { + return &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + }).DialContext, + MaxIdleConns: 100, + MaxIdleConnsPerHost: 100, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + } +} diff --git a/vendor/cloud.google.com/go/auth/threelegged.go b/vendor/cloud.google.com/go/auth/threelegged.go index 1b8d83c4b4..a8ce6cd8a8 100644 --- a/vendor/cloud.google.com/go/auth/threelegged.go +++ b/vendor/cloud.google.com/go/auth/threelegged.go @@ -62,7 +62,8 @@ type Options3LO struct { // Optional. 
Client *http.Client // EarlyTokenExpiry is the time before the token expires that it should be - // refreshed. If not set the default value is 10 seconds. Optional. + // refreshed. If not set the default value is 3 minutes and 45 seconds. + // Optional. EarlyTokenExpiry time.Duration // AuthHandlerOpts provides a set of options for doing a @@ -284,7 +285,7 @@ func fetchToken(ctx context.Context, o *Options3LO, v url.Values) (*Token, strin v.Set("client_secret", o.ClientSecret) } } - req, err := http.NewRequest("POST", o.TokenURL, strings.NewReader(v.Encode())) + req, err := http.NewRequestWithContext(ctx, "POST", o.TokenURL, strings.NewReader(v.Encode())) if err != nil { return nil, refreshToken, err } @@ -294,25 +295,19 @@ func fetchToken(ctx context.Context, o *Options3LO, v url.Values) (*Token, strin } // Make request - r, err := o.client().Do(req.WithContext(ctx)) + resp, body, err := internal.DoRequest(o.client(), req) if err != nil { return nil, refreshToken, err } - body, err := internal.ReadAll(r.Body) - r.Body.Close() - if err != nil { - return nil, refreshToken, fmt.Errorf("auth: cannot fetch token: %w", err) - } - - failureStatus := r.StatusCode < 200 || r.StatusCode > 299 + failureStatus := resp.StatusCode < 200 || resp.StatusCode > 299 tokError := &Error{ - Response: r, + Response: resp, Body: body, } var token *Token // errors ignored because of default switch on content - content, _, _ := mime.ParseMediaType(r.Header.Get("Content-Type")) + content, _, _ := mime.ParseMediaType(resp.Header.Get("Content-Type")) switch content { case "application/x-www-form-urlencoded", "text/plain": // some endpoints return a query string diff --git a/vendor/cloud.google.com/go/compute/metadata/CHANGES.md b/vendor/cloud.google.com/go/compute/metadata/CHANGES.md index 967e060747..2cbb405dec 100644 --- a/vendor/cloud.google.com/go/compute/metadata/CHANGES.md +++ b/vendor/cloud.google.com/go/compute/metadata/CHANGES.md @@ -1,5 +1,17 @@ # Changes +## [0.4.0](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.3.0...compute/metadata/v0.4.0) (2024-07-01) + + +### Features + +* **compute/metadata:** Add context for all functions/methods ([#10370](https://github.com/googleapis/google-cloud-go/issues/10370)) ([66b8efe](https://github.com/googleapis/google-cloud-go/commit/66b8efe7ad877e052b2987bb4475477e38c67bb3)) + + +### Documentation + +* **compute/metadata:** Update OnGCE description ([#10408](https://github.com/googleapis/google-cloud-go/issues/10408)) ([6a46dca](https://github.com/googleapis/google-cloud-go/commit/6a46dca4eae4f88ec6f88822e01e5bf8aeca787f)) + ## [0.3.0](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.2.3...compute/metadata/v0.3.0) (2024-04-15) diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata.go b/vendor/cloud.google.com/go/compute/metadata/metadata.go index f67e3c7eea..e686f24d15 100644 --- a/vendor/cloud.google.com/go/compute/metadata/metadata.go +++ b/vendor/cloud.google.com/go/compute/metadata/metadata.go @@ -88,16 +88,16 @@ func (suffix NotDefinedError) Error() string { return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix)) } -func (c *cachedValue) get(cl *Client) (v string, err error) { +func (c *cachedValue) get(ctx context.Context, cl *Client) (v string, err error) { defer c.mu.Unlock() c.mu.Lock() if c.v != "" { return c.v, nil } if c.trim { - v, err = cl.getTrimmed(context.Background(), c.k) + v, err = cl.getTrimmed(ctx, c.k) } else { - v, err = cl.GetWithContext(context.Background(), c.k) 
+ v, err = cl.GetWithContext(ctx, c.k) } if err == nil { c.v = v @@ -110,7 +110,9 @@ var ( onGCE bool ) -// OnGCE reports whether this process is running on Google Compute Engine. +// OnGCE reports whether this process is running on Google Compute Platforms. +// NOTE: A true return from `OnGCE` does not guarantee that the metadata server +// is accessible from this process or that all of its metadata is defined. func OnGCE() bool { onGCEOnce.Do(initOnGCE) return onGCE @@ -203,6 +205,8 @@ func systemInfoSuggestsGCE() bool { } // Subscribe calls Client.SubscribeWithContext on the default client. +// +// Deprecated: Please use the context aware variant [SubscribeWithContext]. func Subscribe(suffix string, fn func(v string, ok bool) error) error { return defaultClient.SubscribeWithContext(context.Background(), suffix, func(ctx context.Context, v string, ok bool) error { return fn(v, ok) }) } @@ -225,55 +229,188 @@ func GetWithContext(ctx context.Context, suffix string) (string, error) { } // ProjectID returns the current instance's project ID string. -func ProjectID() (string, error) { return defaultClient.ProjectID() } +// +// Deprecated: Please use the context aware variant [ProjectIDWithContext]. +func ProjectID() (string, error) { + return defaultClient.ProjectIDWithContext(context.Background()) +} + +// ProjectIDWithContext returns the current instance's project ID string. +func ProjectIDWithContext(ctx context.Context) (string, error) { + return defaultClient.ProjectIDWithContext(ctx) +} // NumericProjectID returns the current instance's numeric project ID. -func NumericProjectID() (string, error) { return defaultClient.NumericProjectID() } +// +// Deprecated: Please use the context aware variant [NumericProjectIDWithContext]. +func NumericProjectID() (string, error) { + return defaultClient.NumericProjectIDWithContext(context.Background()) +} + +// NumericProjectIDWithContext returns the current instance's numeric project ID. +func NumericProjectIDWithContext(ctx context.Context) (string, error) { + return defaultClient.NumericProjectIDWithContext(ctx) +} // InternalIP returns the instance's primary internal IP address. -func InternalIP() (string, error) { return defaultClient.InternalIP() } +// +// Deprecated: Please use the context aware variant [InternalIPWithContext]. +func InternalIP() (string, error) { + return defaultClient.InternalIPWithContext(context.Background()) +} + +// InternalIPWithContext returns the instance's primary internal IP address. +func InternalIPWithContext(ctx context.Context) (string, error) { + return defaultClient.InternalIPWithContext(ctx) +} // ExternalIP returns the instance's primary external (public) IP address. -func ExternalIP() (string, error) { return defaultClient.ExternalIP() } +// +// Deprecated: Please use the context aware variant [ExternalIPWithContext]. +func ExternalIP() (string, error) { + return defaultClient.ExternalIPWithContext(context.Background()) +} -// Email calls Client.Email on the default client. -func Email(serviceAccount string) (string, error) { return defaultClient.Email(serviceAccount) } +// ExternalIPWithContext returns the instance's primary external (public) IP address. +func ExternalIPWithContext(ctx context.Context) (string, error) { + return defaultClient.ExternalIPWithContext(ctx) +} + +// Email calls Client.EmailWithContext on the default client. +// +// Deprecated: Please use the context aware variant [EmailWithContext]. 
+func Email(serviceAccount string) (string, error) { + return defaultClient.EmailWithContext(context.Background(), serviceAccount) +} + +// EmailWithContext calls Client.EmailWithContext on the default client. +func EmailWithContext(ctx context.Context, serviceAccount string) (string, error) { + return defaultClient.EmailWithContext(ctx, serviceAccount) +} // Hostname returns the instance's hostname. This will be of the form // "<instanceID>.c.<projID>.internal". -func Hostname() (string, error) { return defaultClient.Hostname() } +// +// Deprecated: Please use the context aware variant [HostnameWithContext]. +func Hostname() (string, error) { + return defaultClient.HostnameWithContext(context.Background()) +} + +// HostnameWithContext returns the instance's hostname. This will be of the form +// "<instanceID>.c.<projID>.internal". +func HostnameWithContext(ctx context.Context) (string, error) { + return defaultClient.HostnameWithContext(ctx) +} // InstanceTags returns the list of user-defined instance tags, // assigned when initially creating a GCE instance. -func InstanceTags() ([]string, error) { return defaultClient.InstanceTags() } +// +// Deprecated: Please use the context aware variant [InstanceTagsWithContext]. +func InstanceTags() ([]string, error) { + return defaultClient.InstanceTagsWithContext(context.Background()) +} + +// InstanceTagsWithContext returns the list of user-defined instance tags, +// assigned when initially creating a GCE instance. +func InstanceTagsWithContext(ctx context.Context) ([]string, error) { + return defaultClient.InstanceTagsWithContext(ctx) +} // InstanceID returns the current VM's numeric instance ID. -func InstanceID() (string, error) { return defaultClient.InstanceID() } +// +// Deprecated: Please use the context aware variant [InstanceIDWithContext]. +func InstanceID() (string, error) { + return defaultClient.InstanceIDWithContext(context.Background()) +} + +// InstanceIDWithContext returns the current VM's numeric instance ID. +func InstanceIDWithContext(ctx context.Context) (string, error) { + return defaultClient.InstanceIDWithContext(ctx) +} // InstanceName returns the current VM's instance ID string. -func InstanceName() (string, error) { return defaultClient.InstanceName() } +// +// Deprecated: Please use the context aware variant [InstanceNameWithContext]. +func InstanceName() (string, error) { + return defaultClient.InstanceNameWithContext(context.Background()) +} + +// InstanceNameWithContext returns the current VM's instance ID string. +func InstanceNameWithContext(ctx context.Context) (string, error) { + return defaultClient.InstanceNameWithContext(ctx) +} // Zone returns the current VM's zone, such as "us-central1-b". -func Zone() (string, error) { return defaultClient.Zone() } +// +// Deprecated: Please use the context aware variant [ZoneWithContext]. +func Zone() (string, error) { + return defaultClient.ZoneWithContext(context.Background()) +} -// InstanceAttributes calls Client.InstanceAttributes on the default client. -func InstanceAttributes() ([]string, error) { return defaultClient.InstanceAttributes() } +// ZoneWithContext returns the current VM's zone, such as "us-central1-b". +func ZoneWithContext(ctx context.Context) (string, error) { + return defaultClient.ZoneWithContext(ctx) +} -// ProjectAttributes calls Client.ProjectAttributes on the default client. -func ProjectAttributes() ([]string, error) { return defaultClient.ProjectAttributes() } +// InstanceAttributes calls Client.InstanceAttributesWithContext on the default client. 
+// +// Deprecated: Please use the context aware variant [InstanceAttributesWithContext]. +func InstanceAttributes() ([]string, error) { + return defaultClient.InstanceAttributesWithContext(context.Background()) +} -// InstanceAttributeValue calls Client.InstanceAttributeValue on the default client. +// InstanceAttributesWithContext calls Client.InstanceAttributesWithContext on the default client. +func InstanceAttributesWithContext(ctx context.Context) ([]string, error) { + return defaultClient.InstanceAttributesWithContext(ctx) +} + +// ProjectAttributes calls Client.ProjectAttributesWithContext on the default client. +// +// Deprecated: Please use the context aware variant [ProjectAttributesWithContext]. +func ProjectAttributes() ([]string, error) { + return defaultClient.ProjectAttributesWithContext(context.Background()) +} + +// ProjectAttributesWithContext calls Client.ProjectAttributesWithContext on the default client. +func ProjectAttributesWithContext(ctx context.Context) ([]string, error) { + return defaultClient.ProjectAttributesWithContext(ctx) +} + +// InstanceAttributeValue calls Client.InstanceAttributeValueWithContext on the default client. +// +// Deprecated: Please use the context aware variant [InstanceAttributeValueWithContext]. func InstanceAttributeValue(attr string) (string, error) { - return defaultClient.InstanceAttributeValue(attr) + return defaultClient.InstanceAttributeValueWithContext(context.Background(), attr) } -// ProjectAttributeValue calls Client.ProjectAttributeValue on the default client. +// InstanceAttributeValueWithContext calls Client.InstanceAttributeValueWithContext on the default client. +func InstanceAttributeValueWithContext(ctx context.Context, attr string) (string, error) { + return defaultClient.InstanceAttributeValueWithContext(ctx, attr) +} + +// ProjectAttributeValue calls Client.ProjectAttributeValueWithContext on the default client. +// +// Deprecated: Please use the context aware variant [ProjectAttributeValueWithContext]. func ProjectAttributeValue(attr string) (string, error) { - return defaultClient.ProjectAttributeValue(attr) + return defaultClient.ProjectAttributeValueWithContext(context.Background(), attr) +} + +// ProjectAttributeValueWithContext calls Client.ProjectAttributeValueWithContext on the default client. +func ProjectAttributeValueWithContext(ctx context.Context, attr string) (string, error) { + return defaultClient.ProjectAttributeValueWithContext(ctx, attr) } -// Scopes calls Client.Scopes on the default client. -func Scopes(serviceAccount string) ([]string, error) { return defaultClient.Scopes(serviceAccount) } +// Scopes calls Client.ScopesWithContext on the default client. +// +// Deprecated: Please use the context aware variant [ScopesWithContext]. +func Scopes(serviceAccount string) ([]string, error) { + return defaultClient.ScopesWithContext(context.Background(), serviceAccount) +} + +// ScopesWithContext calls Client.ScopesWithContext on the default client. +func ScopesWithContext(ctx context.Context, serviceAccount string) ([]string, error) { + return defaultClient.ScopesWithContext(ctx, serviceAccount) +} func strsContains(ss []string, s string) bool { for _, v := range ss { @@ -296,7 +433,6 @@ func NewClient(c *http.Client) *Client { if c == nil { return defaultClient } - return &Client{hc: c} } @@ -381,6 +517,10 @@ func (c *Client) Get(suffix string) (string, error) { // // If the requested metadata is not defined, the returned error will // be of type NotDefinedError. 
+// +// NOTE: Without an extra deadline on the context, this call can take up to 15 +// seconds in the worst case (with internal backoff retries), e.g. when the +// server is responding slowly. Pass a context with a tighter deadline when needed. func (c *Client) GetWithContext(ctx context.Context, suffix string) (string, error) { val, _, err := c.getETag(ctx, suffix) return val, err @@ -392,8 +532,8 @@ func (c *Client) getTrimmed(ctx context.Context, suffix string) (s string, err e return } -func (c *Client) lines(suffix string) ([]string, error) { - j, err := c.GetWithContext(context.Background(), suffix) +func (c *Client) lines(ctx context.Context, suffix string) ([]string, error) { + j, err := c.GetWithContext(ctx, suffix) if err != nil { return nil, err } @@ -405,45 +545,104 @@ func (c *Client) lines(suffix string) ([]string, error) { } // ProjectID returns the current instance's project ID string. -func (c *Client) ProjectID() (string, error) { return projID.get(c) } +// +// Deprecated: Please use the context aware variant [Client.ProjectIDWithContext]. +func (c *Client) ProjectID() (string, error) { return c.ProjectIDWithContext(context.Background()) } + +// ProjectIDWithContext returns the current instance's project ID string. +func (c *Client) ProjectIDWithContext(ctx context.Context) (string, error) { return projID.get(ctx, c) } // NumericProjectID returns the current instance's numeric project ID. -func (c *Client) NumericProjectID() (string, error) { return projNum.get(c) } +// +// Deprecated: Please use the context aware variant [Client.NumericProjectIDWithContext]. +func (c *Client) NumericProjectID() (string, error) { + return c.NumericProjectIDWithContext(context.Background()) +} + +// NumericProjectIDWithContext returns the current instance's numeric project ID. +func (c *Client) NumericProjectIDWithContext(ctx context.Context) (string, error) { + return projNum.get(ctx, c) +} // InstanceID returns the current VM's numeric instance ID. -func (c *Client) InstanceID() (string, error) { return instID.get(c) } +// +// Deprecated: Please use the context aware variant [Client.InstanceIDWithContext]. +func (c *Client) InstanceID() (string, error) { + return c.InstanceIDWithContext(context.Background()) +} + +// InstanceIDWithContext returns the current VM's numeric instance ID. +func (c *Client) InstanceIDWithContext(ctx context.Context) (string, error) { + return instID.get(ctx, c) +} // InternalIP returns the instance's primary internal IP address. +// +// Deprecated: Please use the context aware variant [Client.InternalIPWithContext]. func (c *Client) InternalIP() (string, error) { - return c.getTrimmed(context.Background(), "instance/network-interfaces/0/ip") + return c.InternalIPWithContext(context.Background()) +} + +// InternalIPWithContext returns the instance's primary internal IP address. +func (c *Client) InternalIPWithContext(ctx context.Context) (string, error) { + return c.getTrimmed(ctx, "instance/network-interfaces/0/ip") } // Email returns the email address associated with the service account. -// The account may be empty or the string "default" to use the instance's -// main account. +// +// Deprecated: Please use the context aware variant [Client.EmailWithContext]. func (c *Client) Email(serviceAccount string) (string, error) { + return c.EmailWithContext(context.Background(), serviceAccount) +} + +// EmailWithContext returns the email address associated with the service account. 
+// The serviceAccount parameter may be the empty string or "default" to use +// the instance's main account. +func (c *Client) EmailWithContext(ctx context.Context, serviceAccount string) (string, error) { if serviceAccount == "" { serviceAccount = "default" } - return c.getTrimmed(context.Background(), "instance/service-accounts/"+serviceAccount+"/email") + return c.getTrimmed(ctx, "instance/service-accounts/"+serviceAccount+"/email") } // ExternalIP returns the instance's primary external (public) IP address. +// +// Deprecated: Please use the context aware variant [Client.ExternalIPWithContext]. func (c *Client) ExternalIP() (string, error) { - return c.getTrimmed(context.Background(), "instance/network-interfaces/0/access-configs/0/external-ip") + return c.ExternalIPWithContext(context.Background()) +} + +// ExternalIPWithContext returns the instance's primary external (public) IP address. +func (c *Client) ExternalIPWithContext(ctx context.Context) (string, error) { + return c.getTrimmed(ctx, "instance/network-interfaces/0/access-configs/0/external-ip") } // Hostname returns the instance's hostname. This will be of the form // "<instanceID>.c.<projID>.internal". +// +// Deprecated: Please use the context aware variant [Client.HostnameWithContext]. func (c *Client) Hostname() (string, error) { - return c.getTrimmed(context.Background(), "instance/hostname") + return c.HostnameWithContext(context.Background()) } -// InstanceTags returns the list of user-defined instance tags, -// assigned when initially creating a GCE instance. +// HostnameWithContext returns the instance's hostname. This will be of the form +// "<instanceID>.c.<projID>.internal". +func (c *Client) HostnameWithContext(ctx context.Context) (string, error) { + return c.getTrimmed(ctx, "instance/hostname") +} + +// InstanceTags returns the list of user-defined instance tags. +// +// Deprecated: Please use the context aware variant [Client.InstanceTagsWithContext]. func (c *Client) InstanceTags() ([]string, error) { + return c.InstanceTagsWithContext(context.Background()) +} + +// InstanceTagsWithContext returns the list of user-defined instance tags, +// assigned when initially creating a GCE instance. +func (c *Client) InstanceTagsWithContext(ctx context.Context) ([]string, error) { var s []string - j, err := c.GetWithContext(context.Background(), "instance/tags") + j, err := c.GetWithContext(ctx, "instance/tags") if err != nil { return nil, err } @@ -454,13 +653,27 @@ func (c *Client) InstanceTags() ([]string, error) { } // InstanceName returns the current VM's instance ID string. +// +// Deprecated: Please use the context aware variant [Client.InstanceNameWithContext]. func (c *Client) InstanceName() (string, error) { - return c.getTrimmed(context.Background(), "instance/name") + return c.InstanceNameWithContext(context.Background()) +} + +// InstanceNameWithContext returns the current VM's instance ID string. +func (c *Client) InstanceNameWithContext(ctx context.Context) (string, error) { + return c.getTrimmed(ctx, "instance/name") } // Zone returns the current VM's zone, such as "us-central1-b". +// +// Deprecated: Please use the context aware variant [Client.ZoneWithContext]. func (c *Client) Zone() (string, error) { - zone, err := c.getTrimmed(context.Background(), "instance/zone") + return c.ZoneWithContext(context.Background()) +} + +// ZoneWithContext returns the current VM's zone, such as "us-central1-b". 
+func (c *Client) ZoneWithContext(ctx context.Context) (string, error) { + zone, err := c.getTrimmed(ctx, "instance/zone") // zone is of the form "projects//zones/". if err != nil { return "", err @@ -471,12 +684,34 @@ func (c *Client) Zone() (string, error) { // InstanceAttributes returns the list of user-defined attributes, // assigned when initially creating a GCE VM instance. The value of an // attribute can be obtained with InstanceAttributeValue. -func (c *Client) InstanceAttributes() ([]string, error) { return c.lines("instance/attributes/") } +// +// Deprecated: Please use the context aware variant [Client.InstanceAttributesWithContext]. +func (c *Client) InstanceAttributes() ([]string, error) { + return c.InstanceAttributesWithContext(context.Background()) +} + +// InstanceAttributesWithContext returns the list of user-defined attributes, +// assigned when initially creating a GCE VM instance. The value of an +// attribute can be obtained with InstanceAttributeValue. +func (c *Client) InstanceAttributesWithContext(ctx context.Context) ([]string, error) { + return c.lines(ctx, "instance/attributes/") +} // ProjectAttributes returns the list of user-defined attributes // applying to the project as a whole, not just this VM. The value of // an attribute can be obtained with ProjectAttributeValue. -func (c *Client) ProjectAttributes() ([]string, error) { return c.lines("project/attributes/") } +// +// Deprecated: Please use the context aware variant [Client.ProjectAttributesWithContext]. +func (c *Client) ProjectAttributes() ([]string, error) { + return c.ProjectAttributesWithContext(context.Background()) +} + +// ProjectAttributesWithContext returns the list of user-defined attributes +// applying to the project as a whole, not just this VM. The value of +// an attribute can be obtained with ProjectAttributeValue. +func (c *Client) ProjectAttributesWithContext(ctx context.Context) ([]string, error) { + return c.lines(ctx, "project/attributes/") +} // InstanceAttributeValue returns the value of the provided VM // instance attribute. @@ -486,8 +721,22 @@ func (c *Client) ProjectAttributes() ([]string, error) { return c.lines("project // // InstanceAttributeValue may return ("", nil) if the attribute was // defined to be the empty string. +// +// Deprecated: Please use the context aware variant [Client.InstanceAttributeValueWithContext]. func (c *Client) InstanceAttributeValue(attr string) (string, error) { - return c.GetWithContext(context.Background(), "instance/attributes/"+attr) + return c.InstanceAttributeValueWithContext(context.Background(), attr) +} + +// InstanceAttributeValueWithContext returns the value of the provided VM +// instance attribute. +// +// If the requested attribute is not defined, the returned error will +// be of type NotDefinedError. +// +// InstanceAttributeValue may return ("", nil) if the attribute was +// defined to be the empty string. +func (c *Client) InstanceAttributeValueWithContext(ctx context.Context, attr string) (string, error) { + return c.GetWithContext(ctx, "instance/attributes/"+attr) } // ProjectAttributeValue returns the value of the provided @@ -498,18 +747,41 @@ func (c *Client) InstanceAttributeValue(attr string) (string, error) { // // ProjectAttributeValue may return ("", nil) if the attribute was // defined to be the empty string. +// +// Deprecated: Please use the context aware variant [Client.ProjectAttributeValueWithContext]. 
func (c *Client) ProjectAttributeValue(attr string) (string, error) { - return c.GetWithContext(context.Background(), "project/attributes/"+attr) + return c.ProjectAttributeValueWithContext(context.Background(), attr) +} + +// ProjectAttributeValueWithContext returns the value of the provided +// project attribute. +// +// If the requested attribute is not defined, the returned error will +// be of type NotDefinedError. +// +// ProjectAttributeValue may return ("", nil) if the attribute was +// defined to be the empty string. +func (c *Client) ProjectAttributeValueWithContext(ctx context.Context, attr string) (string, error) { + return c.GetWithContext(ctx, "project/attributes/"+attr) } // Scopes returns the service account scopes for the given account. // The account may be empty or the string "default" to use the instance's // main account. +// +// Deprecated: Please use the context aware variant [Client.ScopesWithContext]. func (c *Client) Scopes(serviceAccount string) ([]string, error) { + return c.ScopesWithContext(context.Background(), serviceAccount) +} + +// ScopesWithContext returns the service account scopes for the given account. +// The account may be empty or the string "default" to use the instance's +// main account. +func (c *Client) ScopesWithContext(ctx context.Context, serviceAccount string) ([]string, error) { if serviceAccount == "" { serviceAccount = "default" } - return c.lines("instance/service-accounts/" + serviceAccount + "/scopes") + return c.lines(ctx, "instance/service-accounts/"+serviceAccount+"/scopes") } // Subscribe subscribes to a value from the metadata service. diff --git a/vendor/cloud.google.com/go/iam/CHANGES.md b/vendor/cloud.google.com/go/iam/CHANGES.md index af5ff37488..5aab66312b 100644 --- a/vendor/cloud.google.com/go/iam/CHANGES.md +++ b/vendor/cloud.google.com/go/iam/CHANGES.md @@ -1,6 +1,20 @@ # Changes +## [1.1.10](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.9...iam/v1.1.10) (2024-07-01) + + +### Bug Fixes + +* **iam:** Bump google.golang.org/api@v0.187.0 ([8fa9e39](https://github.com/googleapis/google-cloud-go/commit/8fa9e398e512fd8533fd49060371e61b5725a85b)) + +## [1.1.9](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.8...iam/v1.1.9) (2024-06-26) + + +### Bug Fixes + +* **iam:** Enable new auth lib ([b95805f](https://github.com/googleapis/google-cloud-go/commit/b95805f4c87d3e8d10ea23bd7a2d68d7a4157568)) + ## [1.1.8](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.7...iam/v1.1.8) (2024-05-01) diff --git a/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go b/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go index 3fbf4530d0..619b4c4fa3 100644 --- a/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go +++ b/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go @@ -1,4 +1,4 @@ -// Copyright 2022 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
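
Stepping back to the compute/metadata changes above: every context-free accessor (ProjectID, Zone, InstanceTags, and so on) is now a thin deprecated wrapper over a *WithContext variant. A minimal sketch of the new call pattern, assuming code running on GCE; the nil client argument, names, and 3-second timeout are illustrative only:

	package main

	import (
		"context"
		"fmt"
		"log"
		"time"

		"cloud.google.com/go/compute/metadata"
	)

	func main() {
		// The deprecated wrappers pass context.Background(), which can never be
		// cancelled; supplying our own deadline bounds the worst-case ~15s of
		// internal retries called out on GetWithContext.
		ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
		defer cancel()

		c := metadata.NewClient(nil) // nil selects the package's default HTTP client
		project, err := c.ProjectIDWithContext(ctx)
		if err != nil {
			log.Fatal(err)
		}
		zone, err := c.ZoneWithContext(ctx)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(project, zone)
	}
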
// versions: -// protoc-gen-go v1.33.0 +// protoc-gen-go v1.34.2 // protoc v4.25.3 // source: google/iam/v1/iam_policy.proto @@ -388,7 +388,7 @@ func file_google_iam_v1_iam_policy_proto_rawDescGZIP() []byte { } var file_google_iam_v1_iam_policy_proto_msgTypes = make([]protoimpl.MessageInfo, 4) -var file_google_iam_v1_iam_policy_proto_goTypes = []interface{}{ +var file_google_iam_v1_iam_policy_proto_goTypes = []any{ (*SetIamPolicyRequest)(nil), // 0: google.iam.v1.SetIamPolicyRequest (*GetIamPolicyRequest)(nil), // 1: google.iam.v1.GetIamPolicyRequest (*TestIamPermissionsRequest)(nil), // 2: google.iam.v1.TestIamPermissionsRequest @@ -422,7 +422,7 @@ func file_google_iam_v1_iam_policy_proto_init() { file_google_iam_v1_options_proto_init() file_google_iam_v1_policy_proto_init() if !protoimpl.UnsafeEnabled { - file_google_iam_v1_iam_policy_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_iam_v1_iam_policy_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*SetIamPolicyRequest); i { case 0: return &v.state @@ -434,7 +434,7 @@ func file_google_iam_v1_iam_policy_proto_init() { return nil } } - file_google_iam_v1_iam_policy_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_google_iam_v1_iam_policy_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*GetIamPolicyRequest); i { case 0: return &v.state @@ -446,7 +446,7 @@ func file_google_iam_v1_iam_policy_proto_init() { return nil } } - file_google_iam_v1_iam_policy_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_google_iam_v1_iam_policy_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*TestIamPermissionsRequest); i { case 0: return &v.state @@ -458,7 +458,7 @@ func file_google_iam_v1_iam_policy_proto_init() { return nil } } - file_google_iam_v1_iam_policy_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_google_iam_v1_iam_policy_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*TestIamPermissionsResponse); i { case 0: return &v.state diff --git a/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go b/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go index 29738ad1ce..f1c1c084e3 100644 --- a/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go +++ b/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go @@ -1,4 +1,4 @@ -// Copyright 2022 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
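
A note on the interface{} to any rewrites that dominate the regenerated .pb.go files in this diff: since Go 1.18, any is a built-in alias for interface{}, so the two spellings denote the identical type and the regenerated code is fully API-compatible. A small sketch demonstrating the equivalence:

	package main

	import "fmt"

	func describe(v any) string { return fmt.Sprintf("%T", v) }

	// Compiles because func(any) string and func(interface{}) string
	// are the same type.
	var _ func(interface{}) string = describe

	func main() { fmt.Println(describe(42)) }
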
// versions: -// protoc-gen-go v1.33.0 +// protoc-gen-go v1.34.2 // protoc v4.25.3 // source: google/iam/v1/options.proto @@ -136,7 +136,7 @@ func file_google_iam_v1_options_proto_rawDescGZIP() []byte { } var file_google_iam_v1_options_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_google_iam_v1_options_proto_goTypes = []interface{}{ +var file_google_iam_v1_options_proto_goTypes = []any{ (*GetPolicyOptions)(nil), // 0: google.iam.v1.GetPolicyOptions } var file_google_iam_v1_options_proto_depIdxs = []int32{ @@ -153,7 +153,7 @@ func file_google_iam_v1_options_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_iam_v1_options_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_iam_v1_options_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*GetPolicyOptions); i { case 0: return &v.state diff --git a/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go b/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go index a4e15741b6..4dda5d6d05 100644 --- a/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go +++ b/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go @@ -1,4 +1,4 @@ -// Copyright 2023 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.33.0 +// protoc-gen-go v1.34.2 // protoc v4.25.3 // source: google/iam/v1/policy.proto @@ -1036,7 +1036,7 @@ func file_google_iam_v1_policy_proto_rawDescGZIP() []byte { var file_google_iam_v1_policy_proto_enumTypes = make([]protoimpl.EnumInfo, 3) var file_google_iam_v1_policy_proto_msgTypes = make([]protoimpl.MessageInfo, 7) -var file_google_iam_v1_policy_proto_goTypes = []interface{}{ +var file_google_iam_v1_policy_proto_goTypes = []any{ (AuditLogConfig_LogType)(0), // 0: google.iam.v1.AuditLogConfig.LogType (BindingDelta_Action)(0), // 1: google.iam.v1.BindingDelta.Action (AuditConfigDelta_Action)(0), // 2: google.iam.v1.AuditConfigDelta.Action @@ -1073,7 +1073,7 @@ func file_google_iam_v1_policy_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_iam_v1_policy_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_iam_v1_policy_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*Policy); i { case 0: return &v.state @@ -1085,7 +1085,7 @@ func file_google_iam_v1_policy_proto_init() { return nil } } - file_google_iam_v1_policy_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_google_iam_v1_policy_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*Binding); i { case 0: return &v.state @@ -1097,7 +1097,7 @@ func file_google_iam_v1_policy_proto_init() { return nil } } - file_google_iam_v1_policy_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_google_iam_v1_policy_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*AuditConfig); i { case 0: return &v.state @@ -1109,7 +1109,7 @@ func file_google_iam_v1_policy_proto_init() { return nil } } - file_google_iam_v1_policy_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_google_iam_v1_policy_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*AuditLogConfig); i { case 0: return &v.state @@ -1121,7 +1121,7 @@ func file_google_iam_v1_policy_proto_init() { return nil } } - 
file_google_iam_v1_policy_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_google_iam_v1_policy_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*PolicyDelta); i { case 0: return &v.state @@ -1133,7 +1133,7 @@ func file_google_iam_v1_policy_proto_init() { return nil } } - file_google_iam_v1_policy_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_google_iam_v1_policy_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*BindingDelta); i { case 0: return &v.state @@ -1145,7 +1145,7 @@ func file_google_iam_v1_policy_proto_init() { return nil } } - file_google_iam_v1_policy_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_google_iam_v1_policy_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*AuditConfigDelta); i { case 0: return &v.state diff --git a/vendor/cloud.google.com/go/internal/.repo-metadata-full.json b/vendor/cloud.google.com/go/internal/.repo-metadata-full.json index 655fc5d824..a3e99df29b 100644 --- a/vendor/cloud.google.com/go/internal/.repo-metadata-full.json +++ b/vendor/cloud.google.com/go/internal/.repo-metadata-full.json @@ -1029,6 +1029,16 @@ "release_level": "stable", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/developerconnect/apiv1": { + "api_shortname": "developerconnect", + "distribution_name": "cloud.google.com/go/developerconnect/apiv1", + "description": "Developer Connect API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/developerconnect/latest/apiv1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/dialogflow/apiv2": { "api_shortname": "dialogflow", "distribution_name": "cloud.google.com/go/dialogflow/apiv2", @@ -1519,6 +1529,16 @@ "release_level": "stable", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/managedkafka/apiv1": { + "api_shortname": "managedkafka", + "distribution_name": "cloud.google.com/go/managedkafka/apiv1", + "description": "Apache Kafka for BigQuery API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/managedkafka/latest/apiv1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/maps/addressvalidation/apiv1": { "api_shortname": "addressvalidation", "distribution_name": "cloud.google.com/go/maps/addressvalidation/apiv1", @@ -1549,16 +1569,6 @@ "release_level": "stable", "library_type": "GAPIC_AUTO" }, - "cloud.google.com/go/maps/mapsplatformdatasets/apiv1alpha": { - "api_shortname": "mapsplatformdatasets", - "distribution_name": "cloud.google.com/go/maps/mapsplatformdatasets/apiv1alpha", - "description": "Maps Platform Datasets API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/maps/latest/mapsplatformdatasets/apiv1alpha", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, "cloud.google.com/go/maps/places/apiv1": { "api_shortname": "places", "distribution_name": "cloud.google.com/go/maps/places/apiv1", @@ -1566,7 +1576,7 @@ "language": "go", "client_library_type": "generated", "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/maps/latest/places/apiv1", - "release_level": "stable", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, 
"cloud.google.com/go/maps/routeoptimization/apiv1": { @@ -1596,7 +1606,7 @@ "language": "go", "client_library_type": "generated", "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/maps/latest/solar/apiv1", - "release_level": "preview", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/mediatranslation/apiv1beta1": { @@ -1749,6 +1759,16 @@ "release_level": "preview", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/networkservices/apiv1": { + "api_shortname": "networkservices", + "distribution_name": "cloud.google.com/go/networkservices/apiv1", + "description": "Network Services API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/networkservices/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/notebooks/apiv1": { "api_shortname": "notebooks", "distribution_name": "cloud.google.com/go/notebooks/apiv1", @@ -2112,7 +2132,7 @@ "cloud.google.com/go/retail/apiv2": { "api_shortname": "retail", "distribution_name": "cloud.google.com/go/retail/apiv2", - "description": "Retail API", + "description": "Vertex AI Search for Retail API", "language": "go", "client_library_type": "generated", "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/retail/latest/apiv2", @@ -2122,7 +2142,7 @@ "cloud.google.com/go/retail/apiv2alpha": { "api_shortname": "retail", "distribution_name": "cloud.google.com/go/retail/apiv2alpha", - "description": "Retail API", + "description": "Vertex AI Search for Retail API", "language": "go", "client_library_type": "generated", "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/retail/latest/apiv2alpha", @@ -2132,7 +2152,7 @@ "cloud.google.com/go/retail/apiv2beta": { "api_shortname": "retail", "distribution_name": "cloud.google.com/go/retail/apiv2beta", - "description": "Retail API", + "description": "Vertex AI Search for Retail API", "language": "go", "client_library_type": "generated", "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/retail/latest/apiv2beta", @@ -2389,6 +2409,16 @@ "release_level": "preview", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/shopping/merchant/accounts/apiv1beta": { + "api_shortname": "merchantapi", + "distribution_name": "cloud.google.com/go/shopping/merchant/accounts/apiv1beta", + "description": "Merchant API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/shopping/latest/merchant/accounts/apiv1beta", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/shopping/merchant/conversions/apiv1beta": { "api_shortname": "merchantapi", "distribution_name": "cloud.google.com/go/shopping/merchant/conversions/apiv1beta", @@ -2399,6 +2429,16 @@ "release_level": "preview", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/shopping/merchant/datasources/apiv1beta": { + "api_shortname": "merchantapi", + "distribution_name": "cloud.google.com/go/shopping/merchant/datasources/apiv1beta", + "description": "Merchant API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/shopping/latest/merchant/datasources/apiv1beta", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, 
"cloud.google.com/go/shopping/merchant/inventories/apiv1beta": { "api_shortname": "merchantapi", "distribution_name": "cloud.google.com/go/shopping/merchant/inventories/apiv1beta", @@ -2429,6 +2469,26 @@ "release_level": "preview", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/shopping/merchant/products/apiv1beta": { + "api_shortname": "merchantapi", + "distribution_name": "cloud.google.com/go/shopping/merchant/products/apiv1beta", + "description": "Merchant API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/shopping/latest/merchant/products/apiv1beta", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/shopping/merchant/promotions/apiv1beta": { + "api_shortname": "merchantapi", + "distribution_name": "cloud.google.com/go/shopping/merchant/promotions/apiv1beta", + "description": "Merchant API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/shopping/latest/merchant/promotions/apiv1beta", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/shopping/merchant/quota/apiv1beta": { "api_shortname": "merchantapi", "distribution_name": "cloud.google.com/go/shopping/merchant/quota/apiv1beta", diff --git a/vendor/cloud.google.com/go/internal/gen_info.sh b/vendor/cloud.google.com/go/internal/gen_info.sh new file mode 100644 index 0000000000..59c1906538 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/gen_info.sh @@ -0,0 +1,46 @@ +#!/bin/sh + +# Script to generate info.go files with methods for all clients. + +if [[ $# != 2 ]]; then + echo >&2 "usage: $0 DIR PACKAGE" + exit 1 +fi + +outfile=info.go + +cd $1 + +cat <<'EOF' > $outfile +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// SetGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Also passes any +// provided key-value pairs. Intended for use by Google-written clients. +// +// Internal use only. + +EOF + +echo -e >> $outfile "package $2\n" + + +awk '/^func \(c \*[A-Z].*\) setGoogleClientInfo/ { + printf("func (c %s SetGoogleClientInfo(keyval ...string) {\n", $3); + printf(" c.setGoogleClientInfo(keyval...)\n"); + printf("}\n\n"); +}' *_client.go >> $outfile + +gofmt -w $outfile diff --git a/vendor/cloud.google.com/go/internal/trace/trace.go b/vendor/cloud.google.com/go/internal/trace/trace.go index 97738b2cbe..e8daf800a6 100644 --- a/vendor/cloud.google.com/go/internal/trace/trace.go +++ b/vendor/cloud.google.com/go/internal/trace/trace.go @@ -33,17 +33,22 @@ import ( ) const ( + // Deprecated: The default experimental tracing support for OpenCensus is + // now deprecated in the Google Cloud client libraries for Go. 
// TelemetryPlatformTracingOpenCensus is the value to which the environment // variable GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING should be // set to enable OpenCensus tracing. TelemetryPlatformTracingOpenCensus = "opencensus" - // TelemetryPlatformTracingOpenCensus is the value to which the environment + // TelemetryPlatformTracingOpenTelemetry is the value to which the environment // variable GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING should be // set to enable OpenTelemetry tracing. TelemetryPlatformTracingOpenTelemetry = "opentelemetry" - // TelemetryPlatformTracingOpenCensus is the name of the environment - // variable that can be set to change the default tracing from OpenCensus - // to OpenTelemetry. + // TelemetryPlatformTracingVar is the name of the environment + // variable that can be set to change the default tracing from OpenTelemetry + // to OpenCensus. + // + // The default experimental tracing support for OpenCensus is now deprecated + // in the Google Cloud client libraries for Go. TelemetryPlatformTracingVar = "GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING" // OpenTelemetryTracerName is the name given to the OpenTelemetry Tracer // when it is obtained from the OpenTelemetry TracerProvider. @@ -51,47 +56,58 @@ const ( ) var ( - // openTelemetryTracingEnabledMu guards access to openTelemetryTracingEnabled field - openTelemetryTracingEnabledMu = sync.RWMutex{} - // openTelemetryTracingEnabled is true if the environment variable + // openCensusTracingEnabledMu guards access to openCensusTracingEnabled field + openCensusTracingEnabledMu = sync.RWMutex{} + // openCensusTracingEnabled is true if the environment variable // GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING is set to the - // case-insensitive value "opentelemetry". - openTelemetryTracingEnabled bool = strings.EqualFold(strings.TrimSpace( - os.Getenv(TelemetryPlatformTracingVar)), TelemetryPlatformTracingOpenTelemetry) + // case-insensitive value "opencensus". + openCensusTracingEnabled bool = strings.EqualFold(strings.TrimSpace( + os.Getenv(TelemetryPlatformTracingVar)), TelemetryPlatformTracingOpenCensus) ) -// SetOpenTelemetryTracingEnabledField programmatically sets the value provided by GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING for the purpose of unit testing. -// Do not invoke it directly. Intended for use only in unit tests. Restore original value after each test. +// SetOpenTelemetryTracingEnabledField programmatically sets the value provided +// by GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING for the purpose of +// unit testing. Do not invoke it directly. Intended for use only in unit tests. +// Restore original value after each test. +// +// The default experimental tracing support for OpenCensus is now deprecated in +// the Google Cloud client libraries for Go. func SetOpenTelemetryTracingEnabledField(enabled bool) { - openTelemetryTracingEnabledMu.Lock() - defer openTelemetryTracingEnabledMu.Unlock() - openTelemetryTracingEnabled = enabled + openCensusTracingEnabledMu.Lock() + defer openCensusTracingEnabledMu.Unlock() + openCensusTracingEnabled = !enabled } +// Deprecated: The default experimental tracing support for OpenCensus is now +// deprecated in the Google Cloud client libraries for Go. +// // IsOpenCensusTracingEnabled returns true if the environment variable -// GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING is NOT set to the -// case-insensitive value "opentelemetry". 
+// GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING is set to the
+// case-insensitive value "opencensus".
 func IsOpenCensusTracingEnabled() bool {
-	return !IsOpenTelemetryTracingEnabled()
+	openCensusTracingEnabledMu.RLock()
+	defer openCensusTracingEnabledMu.RUnlock()
+	return openCensusTracingEnabled
 }
 
 // IsOpenTelemetryTracingEnabled returns true if the environment variable
-// GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING is set to the
-// case-insensitive value "opentelemetry".
+// GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING is NOT set to the
+// case-insensitive value "opencensus".
 func IsOpenTelemetryTracingEnabled() bool {
-	openTelemetryTracingEnabledMu.RLock()
-	defer openTelemetryTracingEnabledMu.RUnlock()
-	return openTelemetryTracingEnabled
+	return !IsOpenCensusTracingEnabled()
 }
 
 // StartSpan adds a span to the trace with the given name. If IsOpenCensusTracingEnabled
 // returns true, the span will be an OpenCensus span. If IsOpenTelemetryTracingEnabled
 // returns true, the span will be an OpenTelemetry span. Set the environment variable
 // GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING to the case-insensitive
-// value "opentelemetry" before loading the package to use OpenTelemetry tracing.
-// The default will remain OpenCensus until May 29, 2024, at which time the default will
-// switch to "opentelemetry" and explicitly setting the environment variable to
-// "opencensus" will be required to continue using OpenCensus tracing.
+// value "opencensus" before loading the package to use OpenCensus tracing.
+// The default was OpenCensus until May 29, 2024, at which time the default was
+// changed to "opentelemetry". Explicitly setting the environment variable to
+// "opencensus" is required to continue using OpenCensus tracing.
+//
+// The default experimental tracing support for OpenCensus is now deprecated in
+// the Google Cloud client libraries for Go.
 func StartSpan(ctx context.Context, name string) context.Context {
 	if IsOpenTelemetryTracingEnabled() {
 		ctx, _ = otel.GetTracerProvider().Tracer(OpenTelemetryTracerName).Start(ctx, name)
@@ -105,10 +121,13 @@ func StartSpan(ctx context.Context, name string) context.Context {
 // returns true, the span will be an OpenCensus span. If IsOpenTelemetryTracingEnabled
 // returns true, the span will be an OpenTelemetry span. Set the environment variable
 // GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING to the case-insensitive
-// value "opentelemetry" before loading the package to use OpenTelemetry tracing.
-// The default will remain OpenCensus until May 29, 2024, at which time the default will
-// switch to "opentelemetry" and explicitly setting the environment variable to
-// "opencensus" will be required to continue using OpenCensus tracing.
+// value "opencensus" before loading the package to use OpenCensus tracing.
+// The default was OpenCensus until May 29, 2024, at which time the default was
+// changed to "opentelemetry". Explicitly setting the environment variable to
+// "opencensus" is required to continue using OpenCensus tracing.
+//
+// The default experimental tracing support for OpenCensus is now deprecated in
+// the Google Cloud client libraries for Go.
 func EndSpan(ctx context.Context, err error) {
 	if IsOpenTelemetryTracingEnabled() {
 		span := ottrace.SpanFromContext(ctx)
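
For reviewers wondering what actually flips this switch: the variable is read once, at package initialization, so it has to be present in the process environment before the program starts. A small sketch that mirrors the library's own check (this reproduces the logic above; it is not a public API):

	package main

	import (
		"fmt"
		"os"
		"strings"
	)

	func main() {
		// Matches the TrimSpace/EqualFold comparison in trace.go: only the
		// case-insensitive value "opencensus" re-enables the deprecated path.
		v := strings.TrimSpace(os.Getenv("GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING"))
		if strings.EqualFold(v, "opencensus") {
			fmt.Println("OpenCensus tracing (deprecated)")
		} else {
			fmt.Println("OpenTelemetry tracing (the default since May 29, 2024)")
		}
	}
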
@@ -191,10 +210,13 @@ func httpStatusCodeToOCCode(httpStatusCode int) int32 {
 // OpenCensus span. If IsOpenTelemetryTracingEnabled returns true, the expected
 // span must be an OpenTelemetry span. Set the environment variable
 // GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING to the case-insensitive
-// value "opentelemetry" before loading the package to use OpenTelemetry tracing.
-// The default will remain OpenCensus until May 29, 2024, at which time the default will
-// switch to "opentelemetry" and explicitly setting the environment variable to
-// "opencensus" will be required to continue using OpenCensus tracing.
+// value "opencensus" before loading the package to use OpenCensus tracing.
+// The default was OpenCensus until May 29, 2024, at which time the default was
+// changed to "opentelemetry". Explicitly setting the environment variable to
+// "opencensus" is required to continue using OpenCensus tracing.
+//
+// The default experimental tracing support for OpenCensus is now deprecated in
+// the Google Cloud client libraries for Go.
 func TracePrintf(ctx context.Context, attrMap map[string]interface{}, format string, args ...interface{}) {
 	if IsOpenTelemetryTracingEnabled() {
 		attrs := otAttrs(attrMap)
diff --git a/vendor/cloud.google.com/go/storage/CHANGES.md b/vendor/cloud.google.com/go/storage/CHANGES.md
index 625ad4fbe7..2da498b8e7 100644
--- a/vendor/cloud.google.com/go/storage/CHANGES.md
+++ b/vendor/cloud.google.com/go/storage/CHANGES.md
@@ -1,6 +1,30 @@
 # Changes
 
+## [1.41.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.40.0...storage/v1.41.0) (2024-05-13)
+
+
+### Features
+
+* **storage/control:** Make Managed Folders operations public ([264a6dc](https://github.com/googleapis/google-cloud-go/commit/264a6dcddbffaec987dce1dc00f6550c263d2df7))
+* **storage:** Support for soft delete policies and restore ([#9520](https://github.com/googleapis/google-cloud-go/issues/9520)) ([985deb2](https://github.com/googleapis/google-cloud-go/commit/985deb2bdd1c79944cdd960bd3fbfa38cbfa1c91))
+
+
+### Bug Fixes
+
+* **storage/control:** An existing resource pattern value `projects/{project}/buckets/{bucket}/managedFolders/{managedFolder=**}` to resource definition `storage.googleapis.com/ManagedFolder` is removed ([3e25053](https://github.com/googleapis/google-cloud-go/commit/3e250530567ee81ed4f51a3856c5940dbec35289))
+* **storage:** Add internaloption.WithDefaultEndpointTemplate ([3b41408](https://github.com/googleapis/google-cloud-go/commit/3b414084450a5764a0248756e95e13383a645f90))
+* **storage:** Bump x/net to v0.24.0 ([ba31ed5](https://github.com/googleapis/google-cloud-go/commit/ba31ed5fda2c9664f2e1cf972469295e63deb5b4))
+* **storage:** Disable gax retries for gRPC ([#9747](https://github.com/googleapis/google-cloud-go/issues/9747)) ([bbfc0ac](https://github.com/googleapis/google-cloud-go/commit/bbfc0acc272f21bf1f558ea23648183d5a11cda5))
+* **storage:** More strongly match regex ([#9706](https://github.com/googleapis/google-cloud-go/issues/9706)) ([3cfc8eb](https://github.com/googleapis/google-cloud-go/commit/3cfc8eb418e064d734bf3d8708162062dbbe988f)), refs [#9705](https://github.com/googleapis/google-cloud-go/issues/9705)
+* **storage:** Retry net.OpError on connection reset ([#10154](https://github.com/googleapis/google-cloud-go/issues/10154)) ([54fab10](https://github.com/googleapis/google-cloud-go/commit/54fab107f98b4f79c9df2959a05b981be0a613c1)), refs [#9478](https://github.com/googleapis/google-cloud-go/issues/9478)
+* **storage:** Wrap error when MaxAttempts is hit ([#9767](https://github.com/googleapis/google-cloud-go/issues/9767)) ([9cb262b](https://github.com/googleapis/google-cloud-go/commit/9cb262bb65a162665bfb8bed0022615131bae1f2)), refs
[#9720](https://github.com/googleapis/google-cloud-go/issues/9720) + + +### Documentation + +* **storage/control:** Update storage control documentation and add PHP for publishing ([1d757c6](https://github.com/googleapis/google-cloud-go/commit/1d757c66478963d6cbbef13fee939632c742759c)) + ## [1.40.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.39.1...storage/v1.40.0) (2024-03-29) diff --git a/vendor/cloud.google.com/go/storage/bucket.go b/vendor/cloud.google.com/go/storage/bucket.go index 0344ef9de3..d2da86e914 100644 --- a/vendor/cloud.google.com/go/storage/bucket.go +++ b/vendor/cloud.google.com/go/storage/bucket.go @@ -479,6 +479,13 @@ type BucketAttrs struct { // cannot be modified once the bucket is created. // ObjectRetention cannot be configured or reported through the gRPC API. ObjectRetentionMode string + + // SoftDeletePolicy contains the bucket's soft delete policy, which defines + // the period of time that soft-deleted objects will be retained, and cannot + // be permanently deleted. By default, new buckets will be created with a + // 7 day retention duration. In order to fully disable soft delete, you need + // to set a policy with a RetentionDuration of 0. + SoftDeletePolicy *SoftDeletePolicy } // BucketPolicyOnly is an alias for UniformBucketLevelAccess. @@ -766,6 +773,19 @@ type Autoclass struct { TerminalStorageClassUpdateTime time.Time } +// SoftDeletePolicy contains the bucket's soft delete policy, which defines the +// period of time that soft-deleted objects will be retained, and cannot be +// permanently deleted. +type SoftDeletePolicy struct { + // EffectiveTime indicates the time from which the policy, or one with a + // greater retention, was effective. This field is read-only. + EffectiveTime time.Time + + // RetentionDuration is the amount of time that soft-deleted objects in the + // bucket will be retained and cannot be permanently deleted. 
+ RetentionDuration time.Duration +} + func newBucket(b *raw.Bucket) (*BucketAttrs, error) { if b == nil { return nil, nil @@ -803,6 +823,7 @@ func newBucket(b *raw.Bucket) (*BucketAttrs, error) { RPO: toRPO(b), CustomPlacementConfig: customPlacementFromRaw(b.CustomPlacementConfig), Autoclass: toAutoclassFromRaw(b.Autoclass), + SoftDeletePolicy: toSoftDeletePolicyFromRaw(b.SoftDeletePolicy), }, nil } @@ -836,6 +857,7 @@ func newBucketFromProto(b *storagepb.Bucket) *BucketAttrs { CustomPlacementConfig: customPlacementFromProto(b.GetCustomPlacementConfig()), ProjectNumber: parseProjectNumber(b.GetProject()), // this can return 0 the project resource name is ID based Autoclass: toAutoclassFromProto(b.GetAutoclass()), + SoftDeletePolicy: toSoftDeletePolicyFromProto(b.SoftDeletePolicy), } } @@ -891,6 +913,7 @@ func (b *BucketAttrs) toRawBucket() *raw.Bucket { Rpo: b.RPO.String(), CustomPlacementConfig: b.CustomPlacementConfig.toRawCustomPlacement(), Autoclass: b.Autoclass.toRawAutoclass(), + SoftDeletePolicy: b.SoftDeletePolicy.toRawSoftDeletePolicy(), } } @@ -951,6 +974,7 @@ func (b *BucketAttrs) toProtoBucket() *storagepb.Bucket { Rpo: b.RPO.String(), CustomPlacementConfig: b.CustomPlacementConfig.toProtoCustomPlacement(), Autoclass: b.Autoclass.toProtoAutoclass(), + SoftDeletePolicy: b.SoftDeletePolicy.toProtoSoftDeletePolicy(), } } @@ -1032,6 +1056,7 @@ func (ua *BucketAttrsToUpdate) toProtoBucket() *storagepb.Bucket { IamConfig: bktIAM, Rpo: ua.RPO.String(), Autoclass: ua.Autoclass.toProtoAutoclass(), + SoftDeletePolicy: ua.SoftDeletePolicy.toProtoSoftDeletePolicy(), Labels: ua.setLabels, } } @@ -1152,6 +1177,9 @@ type BucketAttrsToUpdate struct { // See https://cloud.google.com/storage/docs/using-autoclass for more information. Autoclass *Autoclass + // If set, updates the soft delete policy of the bucket. + SoftDeletePolicy *SoftDeletePolicy + // acl is the list of access control rules on the bucket. // It is unexported and only used internally by the gRPC client. // Library users should use ACLHandle methods directly. @@ -1273,6 +1301,14 @@ func (ua *BucketAttrsToUpdate) toRawBucket() *raw.Bucket { } rb.ForceSendFields = append(rb.ForceSendFields, "Autoclass") } + if ua.SoftDeletePolicy != nil { + if ua.SoftDeletePolicy.RetentionDuration == 0 { + rb.NullFields = append(rb.NullFields, "SoftDeletePolicy") + rb.SoftDeletePolicy = nil + } else { + rb.SoftDeletePolicy = ua.SoftDeletePolicy.toRawSoftDeletePolicy() + } + } if ua.PredefinedACL != "" { // Clear ACL or the call will fail. rb.Acl = nil @@ -2053,6 +2089,53 @@ func toAutoclassFromProto(a *storagepb.Bucket_Autoclass) *Autoclass { } } +func (p *SoftDeletePolicy) toRawSoftDeletePolicy() *raw.BucketSoftDeletePolicy { + if p == nil { + return nil + } + // Excluding read only field EffectiveTime. + return &raw.BucketSoftDeletePolicy{ + RetentionDurationSeconds: int64(p.RetentionDuration.Seconds()), + } +} + +func (p *SoftDeletePolicy) toProtoSoftDeletePolicy() *storagepb.Bucket_SoftDeletePolicy { + if p == nil { + return nil + } + // Excluding read only field EffectiveTime. + return &storagepb.Bucket_SoftDeletePolicy{ + RetentionDuration: durationpb.New(p.RetentionDuration), + } +} + +func toSoftDeletePolicyFromRaw(p *raw.BucketSoftDeletePolicy) *SoftDeletePolicy { + if p == nil { + return nil + } + + policy := &SoftDeletePolicy{ + RetentionDuration: time.Duration(p.RetentionDurationSeconds) * time.Second, + } + + // Return EffectiveTime only if parsed to a valid value. 
+ if t, err := time.Parse(time.RFC3339, p.EffectiveTime); err == nil { + policy.EffectiveTime = t + } + + return policy +} + +func toSoftDeletePolicyFromProto(p *storagepb.Bucket_SoftDeletePolicy) *SoftDeletePolicy { + if p == nil { + return nil + } + return &SoftDeletePolicy{ + EffectiveTime: p.GetEffectiveTime().AsTime(), + RetentionDuration: p.GetRetentionDuration().AsDuration(), + } +} + // Objects returns an iterator over the objects in the bucket that match the // Query q. If q is nil, no filtering is done. Objects will be iterated over // lexicographically by name. diff --git a/vendor/cloud.google.com/go/storage/client.go b/vendor/cloud.google.com/go/storage/client.go index 70b2a280e3..bbe89276a4 100644 --- a/vendor/cloud.google.com/go/storage/client.go +++ b/vendor/cloud.google.com/go/storage/client.go @@ -59,8 +59,9 @@ type storageClient interface { // Object metadata methods. DeleteObject(ctx context.Context, bucket, object string, gen int64, conds *Conditions, opts ...storageOption) error - GetObject(ctx context.Context, bucket, object string, gen int64, encryptionKey []byte, conds *Conditions, opts ...storageOption) (*ObjectAttrs, error) + GetObject(ctx context.Context, params *getObjectParams, opts ...storageOption) (*ObjectAttrs, error) UpdateObject(ctx context.Context, params *updateObjectParams, opts ...storageOption) (*ObjectAttrs, error) + RestoreObject(ctx context.Context, params *restoreObjectParams, opts ...storageOption) (*ObjectAttrs, error) // Default Object ACL methods. @@ -182,16 +183,6 @@ type storageOption interface { Apply(s *settings) } -func withGAXOptions(opts ...gax.CallOption) storageOption { - return &gaxOption{opts} -} - -type gaxOption struct { - opts []gax.CallOption -} - -func (o *gaxOption) Apply(s *settings) { s.gax = o.opts } - func withRetryConfig(rc *retryConfig) storageOption { return &retryOption{rc} } @@ -294,6 +285,14 @@ type newRangeReaderParams struct { readCompressed bool // Use accept-encoding: gzip. Only works for HTTP currently. } +type getObjectParams struct { + bucket, object string + gen int64 + encryptionKey []byte + conds *Conditions + softDeleted bool +} + type updateObjectParams struct { bucket, object string uattrs *ObjectAttrsToUpdate @@ -303,6 +302,14 @@ type updateObjectParams struct { overrideRetention *bool } +type restoreObjectParams struct { + bucket, object string + gen int64 + encryptionKey []byte + conds *Conditions + copySourceACL bool +} + type composeObjectRequest struct { dstBucket string dstObject destinationObject diff --git a/vendor/cloud.google.com/go/storage/doc.go b/vendor/cloud.google.com/go/storage/doc.go index b23cebcb83..c274c762ea 100644 --- a/vendor/cloud.google.com/go/storage/doc.go +++ b/vendor/cloud.google.com/go/storage/doc.go @@ -350,7 +350,7 @@ To create a client which will use gRPC, use the alternate constructor: // Use client as usual. If the application is running within GCP, users may get better performance by -enabling Google Direct Access (enabling requests to skip some proxy steps). To enable, +enabling Direct Google Access (enabling requests to skip some proxy steps). 
To enable, set the environment variable `GOOGLE_CLOUD_ENABLE_DIRECT_PATH_XDS=true` and add the following side-effect imports to your application: @@ -359,6 +359,13 @@ the following side-effect imports to your application: _ "google.golang.org/grpc/xds/googledirectpath" ) +# Storage Control API + +Certain control plane and long-running operations for Cloud Storage (including Folder +and Managed Folder operations) are supported via the autogenerated Storage Control +client, which is available as a subpackage in this module. See package docs at +[cloud.google.com/go/storage/control/apiv2] or reference the [Storage Control API] docs. + [Cloud Storage IAM docs]: https://cloud.google.com/storage/docs/access-control/iam [XML POST Object docs]: https://cloud.google.com/storage/docs/xml-api/post-object [Cloud Storage retry docs]: https://cloud.google.com/storage/docs/retry-strategy @@ -367,5 +374,6 @@ the following side-effect imports to your application: [impersonation enabled]: https://cloud.google.com/sdk/gcloud/reference#--impersonate-service-account [IAM Service Account Credentials API]: https://console.developers.google.com/apis/api/iamcredentials.googleapis.com/overview [custom audit logging]: https://cloud.google.com/storage/docs/audit-logging#add-custom-metadata +[Storage Control API]: https://cloud.google.com/storage/docs/reference/rpc/google.storage.control.v2 */ package storage // import "cloud.google.com/go/storage" diff --git a/vendor/cloud.google.com/go/storage/grpc_client.go b/vendor/cloud.google.com/go/storage/grpc_client.go index e337213f03..d81a17b6b0 100644 --- a/vendor/cloud.google.com/go/storage/grpc_client.go +++ b/vendor/cloud.google.com/go/storage/grpc_client.go @@ -28,7 +28,6 @@ import ( "cloud.google.com/go/internal/trace" gapic "cloud.google.com/go/storage/internal/apiv2" "cloud.google.com/go/storage/internal/apiv2/storagepb" - "github.com/golang/protobuf/proto" "github.com/googleapis/gax-go/v2" "google.golang.org/api/googleapi" "google.golang.org/api/iterator" @@ -40,6 +39,7 @@ import ( "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/proto" fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb" ) @@ -116,6 +116,8 @@ type grpcStorageClient struct { func newGRPCStorageClient(ctx context.Context, opts ...storageOption) (storageClient, error) { s := initSettings(opts...) s.clientOption = append(defaultGRPCOptions(), s.clientOption...) + // Disable all gax-level retries in favor of retry logic in the veneer client. + s.gax = append(s.gax, gax.WithRetry(nil)) config := newStorageConfig(s.clientOption...) if config.readAPIWasSet { @@ -365,6 +367,9 @@ func (c *grpcStorageClient) UpdateBucket(ctx context.Context, bucket string, uat if uattrs.Autoclass != nil { fieldMask.Paths = append(fieldMask.Paths, "autoclass") } + if uattrs.SoftDeletePolicy != nil { + fieldMask.Paths = append(fieldMask.Paths, "soft_delete_policy") + } for label := range uattrs.setLabels { fieldMask.Paths = append(fieldMask.Paths, fmt.Sprintf("labels.%s", label)) @@ -377,6 +382,13 @@ func (c *grpcStorageClient) UpdateBucket(ctx context.Context, bucket string, uat req.UpdateMask = fieldMask + if len(fieldMask.Paths) < 1 { + // Nothing to update. Send a get request for current attrs instead. This + // maintains consistency with JSON bucket updates. + opts = append(opts, idempotent(true)) + return c.GetBucket(ctx, bucket, conds, opts...) 
+ } + var battrs *BucketAttrs err := run(ctx, func(ctx context.Context) error { res, err := c.raw.UpdateBucket(ctx, req, s.gax...) @@ -419,6 +431,7 @@ func (c *grpcStorageClient) ListObjects(ctx context.Context, bucket string, q *Q IncludeTrailingDelimiter: it.query.IncludeTrailingDelimiter, MatchGlob: it.query.MatchGlob, ReadMask: q.toFieldMask(), // a nil Query still results in a "*" FieldMask + SoftDeleted: it.query.SoftDeleted, } if s.userProject != "" { ctx = setUserProjectMetadata(ctx, s.userProject) @@ -488,22 +501,25 @@ func (c *grpcStorageClient) DeleteObject(ctx context.Context, bucket, object str return err } -func (c *grpcStorageClient) GetObject(ctx context.Context, bucket, object string, gen int64, encryptionKey []byte, conds *Conditions, opts ...storageOption) (*ObjectAttrs, error) { +func (c *grpcStorageClient) GetObject(ctx context.Context, params *getObjectParams, opts ...storageOption) (*ObjectAttrs, error) { s := callSettings(c.settings, opts...) req := &storagepb.GetObjectRequest{ - Bucket: bucketResourceName(globalProjectAlias, bucket), - Object: object, + Bucket: bucketResourceName(globalProjectAlias, params.bucket), + Object: params.object, // ProjectionFull by default. ReadMask: &fieldmaskpb.FieldMask{Paths: []string{"*"}}, } - if err := applyCondsProto("grpcStorageClient.GetObject", gen, conds, req); err != nil { + if err := applyCondsProto("grpcStorageClient.GetObject", params.gen, params.conds, req); err != nil { return nil, err } if s.userProject != "" { ctx = setUserProjectMetadata(ctx, s.userProject) } - if encryptionKey != nil { - req.CommonObjectRequestParams = toProtoCommonObjectRequestParams(encryptionKey) + if params.encryptionKey != nil { + req.CommonObjectRequestParams = toProtoCommonObjectRequestParams(params.encryptionKey) + } + if params.softDeleted { + req.SoftDeleted = ¶ms.softDeleted } var attrs *ObjectAttrs @@ -593,6 +609,17 @@ func (c *grpcStorageClient) UpdateObject(ctx context.Context, params *updateObje req.UpdateMask = fieldMask + if len(fieldMask.Paths) < 1 { + // Nothing to update. To maintain consistency with JSON, we must still + // update the object because metageneration and other fields are + // updated even on an empty update. + // gRPC will fail if the fieldmask is empty, so instead we add an + // output-only field to the update mask. Output-only fields are (and must + // be - see AIP 161) ignored, but allow us to send an empty update because + // any mask that is valid for read (as this one is) must be valid for write. + fieldMask.Paths = append(fieldMask.Paths, "create_time") + } + var attrs *ObjectAttrs err := run(ctx, func(ctx context.Context) error { res, err := c.raw.UpdateObject(ctx, req, s.gax...) @@ -606,6 +633,32 @@ func (c *grpcStorageClient) UpdateObject(ctx context.Context, params *updateObje return attrs, err } +func (c *grpcStorageClient) RestoreObject(ctx context.Context, params *restoreObjectParams, opts ...storageOption) (*ObjectAttrs, error) { + s := callSettings(c.settings, opts...) + req := &storagepb.RestoreObjectRequest{ + Bucket: bucketResourceName(globalProjectAlias, params.bucket), + Object: params.object, + CopySourceAcl: ¶ms.copySourceACL, + } + if err := applyCondsProto("grpcStorageClient.RestoreObject", params.gen, params.conds, req); err != nil { + return nil, err + } + if s.userProject != "" { + ctx = setUserProjectMetadata(ctx, s.userProject) + } + + var attrs *ObjectAttrs + err := run(ctx, func(ctx context.Context) error { + res, err := c.raw.RestoreObject(ctx, req, s.gax...) 
+ attrs = newObjectFromProto(res) + return err + }, s.retry, s.idempotent) + if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound { + return nil, ErrObjectNotExist + } + return attrs, err +} + // Default Object ACL methods. func (c *grpcStorageClient) DeleteDefaultObjectACL(ctx context.Context, bucket string, entity ACLEntity, opts ...storageOption) error { @@ -735,7 +788,7 @@ func (c *grpcStorageClient) UpdateBucketACL(ctx context.Context, bucket string, func (c *grpcStorageClient) DeleteObjectACL(ctx context.Context, bucket, object string, entity ACLEntity, opts ...storageOption) error { // There is no separate API for PATCH in gRPC. // Make a GET call first to retrieve ObjectAttrs. - attrs, err := c.GetObject(ctx, bucket, object, defaultGen, nil, nil, opts...) + attrs, err := c.GetObject(ctx, &getObjectParams{bucket, object, defaultGen, nil, nil, false}, opts...) if err != nil { return err } @@ -768,7 +821,7 @@ func (c *grpcStorageClient) DeleteObjectACL(ctx context.Context, bucket, object // ListObjectACLs retrieves object ACL entries. By default, it operates on the latest generation of this object. // Selecting a specific generation of this object is not currently supported by the client. func (c *grpcStorageClient) ListObjectACLs(ctx context.Context, bucket, object string, opts ...storageOption) ([]ACLRule, error) { - o, err := c.GetObject(ctx, bucket, object, defaultGen, nil, nil, opts...) + o, err := c.GetObject(ctx, &getObjectParams{bucket, object, defaultGen, nil, nil, false}, opts...) if err != nil { return nil, err } @@ -778,7 +831,7 @@ func (c *grpcStorageClient) ListObjectACLs(ctx context.Context, bucket, object s func (c *grpcStorageClient) UpdateObjectACL(ctx context.Context, bucket, object string, entity ACLEntity, role ACLRole, opts ...storageOption) error { // There is no separate API for PATCH in gRPC. // Make a GET call first to retrieve ObjectAttrs. - attrs, err := c.GetObject(ctx, bucket, object, defaultGen, nil, nil, opts...) + attrs, err := c.GetObject(ctx, &getObjectParams{bucket, object, defaultGen, nil, nil, false}, opts...) if err != nil { return err } diff --git a/vendor/cloud.google.com/go/storage/http_client.go b/vendor/cloud.google.com/go/storage/http_client.go index f75d93897d..e01ae9c428 100644 --- a/vendor/cloud.google.com/go/storage/http_client.go +++ b/vendor/cloud.google.com/go/storage/http_client.go @@ -107,12 +107,12 @@ func newHTTPStorageClient(ctx context.Context, opts ...storageOption) (storageCl // Append the emulator host as default endpoint for the user o = append([]option.ClientOption{option.WithoutAuthentication()}, o...) - o = append(o, internaloption.WithDefaultEndpoint(endpoint)) + o = append(o, internaloption.WithDefaultEndpointTemplate(endpoint)) o = append(o, internaloption.WithDefaultMTLSEndpoint(endpoint)) } s.clientOption = o - // htransport selects the correct endpoint among WithEndpoint (user override), WithDefaultEndpoint, and WithDefaultMTLSEndpoint. + // htransport selects the correct endpoint among WithEndpoint (user override), WithDefaultEndpointTemplate, and WithDefaultMTLSEndpoint. hc, ep, err := htransport.NewClient(ctx, s.clientOption...) 
if err != nil { return nil, fmt.Errorf("dialing: %w", err) @@ -337,6 +337,9 @@ func (c *httpStorageClient) ListObjects(ctx context.Context, bucket string, q *Q } fetch := func(pageSize int, pageToken string) (string, error) { req := c.raw.Objects.List(bucket) + if it.query.SoftDeleted { + req.SoftDeleted(it.query.SoftDeleted) + } setClientHeader(req.Header()) projection := it.query.Projection if projection == ProjectionDefault { @@ -409,18 +412,22 @@ func (c *httpStorageClient) DeleteObject(ctx context.Context, bucket, object str return err } -func (c *httpStorageClient) GetObject(ctx context.Context, bucket, object string, gen int64, encryptionKey []byte, conds *Conditions, opts ...storageOption) (*ObjectAttrs, error) { +func (c *httpStorageClient) GetObject(ctx context.Context, params *getObjectParams, opts ...storageOption) (*ObjectAttrs, error) { s := callSettings(c.settings, opts...) - req := c.raw.Objects.Get(bucket, object).Projection("full").Context(ctx) - if err := applyConds("Attrs", gen, conds, req); err != nil { + req := c.raw.Objects.Get(params.bucket, params.object).Projection("full").Context(ctx) + if err := applyConds("Attrs", params.gen, params.conds, req); err != nil { return nil, err } if s.userProject != "" { req.UserProject(s.userProject) } - if err := setEncryptionHeaders(req.Header(), encryptionKey, false); err != nil { + if err := setEncryptionHeaders(req.Header(), params.encryptionKey, false); err != nil { return nil, err } + if params.softDeleted { + req.SoftDeleted(params.softDeleted) + } + var obj *raw.Object var err error err = run(ctx, func(ctx context.Context) error { @@ -547,6 +554,33 @@ func (c *httpStorageClient) UpdateObject(ctx context.Context, params *updateObje return newObject(obj), nil } +func (c *httpStorageClient) RestoreObject(ctx context.Context, params *restoreObjectParams, opts ...storageOption) (*ObjectAttrs, error) { + s := callSettings(c.settings, opts...) + req := c.raw.Objects.Restore(params.bucket, params.object, params.gen).Context(ctx) + // Do not set the generation here since it's not an optional condition; it gets set above. + if err := applyConds("RestoreObject", defaultGen, params.conds, req); err != nil { + return nil, err + } + if s.userProject != "" { + req.UserProject(s.userProject) + } + if params.copySourceACL { + req.CopySourceAcl(params.copySourceACL) + } + if err := setEncryptionHeaders(req.Header(), params.encryptionKey, false); err != nil { + return nil, err + } + + var obj *raw.Object + var err error + err = run(ctx, func(ctx context.Context) error { obj, err = req.Context(ctx).Do(); return err }, s.retry, s.idempotent) + var e *googleapi.Error + if ok := errors.As(err, &e); ok && e.Code == http.StatusNotFound { + return nil, ErrObjectNotExist + } + return newObject(obj), err +} + // Default Object ACL methods. func (c *httpStorageClient) DeleteDefaultObjectACL(ctx context.Context, bucket string, entity ACLEntity, opts ...storageOption) error { diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/storagepb/storage.pb.go b/vendor/cloud.google.com/go/storage/internal/apiv2/storagepb/storage.pb.go index 9637bc0a5b..b63d664e5e 100644 --- a/vendor/cloud.google.com/go/storage/internal/apiv2/storagepb/storage.pb.go +++ b/vendor/cloud.google.com/go/storage/internal/apiv2/storagepb/storage.pb.go @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.32.0 -// protoc v4.25.2 +// protoc-gen-go v1.33.0 +// protoc v4.25.3 // source: google/storage/v2/storage.proto package storagepb diff --git a/vendor/cloud.google.com/go/storage/internal/version.go b/vendor/cloud.google.com/go/storage/internal/version.go index 1c52a3504b..c3cf41cb71 100644 --- a/vendor/cloud.google.com/go/storage/internal/version.go +++ b/vendor/cloud.google.com/go/storage/internal/version.go @@ -15,4 +15,4 @@ package internal // Version is the current tagged release of the library. -const Version = "1.40.0" +const Version = "1.41.0" diff --git a/vendor/cloud.google.com/go/storage/invoke.go b/vendor/cloud.google.com/go/storage/invoke.go index 1b52eb5d2c..ffc49a808d 100644 --- a/vendor/cloud.google.com/go/storage/invoke.go +++ b/vendor/cloud.google.com/go/storage/invoke.go @@ -70,8 +70,8 @@ func run(ctx context.Context, call func(ctx context.Context) error, retry *retry return internal.Retry(ctx, bo, func() (stop bool, err error) { ctxWithHeaders := setInvocationHeaders(ctx, invocationID, attempts) err = call(ctxWithHeaders) - if retry.maxAttempts != nil && attempts >= *retry.maxAttempts { - return true, err + if err != nil && retry.maxAttempts != nil && attempts >= *retry.maxAttempts { + return true, fmt.Errorf("storage: retry failed after %v attempts; last error: %w", *retry.maxAttempts, err) } attempts++ return !errorFunc(err), err @@ -105,18 +105,16 @@ func ShouldRetry(err error) bool { if errors.Is(err, io.ErrUnexpectedEOF) { return true } + if errors.Is(err, net.ErrClosed) { + return true + } switch e := err.(type) { - case *net.OpError: - if strings.Contains(e.Error(), "use of closed network connection") { - // TODO: check against net.ErrClosed (go 1.16+) instead of string - return true - } case *googleapi.Error: // Retry on 408, 429, and 5xx, according to // https://cloud.google.com/storage/docs/exponential-backoff. return e.Code == 408 || e.Code == 429 || (e.Code >= 500 && e.Code < 600) - case *url.Error: + case *net.OpError, *url.Error: // Retry socket-level errors ECONNREFUSED and ECONNRESET (from syscall). // Unfortunately the error type is unexported, so we resort to string // matching. diff --git a/vendor/cloud.google.com/go/storage/notifications.go b/vendor/cloud.google.com/go/storage/notifications.go index 56f3e3daa5..1d6cfdf598 100644 --- a/vendor/cloud.google.com/go/storage/notifications.go +++ b/vendor/cloud.google.com/go/storage/notifications.go @@ -116,7 +116,7 @@ func toProtoNotification(n *Notification) *storagepb.NotificationConfig { } } -var topicRE = regexp.MustCompile("^//pubsub.googleapis.com/projects/([^/]+)/topics/([^/]+)") +var topicRE = regexp.MustCompile(`^//pubsub\.googleapis\.com/projects/([^/]+)/topics/([^/]+)`) // parseNotificationTopic extracts the project and topic IDs from from the full // resource name returned by the service. If the name is malformed, it returns diff --git a/vendor/cloud.google.com/go/storage/storage.go b/vendor/cloud.google.com/go/storage/storage.go index c01085f35d..0c335f38a9 100644 --- a/vendor/cloud.google.com/go/storage/storage.go +++ b/vendor/cloud.google.com/go/storage/storage.go @@ -180,12 +180,12 @@ func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error opts = append([]option.ClientOption{ option.WithoutAuthentication(), internaloption.SkipDialSettingsValidation(), - internaloption.WithDefaultEndpoint(endpoint), + internaloption.WithDefaultEndpointTemplate(endpoint), internaloption.WithDefaultMTLSEndpoint(endpoint), }, opts...) 
} - // htransport selects the correct endpoint among WithEndpoint (user override), WithDefaultEndpoint, and WithDefaultMTLSEndpoint. + // htransport selects the correct endpoint among WithEndpoint (user override), WithDefaultEndpointTemplate, and WithDefaultMTLSEndpoint. hc, ep, err := htransport.NewClient(ctx, opts...) if err != nil { return nil, fmt.Errorf("dialing: %w", err) @@ -232,7 +232,6 @@ func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error // You may configure the client by passing in options from the [google.golang.org/api/option] // package. func NewGRPCClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { - opts = append(defaultGRPCOptions(), opts...) tc, err := newGRPCStorageClient(ctx, withClientOptions(opts...)) if err != nil { return nil, err @@ -898,6 +897,7 @@ type ObjectHandle struct { readCompressed bool // Accept-Encoding: gzip retry *retryConfig overrideRetention *bool + softDeleted bool } // ACL provides access to the object's access control list. @@ -952,7 +952,7 @@ func (o *ObjectHandle) Attrs(ctx context.Context) (attrs *ObjectAttrs, err error return nil, err } opts := makeStorageOpts(true, o.retry, o.userProject) - return o.c.tc.GetObject(ctx, o.bucket, o.object, o.gen, o.encryptionKey, o.conds, opts...) + return o.c.tc.GetObject(ctx, &getObjectParams{o.bucket, o.object, o.gen, o.encryptionKey, o.conds, o.softDeleted}, opts...) } // Update updates an object with the provided attributes. See @@ -1057,6 +1057,50 @@ func (o *ObjectHandle) OverrideUnlockedRetention(override bool) *ObjectHandle { return &o2 } +// SoftDeleted returns an object handle that can be used to get an object that +// has been soft deleted. To get a soft deleted object, the generation must be +// set on the object using ObjectHandle.Generation. +// Note that an error will be returned if a live object is queried using this. +func (o *ObjectHandle) SoftDeleted() *ObjectHandle { + o2 := *o + o2.softDeleted = true + return &o2 +} + +// RestoreOptions allows you to set options when restoring an object. +type RestoreOptions struct { + // CopySourceACL indicates whether the restored object should copy the + // access controls of the source object. Only valid for buckets with + // fine-grained access. If uniform bucket-level access is enabled, setting + // CopySourceACL will cause an error. + CopySourceACL bool +} + +// Restore will restore a soft-deleted object to a live object. +// Note that you must specify a generation to use this method. +func (o *ObjectHandle) Restore(ctx context.Context, opts *RestoreOptions) (*ObjectAttrs, error) { + if err := o.validate(); err != nil { + return nil, err + } + + // Since the generation is required by restore calls, we set the default to + // 0 instead of a negative value, which returns a more descriptive error. + gen := o.gen + if o.gen == defaultGen { + gen = 0 + } + + // Restore is always idempotent because Generation is a required param. + sOpts := makeStorageOpts(true, o.retry, o.userProject) + return o.c.tc.RestoreObject(ctx, &restoreObjectParams{ + bucket: o.bucket, + object: o.object, + gen: gen, + conds: o.conds, + copySourceACL: opts.CopySourceACL, + }, sOpts...) +} + // NewWriter returns a storage Writer that writes to the GCS object // associated with this ObjectHandle. // @@ -1390,6 +1434,21 @@ type ObjectAttrs struct { // Retention contains the retention configuration for this object. // ObjectRetention cannot be configured or reported through the gRPC API.
Retention *ObjectRetention + + // SoftDeleteTime is the time when the object became soft-deleted. + // Soft-deleted objects are only accessible on an object handle returned by + // ObjectHandle.SoftDeleted; if ObjectHandle.SoftDeleted has not been set, + // ObjectHandle.Attrs will return ErrObjectNotExist if the object is soft-deleted. + // This field is read-only. + SoftDeleteTime time.Time + + // HardDeleteTime is the time when the object will be permanently deleted. + // Only set when an object becomes soft-deleted with a soft delete policy. + // Soft-deleted objects are only accessible on an object handle returned by + // ObjectHandle.SoftDeleted; if ObjectHandle.SoftDeleted has not been set, + // ObjectHandle.Attrs will return ErrObjectNotExist if the object is soft-deleted. + // This field is read-only. + HardDeleteTime time.Time } // ObjectRetention contains the retention configuration for this object. @@ -1494,6 +1553,8 @@ func newObject(o *raw.Object) *ObjectAttrs { CustomTime: convertTime(o.CustomTime), ComponentCount: o.ComponentCount, Retention: toObjectRetention(o.Retention), + SoftDeleteTime: convertTime(o.SoftDeleteTime), + HardDeleteTime: convertTime(o.HardDeleteTime), } } @@ -1529,6 +1590,8 @@ func newObjectFromProto(o *storagepb.Object) *ObjectAttrs { Updated: convertProtoTime(o.GetUpdateTime()), CustomTime: convertProtoTime(o.GetCustomTime()), ComponentCount: int64(o.ComponentCount), + SoftDeleteTime: convertProtoTime(o.GetSoftDeleteTime()), + HardDeleteTime: convertProtoTime(o.GetHardDeleteTime()), } } @@ -1637,6 +1700,11 @@ type Query struct { // prefixes returned by the query. Only applicable if Delimiter is set to /. // IncludeFoldersAsPrefixes is not yet implemented in the gRPC API. IncludeFoldersAsPrefixes bool + + // SoftDeleted indicates whether to list soft-deleted objects. + // If true, only objects that have been soft-deleted will be listed. + // By default, soft-deleted objects are not listed. + SoftDeleted bool } // attrToFieldMap maps the field names of ObjectAttrs to the underlying field @@ -1672,6 +1740,8 @@ var attrToFieldMap = map[string]string{ "CustomTime": "customTime", "ComponentCount": "componentCount", "Retention": "retention", + "HardDeleteTime": "hardDeleteTime", + "SoftDeleteTime": "softDeleteTime", } // attrToProtoFieldMap maps the field names of ObjectAttrs to the underlying field @@ -1704,6 +1774,8 @@ var attrToProtoFieldMap = map[string]string{ "CustomerKeySHA256": "customer_encryption", "CustomTime": "custom_time", "ComponentCount": "component_count", + "HardDeleteTime": "hard_delete_time", + "SoftDeleteTime": "soft_delete_time", // MediaLink was explicitly excluded from the proto as it is an HTTP-ism. // "MediaLink": "mediaLink", // TODO: add object retention - b/308194853 diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md index af095f1da9..1a9cedbaf0 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md @@ -1,5 +1,11 @@ # Release History +## 1.13.0 (2024-07-16) + +### Features Added + +- Added runtime.NewRequestFromRequest(), allowing for a policy.Request to be created from an existing *http.Request. 
+ ## 1.12.0 (2024-06-06) ### Features Added diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource/resource_identifier.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource/resource_identifier.go index 187fe82b97..00f2d5a0ab 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource/resource_identifier.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource/resource_identifier.go @@ -192,7 +192,7 @@ func appendNext(parent *ResourceID, parts []string, id string) (*ResourceID, err } if strings.EqualFold(parts[0], providersKey) && (len(parts) == 2 || strings.EqualFold(parts[2], providersKey)) { - //provider resource can only be on a tenant or a subscription parent + // provider resource can only be on a tenant or a subscription parent if parent.ResourceType.String() != SubscriptionResourceType.String() && parent.ResourceType.String() != TenantResourceType.String() { return nil, fmt.Errorf("invalid resource ID: %s", id) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/pipeline.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/pipeline.go index 039b758bf9..6a7c916b43 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/pipeline.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/pipeline.go @@ -34,18 +34,22 @@ func NewPipeline(module, version string, cred azcore.TokenCredential, plOpts azr InsecureAllowCredentialWithHTTP: options.InsecureAllowCredentialWithHTTP, Scopes: []string{conf.Audience + "/.default"}, }) + // we don't want to modify the underlying array in plOpts.PerRetry perRetry := make([]azpolicy.Policy, len(plOpts.PerRetry), len(plOpts.PerRetry)+1) copy(perRetry, plOpts.PerRetry) - plOpts.PerRetry = append(perRetry, authPolicy, exported.PolicyFunc(httpTraceNamespacePolicy)) + perRetry = append(perRetry, authPolicy, exported.PolicyFunc(httpTraceNamespacePolicy)) + plOpts.PerRetry = perRetry if !options.DisableRPRegistration { regRPOpts := armpolicy.RegistrationOptions{ClientOptions: options.ClientOptions} regPolicy, err := NewRPRegistrationPolicy(cred, ®RPOpts) if err != nil { return azruntime.Pipeline{}, err } + // we don't want to modify the underlying array in plOpts.PerCall perCall := make([]azpolicy.Policy, len(plOpts.PerCall), len(plOpts.PerCall)+1) copy(perCall, plOpts.PerCall) - plOpts.PerCall = append(perCall, regPolicy) + perCall = append(perCall, regPolicy) + plOpts.PerCall = perCall } if plOpts.APIVersion.Name == "" { plOpts.APIVersion.Name = "api-version" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go index 3041984d9b..e3e2d4e588 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go @@ -7,6 +7,7 @@ package exported import ( + "bytes" "context" "encoding/base64" "errors" @@ -67,6 +68,42 @@ func (ov opValues) get(value any) bool { return ok } +// NewRequestFromRequest creates a new policy.Request with an existing *http.Request +// Exported as runtime.NewRequestFromRequest(). +func NewRequestFromRequest(req *http.Request) (*Request, error) { + policyReq := &Request{req: req} + + if req.Body != nil { + // we can avoid a body copy here if the underlying stream is already a + // ReadSeekCloser. 
+ readSeekCloser, isReadSeekCloser := req.Body.(io.ReadSeekCloser) + + if !isReadSeekCloser { + // since this is an already populated http.Request we want to copy + // over its body, if it has one. + bodyBytes, err := io.ReadAll(req.Body) + + if err != nil { + return nil, err + } + + if err := req.Body.Close(); err != nil { + return nil, err + } + + readSeekCloser = NopCloser(bytes.NewReader(bodyBytes)) + } + + // SetBody also takes care of updating the http.Request's body + // as well, so they should stay in-sync from this point. + if err := policyReq.SetBody(readSeekCloser, req.Header.Get("Content-Type")); err != nil { + return nil, err + } + } + + return policyReq, nil +} + // NewRequest creates a new Request with the specified input. // Exported as runtime.NewRequest(). func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*Request, error) { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go index 79651fd962..e5b28a9b1a 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go @@ -40,5 +40,5 @@ const ( Module = "azcore" // Version is the semantic version (see http://semver.org) of this module. - Version = "v1.12.0" + Version = "v1.13.0" ) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go index 40ddc8d922..7d34b7803a 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go @@ -15,6 +15,7 @@ import ( "fmt" "io" "mime/multipart" + "net/http" "net/textproto" "net/url" "path" @@ -45,6 +46,11 @@ func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*polic return exported.NewRequest(ctx, httpMethod, endpoint) } +// NewRequestFromRequest creates a new policy.Request with an existing *http.Request +func NewRequestFromRequest(req *http.Request) (*policy.Request, error) { + return exported.NewRequestFromRequest(req) +} + // EncodeQueryParams will parse and encode any query parameters in the specified URL. // Any semicolons will automatically be escaped. func EncodeQueryParams(u string) (string, error) { diff --git a/vendor/github.com/felixge/fgprof/fgprof.go b/vendor/github.com/felixge/fgprof/fgprof.go index e4af2e4052..459787b6f6 100644 --- a/vendor/github.com/felixge/fgprof/fgprof.go +++ b/vendor/github.com/felixge/fgprof/fgprof.go @@ -81,6 +81,11 @@ type profiler struct { selfFrame *runtime.Frame } +// nullTerminationWorkaround deals with a regression in go1.23, see: +// - https://github.com/felixge/fgprof/issues/33 +// - https://go-review.googlesource.com/c/go/+/609815 +var nullTerminationWorkaround = runtime.Version() == "go1.23.0" + // GoroutineProfile returns the stacks of all goroutines currently managed by // the scheduler. This includes both goroutines that are currently running // (On-CPU), as well as waiting (Off-CPU). @@ -107,6 +112,11 @@ func (p *profiler) GoroutineProfile() []runtime.StackRecord { // p.stacks dynamically as well, but let's not over-engineer this until we // understand those cases better. 
for { + if nullTerminationWorkaround { + for i := range p.stacks { + p.stacks[i].Stack0 = [32]uintptr{} + } + } n, ok := runtime.GoroutineProfile(p.stacks) if !ok { p.stacks = make([]runtime.StackRecord, int(float64(n)*1.1)) diff --git a/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json b/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json index d51736e7e3..433693a69a 100644 --- a/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json +++ b/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json @@ -1,3 +1,3 @@ { - "v2": "2.12.4" + "v2": "2.12.5" } diff --git a/vendor/github.com/googleapis/gax-go/v2/CHANGES.md b/vendor/github.com/googleapis/gax-go/v2/CHANGES.md index 7e36eb48ff..b64522dfe0 100644 --- a/vendor/github.com/googleapis/gax-go/v2/CHANGES.md +++ b/vendor/github.com/googleapis/gax-go/v2/CHANGES.md @@ -1,5 +1,12 @@ # Changelog +## [2.12.5](https://github.com/googleapis/gax-go/compare/v2.12.4...v2.12.5) (2024-06-18) + + +### Bug Fixes + +* **v2/apierror:** fix (*APIError).Error() for unwrapped Status ([#351](https://github.com/googleapis/gax-go/issues/351)) ([22c16e7](https://github.com/googleapis/gax-go/commit/22c16e7bff5402bdc4c25063771cdd01c650b500)), refs [#350](https://github.com/googleapis/gax-go/issues/350) + ## [2.12.4](https://github.com/googleapis/gax-go/compare/v2.12.3...v2.12.4) (2024-05-03) diff --git a/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go b/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go index d785a065ca..7de60773d6 100644 --- a/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go +++ b/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go @@ -206,8 +206,10 @@ func (a *APIError) Error() string { // Truncate the googleapi.Error message because it dumps the Details in // an ugly way. msg = fmt.Sprintf("googleapi: Error %d: %s", a.httpErr.Code, a.httpErr.Message) - } else if a.status != nil { + } else if a.status != nil && a.err != nil { msg = a.err.Error() + } else if a.status != nil { + msg = a.status.Message() } return strings.TrimSpace(fmt.Sprintf("%s\n%s", msg, a.details)) } diff --git a/vendor/github.com/googleapis/gax-go/v2/header.go b/vendor/github.com/googleapis/gax-go/v2/header.go index 3e53729e5f..f5273985af 100644 --- a/vendor/github.com/googleapis/gax-go/v2/header.go +++ b/vendor/github.com/googleapis/gax-go/v2/header.go @@ -163,11 +163,38 @@ func insertMetadata(ctx context.Context, keyvals ...string) metadata.MD { out = metadata.MD(make(map[string][]string)) } headers := callctx.HeadersFromContext(ctx) - for k, v := range headers { - out[k] = append(out[k], v...) + + // x-goog-api-client is a special case that we want to make sure gets merged + // into a single header. + const xGoogHeader = "x-goog-api-client" + var mergedXgoogHeader strings.Builder + + for k, vals := range headers { + if k == xGoogHeader { + // Merge all values for the x-goog-api-client header set on the ctx. + for _, v := range vals { + mergedXgoogHeader.WriteString(v) + mergedXgoogHeader.WriteRune(' ') + } + continue + } + out[k] = append(out[k], vals...) } for i := 0; i < len(keyvals); i = i + 2 { out[keyvals[i]] = append(out[keyvals[i]], keyvals[i+1]) + + if keyvals[i] == xGoogHeader { + // Merge the x-goog-api-client header values set on the ctx with any + // values passed in for it from the client. + mergedXgoogHeader.WriteString(keyvals[i+1]) + mergedXgoogHeader.WriteRune(' ') + } + } + + // Add the x goog header back in, replacing the separate values that were set. 
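+ // Each merged value was written with a trailing space, which is trimmed below.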
+ if mergedXgoogHeader.Len() > 0 { + out[xGoogHeader] = []string{mergedXgoogHeader.String()[:mergedXgoogHeader.Len()-1]} } + return out } diff --git a/vendor/github.com/googleapis/gax-go/v2/internal/version.go b/vendor/github.com/googleapis/gax-go/v2/internal/version.go index 3006ad7bd9..4f780f4639 100644 --- a/vendor/github.com/googleapis/gax-go/v2/internal/version.go +++ b/vendor/github.com/googleapis/gax-go/v2/internal/version.go @@ -30,4 +30,4 @@ package internal // Version is the current tagged release of the library. -const Version = "2.12.4" +const Version = "2.12.5" diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel index 78d7c9f5c8..a65d88eb86 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel @@ -73,7 +73,7 @@ go_test( "@org_golang_google_genproto_googleapis_api//httpbody", "@org_golang_google_genproto_googleapis_rpc//errdetails", "@org_golang_google_genproto_googleapis_rpc//status", - "@org_golang_google_grpc//:go_default_library", + "@org_golang_google_grpc//:grpc", "@org_golang_google_grpc//codes", "@org_golang_google_grpc//health/grpc_health_v1", "@org_golang_google_grpc//metadata", diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go index 5dd4e44786..2f2b342431 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go @@ -49,6 +49,7 @@ var malformedHTTPHeaders = map[string]struct{}{ type ( rpcMethodKey struct{} httpPathPatternKey struct{} + httpPatternKey struct{} AnnotateContextOption func(ctx context.Context) context.Context ) @@ -404,3 +405,13 @@ func HTTPPathPattern(ctx context.Context) (string, bool) { func withHTTPPathPattern(ctx context.Context, httpPathPattern string) context.Context { return context.WithValue(ctx, httpPathPatternKey{}, httpPathPattern) } + +// HTTPPattern returns the HTTP path pattern struct relating to the HTTP handler, if one exists. 
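+//
+// For example, a middleware registered via WithMiddlewares (added below) could
+// label logs by route template; an illustrative sketch, with r being the
+// middleware's *http.Request:
+//
+//	if pattern, ok := HTTPPattern(r.Context()); ok {
+//		log.Printf("matched %s", pattern.String())
+//	}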
+func HTTPPattern(ctx context.Context) (Pattern, bool) { + v, ok := ctx.Value(httpPatternKey{}).(Pattern) + return v, ok +} + +func withHTTPPattern(ctx context.Context, httpPattern Pattern) context.Context { + return context.WithValue(ctx, httpPatternKey{}, httpPattern) +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go index 5682998699..01f5734191 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go @@ -93,6 +93,7 @@ func HTTPError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.R func DefaultHTTPErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, err error) { // return Internal when Marshal failed const fallback = `{"code": 13, "message": "failed to marshal error message"}` + const fallbackRewriter = `{"code": 13, "message": "failed to rewrite error message"}` var customStatus *HTTPStatusError if errors.As(err, &customStatus) { @@ -100,19 +101,28 @@ func DefaultHTTPErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marsh } s := status.Convert(err) - pb := s.Proto() w.Header().Del("Trailer") w.Header().Del("Transfer-Encoding") - contentType := marshaler.ContentType(pb) + respRw, err := mux.forwardResponseRewriter(ctx, s.Proto()) + if err != nil { + grpclog.Errorf("Failed to rewrite error message %q: %v", s, err) + w.WriteHeader(http.StatusInternalServerError) + if _, err := io.WriteString(w, fallbackRewriter); err != nil { + grpclog.Errorf("Failed to write response: %v", err) + } + return + } + + contentType := marshaler.ContentType(respRw) w.Header().Set("Content-Type", contentType) if s.Code() == codes.Unauthenticated { w.Header().Set("WWW-Authenticate", s.Message()) } - buf, merr := marshaler.Marshal(pb) + buf, merr := marshaler.Marshal(respRw) if merr != nil { grpclog.Errorf("Failed to marshal error message %q: %v", s, merr) w.WriteHeader(http.StatusInternalServerError) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go index de1eef1f4f..9f50a569e9 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go @@ -3,6 +3,7 @@ package runtime import ( "context" "errors" + "fmt" "io" "net/http" "net/textproto" @@ -55,20 +56,27 @@ func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshal return } + respRw, err := mux.forwardResponseRewriter(ctx, resp) + if err != nil { + grpclog.Errorf("Rewrite error: %v", err) + handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err, delimiter) + return + } + if !wroteHeader { - w.Header().Set("Content-Type", marshaler.ContentType(resp)) + w.Header().Set("Content-Type", marshaler.ContentType(respRw)) } var buf []byte - httpBody, isHTTPBody := resp.(*httpbody.HttpBody) + httpBody, isHTTPBody := respRw.(*httpbody.HttpBody) switch { - case resp == nil: + case respRw == nil: buf, err = marshaler.Marshal(errorChunk(status.New(codes.Internal, "empty response"))) case isHTTPBody: buf = httpBody.GetData() default: - result := map[string]interface{}{"result": resp} - if rb, ok := resp.(responseBody); ok { + result := map[string]interface{}{"result": respRw} + if rb, ok := respRw.(responseBody); ok { result["result"] = rb.XXX_ResponseBody() } @@ 
-164,12 +172,17 @@ func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marsha HTTPError(ctx, mux, marshaler, w, req, err) return } + respRw, err := mux.forwardResponseRewriter(ctx, resp) + if err != nil { + grpclog.Errorf("Rewrite error: %v", err) + HTTPError(ctx, mux, marshaler, w, req, err) + return + } var buf []byte - var err error - if rb, ok := resp.(responseBody); ok { + if rb, ok := respRw.(responseBody); ok { buf, err = marshaler.Marshal(rb.XXX_ResponseBody()) } else { - buf, err = marshaler.Marshal(resp) + buf, err = marshaler.Marshal(respRw) } if err != nil { grpclog.Errorf("Marshal error: %v", err) @@ -201,8 +214,7 @@ func handleForwardResponseOptions(ctx context.Context, w http.ResponseWriter, re } for _, opt := range opts { if err := opt(ctx, w, resp); err != nil { - grpclog.Errorf("Error handling ForwardResponseOptions: %v", err) - return err + return fmt.Errorf("error handling ForwardResponseOptions: %w", err) } } return nil diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go index ed9a7e4387..60c2065ddc 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go @@ -48,12 +48,19 @@ var encodedPathSplitter = regexp.MustCompile("(/|%2F)") // A HandlerFunc handles a specific pair of path pattern and HTTP method. type HandlerFunc func(w http.ResponseWriter, r *http.Request, pathParams map[string]string) +// A Middleware handler wraps another HandlerFunc to do some pre- and/or post-processing of the request. This is used as an alternative to gRPC interceptors when using the direct-to-implementation +// registration methods. It is generally recommended to use gRPC client or server interceptors instead +// where possible. +type Middleware func(HandlerFunc) HandlerFunc + // ServeMux is a request multiplexer for grpc-gateway. // It matches http requests to patterns and invokes the corresponding handler. type ServeMux struct { // handlers maps HTTP method to a list of handlers. handlers map[string][]handler + middlewares []Middleware forwardResponseOptions []func(context.Context, http.ResponseWriter, proto.Message) error + forwardResponseRewriter ForwardResponseRewriter marshalers marshalerRegistry incomingHeaderMatcher HeaderMatcherFunc outgoingHeaderMatcher HeaderMatcherFunc @@ -69,6 +76,24 @@ type ServeMux struct { // ServeMuxOption is an option that can be given to a ServeMux on construction. type ServeMuxOption func(*ServeMux) +// ForwardResponseRewriter is the signature of a function that is capable of rewriting messages +// before they are forwarded in a unary, stream, or error response. +type ForwardResponseRewriter func(ctx context.Context, response proto.Message) (any, error) + +// WithForwardResponseRewriter returns a ServeMuxOption that allows for implementers to insert logic +// that can rewrite the final response before it is forwarded. +// +// The response rewriter function is called during unary message forwarding, stream message +// forwarding and when errors are being forwarded. +// +// NOTE: Using this option will likely make what is generated by `protoc-gen-openapiv2` incorrect, +// since this option involves making runtime changes to the response shape or type.
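+//
+// A minimal sketch (the envelope shape here is illustrative, not part of the API):
+//
+//	mux := runtime.NewServeMux(runtime.WithForwardResponseRewriter(
+//		func(ctx context.Context, resp proto.Message) (any, error) {
+//			return map[string]any{"data": resp}, nil
+//		},
+//	))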
+func WithForwardResponseRewriter(fwdResponseRewriter ForwardResponseRewriter) ServeMuxOption { + return func(sm *ServeMux) { + sm.forwardResponseRewriter = fwdResponseRewriter + } +} + // WithForwardResponseOption returns a ServeMuxOption representing the forwardResponseOption. // // forwardResponseOption is an option that will be called on the relevant context.Context, @@ -89,6 +114,15 @@ func WithUnescapingMode(mode UnescapingMode) ServeMuxOption { } } +// WithMiddlewares sets server middleware for all handlers. This is useful as an alternative to gRPC +// interceptors when using the direct-to-implementation registration methods and cannot rely +// on gRPC interceptors. It's recommended to use gRPC interceptors instead if possible. +func WithMiddlewares(middlewares ...Middleware) ServeMuxOption { + return func(serveMux *ServeMux) { + serveMux.middlewares = append(serveMux.middlewares, middlewares...) + } +} + // SetQueryParameterParser sets the query parameter parser, used to populate message from query parameters. // Configuring this will mean the generated OpenAPI output is no longer correct, and it should be // done with careful consideration. @@ -277,13 +311,14 @@ func WithHealthzEndpoint(healthCheckClient grpc_health_v1.HealthClient) ServeMux // NewServeMux returns a new ServeMux whose internal mapping is empty. func NewServeMux(opts ...ServeMuxOption) *ServeMux { serveMux := &ServeMux{ - handlers: make(map[string][]handler), - forwardResponseOptions: make([]func(context.Context, http.ResponseWriter, proto.Message) error, 0), - marshalers: makeMarshalerMIMERegistry(), - errorHandler: DefaultHTTPErrorHandler, - streamErrorHandler: DefaultStreamErrorHandler, - routingErrorHandler: DefaultRoutingErrorHandler, - unescapingMode: UnescapingModeDefault, + handlers: make(map[string][]handler), + forwardResponseOptions: make([]func(context.Context, http.ResponseWriter, proto.Message) error, 0), + forwardResponseRewriter: func(ctx context.Context, response proto.Message) (any, error) { return response, nil }, + marshalers: makeMarshalerMIMERegistry(), + errorHandler: DefaultHTTPErrorHandler, + streamErrorHandler: DefaultStreamErrorHandler, + routingErrorHandler: DefaultRoutingErrorHandler, + unescapingMode: UnescapingModeDefault, } for _, opt := range opts { @@ -305,6 +340,9 @@ func NewServeMux(opts ...ServeMuxOption) *ServeMux { // Handle associates "h" to the pair of HTTP method and path pattern. func (s *ServeMux) Handle(meth string, pat Pattern, h HandlerFunc) { + if len(s.middlewares) > 0 { + h = chainMiddlewares(s.middlewares)(h) + } s.handlers[meth] = append([]handler{{pat: pat, h: h}}, s.handlers[meth]...) 
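// The newest registration is prepended, so it is matched ahead of handlers registered earlier for the same method.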
} @@ -405,7 +443,7 @@ func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) { } continue } - h.h(w, r, pathParams) + s.handleHandler(h, w, r, pathParams) return } @@ -458,7 +496,7 @@ func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) { s.errorHandler(ctx, s, outboundMarshaler, w, r, sterr) return } - h.h(w, r, pathParams) + s.handleHandler(h, w, r, pathParams) return } _, outboundMarshaler := MarshalerForRequest(s, r) @@ -484,3 +522,16 @@ type handler struct { pat Pattern h HandlerFunc } + +func (s *ServeMux) handleHandler(h handler, w http.ResponseWriter, r *http.Request, pathParams map[string]string) { + h.h(w, r.WithContext(withHTTPPattern(r.Context(), h.pat)), pathParams) +} + +func chainMiddlewares(mws []Middleware) Middleware { + return func(next HandlerFunc) HandlerFunc { + for i := len(mws); i > 0; i-- { + next = mws[i-1](next) + } + return next + } +} diff --git a/vendor/github.com/hashicorp/consul/api/config_entry_mesh.go b/vendor/github.com/hashicorp/consul/api/config_entry_mesh.go index 1a1ebb8b53..e035d15967 100644 --- a/vendor/github.com/hashicorp/consul/api/config_entry_mesh.go +++ b/vendor/github.com/hashicorp/consul/api/config_entry_mesh.go @@ -26,6 +26,14 @@ type MeshConfigEntry struct { // MutualTLSMode=permissive in either service-defaults or proxy-defaults. AllowEnablingPermissiveMutualTLS bool `json:",omitempty" alias:"allow_enabling_permissive_mutual_tls"` + // ValidateClusters controls whether the clusters the route table refers to are validated. The default value is + // false. When set to false and a route refers to a cluster that does not exist, the route table loads and routing + // to a non-existent cluster results in a 404. When set to true and a route refers to a cluster that does not exist, + // the route table will not load. Refer to + // [HTTP route configuration in the Envoy docs](https://www.envoyproxy.io/docs/envoy/latest/api-v3/config/route/v3/route.proto#envoy-v3-api-field-config-route-v3-routeconfiguration-validate-clusters) + // for more details. + ValidateClusters bool `json:",omitempty" alias:"validate_clusters"` + TLS *MeshTLSConfig `json:",omitempty"` HTTP *MeshHTTPConfig `json:",omitempty"` diff --git a/vendor/github.com/miekg/dns/README.md b/vendor/github.com/miekg/dns/README.md index 58275db3b8..8d5a2a4789 100644 --- a/vendor/github.com/miekg/dns/README.md +++ b/vendor/github.com/miekg/dns/README.md @@ -148,6 +148,7 @@ Example programs can be found in the `github.com/miekg/exdns` repository. * 3225 - DO bit (DNSSEC OK) * 340{1,2,3} - NAPTR record * 3445 - Limiting the scope of (DNS)KEY +* 3596 - AAAA record * 3597 - Unknown RRs * 4025 - A Method for Storing IPsec Keying Material in DNS * 403{3,4,5} - DNSSEC + validation functions @@ -188,6 +189,9 @@ Example programs can be found in the `github.com/miekg/exdns` repository.
* 8777 - DNS Reverse IP Automatic Multicast Tunneling (AMT) Discovery * 8914 - Extended DNS Errors * 8976 - Message Digest for DNS Zones (ZONEMD RR) +* 9460 - Service Binding and Parameter Specification via the DNS +* 9461 - Service Binding Mapping for DNS Servers +* 9462 - Discovery of Designated Resolvers ## Loosely Based Upon diff --git a/vendor/github.com/miekg/dns/edns.go b/vendor/github.com/miekg/dns/edns.go index 1b58e8f0aa..c1bbdaae2e 100644 --- a/vendor/github.com/miekg/dns/edns.go +++ b/vendor/github.com/miekg/dns/edns.go @@ -756,36 +756,48 @@ const ( ExtendedErrorCodeNoReachableAuthority ExtendedErrorCodeNetworkError ExtendedErrorCodeInvalidData + ExtendedErrorCodeSignatureExpiredBeforeValid + ExtendedErrorCodeTooEarly + ExtendedErrorCodeUnsupportedNSEC3IterValue + ExtendedErrorCodeUnableToConformToPolicy + ExtendedErrorCodeSynthesized + ExtendedErrorCodeInvalidQueryType ) // ExtendedErrorCodeToString maps extended error info codes to a human readable // description. var ExtendedErrorCodeToString = map[uint16]string{ - ExtendedErrorCodeOther: "Other", - ExtendedErrorCodeUnsupportedDNSKEYAlgorithm: "Unsupported DNSKEY Algorithm", - ExtendedErrorCodeUnsupportedDSDigestType: "Unsupported DS Digest Type", - ExtendedErrorCodeStaleAnswer: "Stale Answer", - ExtendedErrorCodeForgedAnswer: "Forged Answer", - ExtendedErrorCodeDNSSECIndeterminate: "DNSSEC Indeterminate", - ExtendedErrorCodeDNSBogus: "DNSSEC Bogus", - ExtendedErrorCodeSignatureExpired: "Signature Expired", - ExtendedErrorCodeSignatureNotYetValid: "Signature Not Yet Valid", - ExtendedErrorCodeDNSKEYMissing: "DNSKEY Missing", - ExtendedErrorCodeRRSIGsMissing: "RRSIGs Missing", - ExtendedErrorCodeNoZoneKeyBitSet: "No Zone Key Bit Set", - ExtendedErrorCodeNSECMissing: "NSEC Missing", - ExtendedErrorCodeCachedError: "Cached Error", - ExtendedErrorCodeNotReady: "Not Ready", - ExtendedErrorCodeBlocked: "Blocked", - ExtendedErrorCodeCensored: "Censored", - ExtendedErrorCodeFiltered: "Filtered", - ExtendedErrorCodeProhibited: "Prohibited", - ExtendedErrorCodeStaleNXDOMAINAnswer: "Stale NXDOMAIN Answer", - ExtendedErrorCodeNotAuthoritative: "Not Authoritative", - ExtendedErrorCodeNotSupported: "Not Supported", - ExtendedErrorCodeNoReachableAuthority: "No Reachable Authority", - ExtendedErrorCodeNetworkError: "Network Error", - ExtendedErrorCodeInvalidData: "Invalid Data", + ExtendedErrorCodeOther: "Other", + ExtendedErrorCodeUnsupportedDNSKEYAlgorithm: "Unsupported DNSKEY Algorithm", + ExtendedErrorCodeUnsupportedDSDigestType: "Unsupported DS Digest Type", + ExtendedErrorCodeStaleAnswer: "Stale Answer", + ExtendedErrorCodeForgedAnswer: "Forged Answer", + ExtendedErrorCodeDNSSECIndeterminate: "DNSSEC Indeterminate", + ExtendedErrorCodeDNSBogus: "DNSSEC Bogus", + ExtendedErrorCodeSignatureExpired: "Signature Expired", + ExtendedErrorCodeSignatureNotYetValid: "Signature Not Yet Valid", + ExtendedErrorCodeDNSKEYMissing: "DNSKEY Missing", + ExtendedErrorCodeRRSIGsMissing: "RRSIGs Missing", + ExtendedErrorCodeNoZoneKeyBitSet: "No Zone Key Bit Set", + ExtendedErrorCodeNSECMissing: "NSEC Missing", + ExtendedErrorCodeCachedError: "Cached Error", + ExtendedErrorCodeNotReady: "Not Ready", + ExtendedErrorCodeBlocked: "Blocked", + ExtendedErrorCodeCensored: "Censored", + ExtendedErrorCodeFiltered: "Filtered", + ExtendedErrorCodeProhibited: "Prohibited", + ExtendedErrorCodeStaleNXDOMAINAnswer: "Stale NXDOMAIN Answer", + ExtendedErrorCodeNotAuthoritative: "Not Authoritative", + ExtendedErrorCodeNotSupported: "Not Supported", + 
ExtendedErrorCodeNoReachableAuthority: "No Reachable Authority", + ExtendedErrorCodeNetworkError: "Network Error", + ExtendedErrorCodeInvalidData: "Invalid Data", + ExtendedErrorCodeSignatureExpiredBeforeValid: "Signature Expired Before Valid", + ExtendedErrorCodeTooEarly: "Too Early", + ExtendedErrorCodeUnsupportedNSEC3IterValue: "Unsupported NSEC3 Iterations Value", + ExtendedErrorCodeUnableToConformToPolicy: "Unable To Conform To Policy", + ExtendedErrorCodeSynthesized: "Synthesized", + ExtendedErrorCodeInvalidQueryType: "Invalid Query Type", } // StringToExtendedErrorCode is a map from human readable descriptions to diff --git a/vendor/github.com/miekg/dns/scan_rr.go b/vendor/github.com/miekg/dns/scan_rr.go index 7d1ade7d87..c1a76995e7 100644 --- a/vendor/github.com/miekg/dns/scan_rr.go +++ b/vendor/github.com/miekg/dns/scan_rr.go @@ -55,7 +55,10 @@ func endingToTxtSlice(c *zlexer, errstr string) ([]string, *ParseError) { sx := []string{} p := 0 for { - i := escapedStringOffset(l.token[p:], 255) + i, ok := escapedStringOffset(l.token[p:], 255) + if !ok { + return nil, &ParseError{err: errstr, lex: l} + } if i != -1 && p+i != len(l.token) { sx = append(sx, l.token[p:p+i]) } else { @@ -1919,29 +1922,36 @@ func (rr *APL) parse(c *zlexer, o string) *ParseError { // escapedStringOffset finds the offset within a string (which may contain escape // sequences) that corresponds to a certain byte offset. If the input offset is -// out of bounds, -1 is returned. -func escapedStringOffset(s string, byteOffset int) int { - if byteOffset == 0 { - return 0 +// out of bounds, -1 is returned (which is *not* considered an error). +func escapedStringOffset(s string, desiredByteOffset int) (int, bool) { + if desiredByteOffset == 0 { + return 0, true } - offset := 0 - for i := 0; i < len(s); i++ { - offset += 1 + currentByteOffset, i := 0, 0 + + for i < len(s) { + currentByteOffset += 1 // Skip escape sequences if s[i] != '\\' { - // Not an escape sequence; nothing to do. + // Single plain byte, not an escape sequence. + i++ } else if isDDD(s[i+1:]) { - i += 3 + // Skip backslash and DDD. + i += 4 + } else if len(s[i+1:]) < 1 { + // No character following the backslash; that's an error. + return 0, false } else { - i++ + // Skip backslash and following byte. + i += 2 } - if offset >= byteOffset { - return i + 1 + if currentByteOffset >= desiredByteOffset { + return i, true } } - return -1 + return -1, true } diff --git a/vendor/github.com/miekg/dns/server.go b/vendor/github.com/miekg/dns/server.go index 0207d6da22..81580d1e5f 100644 --- a/vendor/github.com/miekg/dns/server.go +++ b/vendor/github.com/miekg/dns/server.go @@ -188,6 +188,14 @@ type DecorateReader func(Reader) Reader // Implementations should never return a nil Writer. type DecorateWriter func(Writer) Writer +// MsgInvalidFunc is a listener hook for observing incoming messages that were discarded +// because they could not be parsed. +// Every message that is read by a Reader will eventually be provided to the Handler, +// rejected (or ignored) by the MsgAcceptFunc, or passed to this function. +type MsgInvalidFunc func(m []byte, err error) + +func DefaultMsgInvalidFunc(m []byte, err error) {} + // A Server defines parameters for running an DNS server. type Server struct { // Address to listen on, ":dns" if empty. @@ -233,6 +241,8 @@ type Server struct { // AcceptMsgFunc will check the incoming message and will reject it early in the process. // By default DefaultMsgAcceptFunc will be used. 
MsgAcceptFunc MsgAcceptFunc + // MsgInvalidFunc is optional, will be called if a message is received but cannot be parsed. + MsgInvalidFunc MsgInvalidFunc // Shutdown handling lock sync.RWMutex @@ -277,6 +287,9 @@ func (srv *Server) init() { if srv.MsgAcceptFunc == nil { srv.MsgAcceptFunc = DefaultMsgAcceptFunc } + if srv.MsgInvalidFunc == nil { + srv.MsgInvalidFunc = DefaultMsgInvalidFunc + } if srv.Handler == nil { srv.Handler = DefaultServeMux } @@ -531,6 +544,7 @@ func (srv *Server) serveUDP(l net.PacketConn) error { if cap(m) == srv.UDPSize { srv.udpPool.Put(m[:srv.UDPSize]) } + srv.MsgInvalidFunc(m, ErrShortRead) continue } wg.Add(1) @@ -611,6 +625,7 @@ func (srv *Server) serveUDPPacket(wg *sync.WaitGroup, m []byte, u net.PacketConn func (srv *Server) serveDNS(m []byte, w *response) { dh, off, err := unpackMsgHdr(m, 0) if err != nil { + srv.MsgInvalidFunc(m, err) // Let client hang, they are sending crap; any reply can be used to amplify. return } @@ -620,10 +635,12 @@ func (srv *Server) serveDNS(m []byte, w *response) { switch action := srv.MsgAcceptFunc(dh); action { case MsgAccept: - if req.unpack(dh, m, off) == nil { + err := req.unpack(dh, m, off) + if err == nil { break } + srv.MsgInvalidFunc(m, err) fallthrough case MsgReject, MsgRejectNotImplemented: opcode := req.Opcode diff --git a/vendor/github.com/miekg/dns/svcb.go b/vendor/github.com/miekg/dns/svcb.go index c1a740b684..310c7d11f5 100644 --- a/vendor/github.com/miekg/dns/svcb.go +++ b/vendor/github.com/miekg/dns/svcb.go @@ -14,7 +14,7 @@ import ( // SVCBKey is the type of the keys used in the SVCB RR. type SVCBKey uint16 -// Keys defined in draft-ietf-dnsop-svcb-https-08 Section 14.3.2. +// Keys defined in rfc9460 const ( SVCB_MANDATORY SVCBKey = iota SVCB_ALPN @@ -23,7 +23,8 @@ const ( SVCB_IPV4HINT SVCB_ECHCONFIG SVCB_IPV6HINT - SVCB_DOHPATH // draft-ietf-add-svcb-dns-02 Section 9 + SVCB_DOHPATH // rfc9461 Section 5 + SVCB_OHTTP // rfc9540 Section 8 svcb_RESERVED SVCBKey = 65535 ) @@ -37,6 +38,7 @@ var svcbKeyToStringMap = map[SVCBKey]string{ SVCB_ECHCONFIG: "ech", SVCB_IPV6HINT: "ipv6hint", SVCB_DOHPATH: "dohpath", + SVCB_OHTTP: "ohttp", } var svcbStringToKeyMap = reverseSVCBKeyMap(svcbKeyToStringMap) @@ -201,6 +203,8 @@ func makeSVCBKeyValue(key SVCBKey) SVCBKeyValue { return new(SVCBIPv6Hint) case SVCB_DOHPATH: return new(SVCBDoHPath) + case SVCB_OHTTP: + return new(SVCBOhttp) case svcb_RESERVED: return nil default: @@ -771,8 +775,8 @@ func (s *SVCBIPv6Hint) copy() SVCBKeyValue { // SVCBDoHPath pair is used to indicate the URI template that the // clients may use to construct a DNS over HTTPS URI. // -// See RFC xxxx (https://datatracker.ietf.org/doc/html/draft-ietf-add-svcb-dns-02) -// and RFC yyyy (https://datatracker.ietf.org/doc/html/draft-ietf-add-ddr-06). +// See RFC 9461 (https://datatracker.ietf.org/doc/html/rfc9461) +// and RFC 9462 (https://datatracker.ietf.org/doc/html/rfc9462). // // A basic example of using the dohpath option together with the alpn // option to indicate support for DNS over HTTPS on a certain path: @@ -816,6 +820,44 @@ func (s *SVCBDoHPath) copy() SVCBKeyValue { } } +// The "ohttp" SvcParamKey is used to indicate that a service described in a SVCB RR +// can be accessed as a target using an associated gateway. +// Both the presentation and wire-format values for the "ohttp" parameter MUST be empty. 
+// +// See RFC 9460 (https://datatracker.ietf.org/doc/html/rfc9460/) +// and RFC 9230 (https://datatracker.ietf.org/doc/html/rfc9230/) +// +// A basic example of using the ohttp option together with the alpn +// option to indicate support for Oblivious HTTP: +// +// s := new(dns.SVCB) +// s.Hdr = dns.RR_Header{Name: ".", Rrtype: dns.TypeSVCB, Class: dns.ClassINET} +// e := new(dns.SVCBAlpn) +// e.Alpn = []string{"h2", "h3"} +// p := new(dns.SVCBOhttp) +// s.Value = append(s.Value, e, p) +type SVCBOhttp struct{} + +func (*SVCBOhttp) Key() SVCBKey { return SVCB_OHTTP } +func (*SVCBOhttp) copy() SVCBKeyValue { return &SVCBOhttp{} } +func (*SVCBOhttp) pack() ([]byte, error) { return []byte{}, nil } +func (*SVCBOhttp) String() string { return "" } +func (*SVCBOhttp) len() int { return 0 } + +func (*SVCBOhttp) unpack(b []byte) error { + if len(b) != 0 { + return errors.New("dns: svcbohttp: svcbohttp must have no value") + } + return nil +} + +func (*SVCBOhttp) parse(b string) error { + if b != "" { + return errors.New("dns: svcbohttp: svcbohttp must have no value") + } + return nil +} + // SVCBLocal pair is intended for experimental/private use. The key is recommended // to be in the range [SVCB_PRIVATE_LOWER, SVCB_PRIVATE_UPPER]. // Basic use pattern for creating a keyNNNNN option: diff --git a/vendor/github.com/miekg/dns/types.go b/vendor/github.com/miekg/dns/types.go index 8e3129cbd2..7a34c14ca0 100644 --- a/vendor/github.com/miekg/dns/types.go +++ b/vendor/github.com/miekg/dns/types.go @@ -96,6 +96,7 @@ const ( TypeLP uint16 = 107 TypeEUI48 uint16 = 108 TypeEUI64 uint16 = 109 + TypeNXNAME uint16 = 128 TypeURI uint16 = 256 TypeCAA uint16 = 257 TypeAVC uint16 = 258 @@ -294,6 +295,19 @@ func (*NULL) parse(c *zlexer, origin string) *ParseError { return &ParseError{err: "NULL records do not have a presentation format"} } +// NXNAME is a meta record. See https://www.iana.org/go/draft-ietf-dnsop-compact-denial-of-existence-04 +// Reference: https://www.iana.org/assignments/dns-parameters/dns-parameters.xhtml +type NXNAME struct { + Hdr RR_Header + // Does not have any rdata +} + +func (rr *NXNAME) String() string { return rr.Hdr.String() } + +func (*NXNAME) parse(c *zlexer, origin string) *ParseError { + return &ParseError{err: "NXNAME records do not have a presentation format"} +} + // CNAME RR. See RFC 1034. type CNAME struct { Hdr RR_Header diff --git a/vendor/github.com/miekg/dns/version.go b/vendor/github.com/miekg/dns/version.go index dc34e5902b..00c8629f27 100644 --- a/vendor/github.com/miekg/dns/version.go +++ b/vendor/github.com/miekg/dns/version.go @@ -3,7 +3,7 @@ package dns import "fmt" // Version is current version of this library. -var Version = v{1, 1, 58} +var Version = v{1, 1, 62} // v holds the version of this library.
type v struct { diff --git a/vendor/github.com/miekg/dns/xfr.go b/vendor/github.com/miekg/dns/xfr.go index 2187c456db..5cfbb516af 100644 --- a/vendor/github.com/miekg/dns/xfr.go +++ b/vendor/github.com/miekg/dns/xfr.go @@ -209,6 +209,7 @@ func (t *Transfer) inIxfr(q *Msg, c chan *Envelope) { // ch := make(chan *dns.Envelope) // tr := new(dns.Transfer) // var wg sync.WaitGroup +// wg.Add(1) // go func() { // tr.Out(w, r, ch) // wg.Done() diff --git a/vendor/github.com/miekg/dns/zduplicate.go b/vendor/github.com/miekg/dns/zduplicate.go index 03029fb3eb..330c05395f 100644 --- a/vendor/github.com/miekg/dns/zduplicate.go +++ b/vendor/github.com/miekg/dns/zduplicate.go @@ -886,6 +886,15 @@ func (r1 *NULL) isDuplicate(_r2 RR) bool { return true } +func (r1 *NXNAME) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*NXNAME) + if !ok { + return false + } + _ = r2 + return true +} + func (r1 *NXT) isDuplicate(_r2 RR) bool { r2, ok := _r2.(*NXT) if !ok { diff --git a/vendor/github.com/miekg/dns/zmsg.go b/vendor/github.com/miekg/dns/zmsg.go index 39b3bc8102..5a6cf4c6ad 100644 --- a/vendor/github.com/miekg/dns/zmsg.go +++ b/vendor/github.com/miekg/dns/zmsg.go @@ -706,6 +706,10 @@ func (rr *NULL) pack(msg []byte, off int, compression compressionMap, compress b return off, nil } +func (rr *NXNAME) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + return off, nil +} + func (rr *NXT) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { off, err = packDomainName(rr.NextDomain, msg, off, compression, false) if err != nil { @@ -2266,6 +2270,13 @@ func (rr *NULL) unpack(msg []byte, off int) (off1 int, err error) { return off, nil } +func (rr *NXNAME) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + return off, nil +} + func (rr *NXT) unpack(msg []byte, off int) (off1 int, err error) { rdStart := off _ = rdStart diff --git a/vendor/github.com/miekg/dns/ztypes.go b/vendor/github.com/miekg/dns/ztypes.go index 2c70fc44d6..11f13ecf9c 100644 --- a/vendor/github.com/miekg/dns/ztypes.go +++ b/vendor/github.com/miekg/dns/ztypes.go @@ -60,6 +60,7 @@ var TypeToRR = map[uint16]func() RR{ TypeNSEC3: func() RR { return new(NSEC3) }, TypeNSEC3PARAM: func() RR { return new(NSEC3PARAM) }, TypeNULL: func() RR { return new(NULL) }, + TypeNXNAME: func() RR { return new(NXNAME) }, TypeNXT: func() RR { return new(NXT) }, TypeOPENPGPKEY: func() RR { return new(OPENPGPKEY) }, TypeOPT: func() RR { return new(OPT) }, @@ -146,6 +147,7 @@ var TypeToString = map[uint16]string{ TypeNSEC3: "NSEC3", TypeNSEC3PARAM: "NSEC3PARAM", TypeNULL: "NULL", + TypeNXNAME: "NXNAME", TypeNXT: "NXT", TypeNone: "None", TypeOPENPGPKEY: "OPENPGPKEY", @@ -230,6 +232,7 @@ func (rr *NSEC) Header() *RR_Header { return &rr.Hdr } func (rr *NSEC3) Header() *RR_Header { return &rr.Hdr } func (rr *NSEC3PARAM) Header() *RR_Header { return &rr.Hdr } func (rr *NULL) Header() *RR_Header { return &rr.Hdr } +func (rr *NXNAME) Header() *RR_Header { return &rr.Hdr } func (rr *NXT) Header() *RR_Header { return &rr.Hdr } func (rr *OPENPGPKEY) Header() *RR_Header { return &rr.Hdr } func (rr *OPT) Header() *RR_Header { return &rr.Hdr } @@ -594,6 +597,11 @@ func (rr *NULL) len(off int, compression map[string]struct{}) int { return l } +func (rr *NXNAME) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + return l +} + func (rr *OPENPGPKEY) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += 
base64.StdEncoding.DecodedLen(len(rr.PublicKey)) @@ -1107,6 +1115,10 @@ func (rr *NULL) copy() RR { return &NULL{rr.Hdr, rr.Data} } +func (rr *NXNAME) copy() RR { + return &NXNAME{rr.Hdr} +} + func (rr *NXT) copy() RR { return &NXT{*rr.NSEC.copy().(*NSEC)} } diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-cors.go b/vendor/github.com/minio/minio-go/v7/api-bucket-cors.go new file mode 100644 index 0000000000..8bf537f73b --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-bucket-cors.go @@ -0,0 +1,136 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2024 MinIO, Inc. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "bytes" + "context" + "net/http" + "net/url" + + "github.com/minio/minio-go/v7/pkg/cors" + "github.com/minio/minio-go/v7/pkg/s3utils" +) + +// SetBucketCors sets the cors configuration for the bucket +func (c *Client) SetBucketCors(ctx context.Context, bucketName string, corsConfig *cors.Config) error { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return err + } + + if corsConfig == nil { + return c.removeBucketCors(ctx, bucketName) + } + + return c.putBucketCors(ctx, bucketName, corsConfig) +} + +func (c *Client) putBucketCors(ctx context.Context, bucketName string, corsConfig *cors.Config) error { + urlValues := make(url.Values) + urlValues.Set("cors", "") + + corsStr, err := corsConfig.ToXML() + if err != nil { + return err + } + + reqMetadata := requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentBody: bytes.NewReader(corsStr), + contentLength: int64(len(corsStr)), + contentMD5Base64: sumMD5Base64([]byte(corsStr)), + } + + resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) + defer closeResponse(resp) + if err != nil { + return err + } + if resp != nil { + if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent { + return httpRespToErrorResponse(resp, bucketName, "") + } + } + return nil +} + +func (c *Client) removeBucketCors(ctx context.Context, bucketName string) error { + urlValues := make(url.Values) + urlValues.Set("cors", "") + + resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, + }) + defer closeResponse(resp) + if err != nil { + return err + } + + if resp.StatusCode != http.StatusNoContent { + return httpRespToErrorResponse(resp, bucketName, "") + } + + return nil +} + +// GetBucketCors returns the current cors +func (c *Client) GetBucketCors(ctx context.Context, bucketName string) (*cors.Config, error) { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return nil, err + } + bucketCors, err := c.getBucketCors(ctx, bucketName) + if err != nil { + errResponse := ToErrorResponse(err) + if errResponse.Code == "NoSuchCORSConfiguration" { + return nil, nil + } + return nil, err + } + return bucketCors, nil +} + +func (c *Client) getBucketCors(ctx context.Context, bucketName 
string) (*cors.Config, error) { + urlValues := make(url.Values) + urlValues.Set("cors", "") + + resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, // TODO: needed? copied over from other example, but not spec'd in API. + }) + + defer closeResponse(resp) + if err != nil { + return nil, err + } + + if resp != nil { + if resp.StatusCode != http.StatusOK { + return nil, httpRespToErrorResponse(resp, bucketName, "") + } + } + + corsConfig, err := cors.ParseBucketCorsConfig(resp.Body) + if err != nil { + return nil, err + } + + return corsConfig, nil +} diff --git a/vendor/github.com/minio/minio-go/v7/api.go b/vendor/github.com/minio/minio-go/v7/api.go index 937551403e..13c493d0fb 100644 --- a/vendor/github.com/minio/minio-go/v7/api.go +++ b/vendor/github.com/minio/minio-go/v7/api.go @@ -129,7 +129,7 @@ type Options struct { // Global constants. const ( libraryName = "minio-go" - libraryVersion = "v7.0.74" + libraryVersion = "v7.0.75" ) // User Agent should always following the below style. diff --git a/vendor/github.com/minio/minio-go/v7/core.go b/vendor/github.com/minio/minio-go/v7/core.go index 132ea702f7..99b99db9b8 100644 --- a/vendor/github.com/minio/minio-go/v7/core.go +++ b/vendor/github.com/minio/minio-go/v7/core.go @@ -91,6 +91,7 @@ type PutObjectPartOptions struct { Md5Base64, Sha256Hex string SSE encrypt.ServerSide CustomHeader, Trailer http.Header + DisableContentSha256 bool } // PutObjectPart - Upload an object part. @@ -107,7 +108,7 @@ func (c Core) PutObjectPart(ctx context.Context, bucket, object, uploadID string sha256Hex: opts.Sha256Hex, size: size, sse: opts.SSE, - streamSha256: true, + streamSha256: !opts.DisableContentSha256, customHeader: opts.CustomHeader, trailer: opts.Trailer, } diff --git a/vendor/github.com/minio/minio-go/v7/functional_tests.go b/vendor/github.com/minio/minio-go/v7/functional_tests.go index e77bf9d4ab..871034bc7e 100644 --- a/vendor/github.com/minio/minio-go/v7/functional_tests.go +++ b/vendor/github.com/minio/minio-go/v7/functional_tests.go @@ -52,6 +52,7 @@ import ( "github.com/google/uuid" "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/cors" "github.com/minio/minio-go/v7/pkg/credentials" "github.com/minio/minio-go/v7/pkg/encrypt" "github.com/minio/minio-go/v7/pkg/notification" @@ -2972,7 +2973,6 @@ func testGetObjectAttributes() { ContentType: v.ContentType, SendContentMd5: v.SendContentMd5, }) - if err != nil { logError(testName, function, args, startTime, "", "PutObject failed", err) return @@ -7212,7 +7212,6 @@ func testFunctional() { "bucketName": bucketName, } exists, err = c.BucketExists(context.Background(), bucketName) - if err != nil { logError(testName, function, args, startTime, "", "BucketExists failed", err) return @@ -7275,7 +7274,6 @@ func testFunctional() { "bucketPolicy": writeOnlyPolicy, } err = c.SetBucketPolicy(context.Background(), bucketName, writeOnlyPolicy) - if err != nil { logError(testName, function, args, startTime, "", "SetBucketPolicy failed", err) return @@ -7304,7 +7302,6 @@ func testFunctional() { "bucketPolicy": readWritePolicy, } err = c.SetBucketPolicy(context.Background(), bucketName, readWritePolicy) - if err != nil { logError(testName, function, args, startTime, "", "SetBucketPolicy failed", err) return @@ -7481,7 +7478,6 @@ func testFunctional() { "fileName": fileName + "-f", } err = c.FGetObject(context.Background(), bucketName, objectName, fileName+"-f", minio.GetObjectOptions{}) - if 
err != nil { logError(testName, function, args, startTime, "", "FGetObject failed", err) return @@ -7613,7 +7609,6 @@ func testFunctional() { "reqParams": reqParams, } presignedGetURL, err = c.PresignedGetObject(context.Background(), bucketName, objectName, 3600*time.Second, reqParams) - if err != nil { logError(testName, function, args, startTime, "", "PresignedGetObject failed", err) return @@ -7770,14 +7765,12 @@ func testFunctional() { "objectName": objectName, } err = c.RemoveObject(context.Background(), bucketName, objectName, minio.RemoveObjectOptions{}) - if err != nil { logError(testName, function, args, startTime, "", "RemoveObject failed", err) return } args["objectName"] = objectName + "-f" err = c.RemoveObject(context.Background(), bucketName, objectName+"-f", minio.RemoveObjectOptions{}) - if err != nil { logError(testName, function, args, startTime, "", "RemoveObject failed", err) return @@ -7785,7 +7778,6 @@ func testFunctional() { args["objectName"] = objectName + "-nolength" err = c.RemoveObject(context.Background(), bucketName, objectName+"-nolength", minio.RemoveObjectOptions{}) - if err != nil { logError(testName, function, args, startTime, "", "RemoveObject failed", err) return @@ -7793,7 +7785,6 @@ func testFunctional() { args["objectName"] = objectName + "-presigned" err = c.RemoveObject(context.Background(), bucketName, objectName+"-presigned", minio.RemoveObjectOptions{}) - if err != nil { logError(testName, function, args, startTime, "", "RemoveObject failed", err) return @@ -7801,7 +7792,6 @@ func testFunctional() { args["objectName"] = objectName + "-presign-custom" err = c.RemoveObject(context.Background(), bucketName, objectName+"-presign-custom", minio.RemoveObjectOptions{}) - if err != nil { logError(testName, function, args, startTime, "", "RemoveObject failed", err) return @@ -7813,7 +7803,6 @@ func testFunctional() { "bucketName": bucketName, } err = c.RemoveBucket(context.Background(), bucketName) - if err != nil { logError(testName, function, args, startTime, "", "RemoveBucket failed", err) return @@ -12281,7 +12270,6 @@ func testFunctionalV2() { "bucketPolicy": readWritePolicy, } err = c.SetBucketPolicy(context.Background(), bucketName, readWritePolicy) - if err != nil { logError(testName, function, args, startTime, "", "SetBucketPolicy failed", err) return @@ -13012,7 +13000,6 @@ func testGetObjectACLContext() { ContentType: "binary/octet-stream", UserMetadata: metaData, }) - if err != nil { logError(testName, function, args, startTime, "", "PutObject failed", err) return @@ -13491,6 +13478,849 @@ func testListObjects() { logSuccess(testName, function, args, startTime) } +// testCors is runnable against S3 itself. +// Just provide the env var MINIO_GO_TEST_BUCKET_CORS with bucket that is public and WILL BE DELETED. +// Recreate this manually each time. Minio-go SDK does not support calling +// SetPublicBucket (put-public-access-block) on S3, otherwise we could script the whole thing. 
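+//
+// For reference, the new CORS API exercised by this test looks like the
+// following sketch (placeholder names; assumes pkg/cors exposes a NewConfig
+// helper over []cors.Rule, whose field names match the rules table below):
+//
+//	cfg := cors.NewConfig([]cors.Rule{{
+//		AllowedOrigin: []string{"https://www.example.com"},
+//		AllowedMethod: []string{"GET", "PUT"},
+//		AllowedHeader: []string{"*"},
+//	}})
+//	if err := c.SetBucketCors(ctx, bucketName, cfg); err != nil {
+//		// handle error
+//	}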
+func testCors() { + ctx := context.Background() + startTime := time.Now() + testName := getFuncName() + function := "SetBucketCors(bucketName, cors)" + args := map[string]interface{}{ + "bucketName": "", + "cors": "", + } + + // Instantiate new minio client object + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logFailure(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + + // Create or reuse a bucket that will get cors settings applied to it and deleted when done + bucketName := os.Getenv("MINIO_GO_TEST_BUCKET_CORS") + if bucketName == "" { + bucketName = randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + err = c.MakeBucket(ctx, bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logFailure(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + } + args["bucketName"] = bucketName + defer cleanupBucket(bucketName, c) + + publicPolicy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":["*"]},"Action":["s3:*"],"Resource":["arn:aws:s3:::` + bucketName + `", "arn:aws:s3:::` + bucketName + `/*"]}]}` + err = c.SetBucketPolicy(ctx, bucketName, publicPolicy) + if err != nil { + logFailure(testName, function, args, startTime, "", "SetBucketPolicy failed", err) + return + } + + // Upload an object for testing. + objectContents := `some-text-file-contents` + reader := strings.NewReader(objectContents) + bufSize := int64(len(objectContents)) + + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + _, err = c.PutObject(ctx, bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + logFailure(testName, function, args, startTime, "", "PutObject call failed", err) + return + } + bucketURL := c.EndpointURL().String() + "/" + bucketName + "/" + objectURL := bucketURL + objectName + + transport, err := minio.DefaultTransport(mustParseBool(os.Getenv(enableHTTPS))) + if err != nil { + logFailure(testName, function, args, startTime, "", "DefaultTransport failed", err) + return + } + httpClient := &http.Client{ + Timeout: 30 * time.Second, + Transport: transport, + } + + errStrAccessForbidden := `AccessForbiddenCORSResponse: This CORS request is not allowed. This is usually because the evalution of Origin, request method / Access-Control-Request-Method or Access-Control-Request-Headers are not whitelisted` + testCases := []struct { + name string + + // Cors rules to apply + applyCorsRules []cors.Rule + + // Outbound request info + method string + url string + headers map[string]string + + // Wanted response + wantStatus int + wantHeaders map[string]string + wantBodyContains string + }{ + { + name: "apply bucket rules", + applyCorsRules: []cors.Rule{ + { + AllowedOrigin: []string{"https"}, // S3 documents 'https' origin, but it does not actually work, see test below. 
+ AllowedMethod: []string{"PUT"}, + AllowedHeader: []string{"*"}, + }, + { + AllowedOrigin: []string{"http://www.example1.com"}, + AllowedMethod: []string{"PUT"}, + AllowedHeader: []string{"*"}, + ExposeHeader: []string{"x-amz-server-side-encryption", "x-amz-request-id"}, + MaxAgeSeconds: 3600, + }, + { + AllowedOrigin: []string{"http://www.example2.com"}, + AllowedMethod: []string{"POST"}, + AllowedHeader: []string{"X-My-Special-Header"}, + ExposeHeader: []string{"X-AMZ-Request-ID"}, + }, + { + AllowedOrigin: []string{"http://www.example3.com"}, + AllowedMethod: []string{"PUT"}, + AllowedHeader: []string{"X-Example-3-Special-Header"}, + MaxAgeSeconds: 10, + }, + { + AllowedOrigin: []string{"*"}, + AllowedMethod: []string{"GET"}, + AllowedHeader: []string{"*"}, + ExposeHeader: []string{"x-amz-request-id", "X-AMZ-server-side-encryption"}, + MaxAgeSeconds: 3600, + }, + { + AllowedOrigin: []string{"http://multiplemethodstest.com"}, + AllowedMethod: []string{"POST", "PUT", "DELETE"}, + AllowedHeader: []string{"x-abc-*", "x-def-*"}, + }, + { + AllowedOrigin: []string{"http://UPPERCASEEXAMPLE.com"}, + AllowedMethod: []string{"DELETE"}, + }, + { + AllowedOrigin: []string{"https://*"}, + AllowedMethod: []string{"DELETE"}, + AllowedHeader: []string{"x-abc-*", "x-def-*"}, + }, + }, + }, + { + name: "preflight to object url matches example1 rule", + method: http.MethodOptions, + url: objectURL, + headers: map[string]string{ + "Origin": "http://www.example1.com", + "Access-Control-Request-Method": "PUT", + "Access-Control-Request-Headers": "x-another-header,x-could-be-anything", + }, + wantStatus: http.StatusOK, + wantHeaders: map[string]string{ + "Access-Control-Allow-Origin": "http://www.example1.com", + "Access-Control-Allow-Methods": "PUT", + "Access-Control-Allow-Headers": "x-another-header,x-could-be-anything", + "Access-Control-Allow-Credentials": "true", + "Access-Control-Max-Age": "3600", + "Content-Length": "0", + // S3 additionally sets the following headers here, MinIO follows fetch spec and does not: + // "Access-Control-Expose-Headers": "", + }, + }, + { + name: "preflight to bucket url matches example1 rule", + method: http.MethodOptions, + url: bucketURL, + headers: map[string]string{ + "Origin": "http://www.example1.com", + "Access-Control-Request-Method": "PUT", + "Access-Control-Request-Headers": "x-another-header,x-could-be-anything", + }, + wantStatus: http.StatusOK, + wantHeaders: map[string]string{ + "Access-Control-Allow-Origin": "http://www.example1.com", + "Access-Control-Allow-Methods": "PUT", + "Access-Control-Allow-Headers": "x-another-header,x-could-be-anything", + "Access-Control-Allow-Credentials": "true", + "Access-Control-Max-Age": "3600", + "Content-Length": "0", + }, + }, + { + name: "preflight matches example2 rule with header given", + method: http.MethodOptions, + url: objectURL, + headers: map[string]string{ + "Origin": "http://www.example2.com", + "Access-Control-Request-Method": "POST", + "Access-Control-Request-Headers": "X-My-Special-Header", + }, + wantStatus: http.StatusOK, + wantHeaders: map[string]string{ + "Access-Control-Allow-Origin": "http://www.example2.com", + "Access-Control-Allow-Methods": "POST", + "Access-Control-Allow-Headers": "x-my-special-header", + "Access-Control-Allow-Credentials": "true", + "Access-Control-Max-Age": "", + "Content-Length": "0", + }, + }, + { + name: "preflight matches example2 rule with no header given", + method: http.MethodOptions, + url: objectURL, + headers: map[string]string{ + "Origin": "http://www.example2.com", 
+ "Access-Control-Request-Method": "POST", + }, + wantStatus: http.StatusOK, + wantHeaders: map[string]string{ + "Access-Control-Allow-Origin": "http://www.example2.com", + "Access-Control-Allow-Methods": "POST", + "Access-Control-Allow-Headers": "", + "Access-Control-Allow-Credentials": "true", + "Access-Control-Max-Age": "", + "Content-Length": "0", + }, + }, + { + name: "preflight matches wildcard origin rule", + method: http.MethodOptions, + url: objectURL, + headers: map[string]string{ + "Origin": "http://www.couldbeanything.com", + "Access-Control-Request-Method": "GET", + "Access-Control-Request-Headers": "x-custom-header,x-other-custom-header", + }, + wantStatus: http.StatusOK, + wantHeaders: map[string]string{ + "Access-Control-Allow-Origin": "*", + "Access-Control-Allow-Methods": "GET", + "Access-Control-Allow-Headers": "x-custom-header,x-other-custom-header", + "Access-Control-Allow-Credentials": "", + "Access-Control-Max-Age": "3600", + "Content-Length": "0", + }, + }, + { + name: "preflight does not match any rule", + method: http.MethodOptions, + url: objectURL, + headers: map[string]string{ + "Origin": "http://www.couldbeanything.com", + "Access-Control-Request-Method": "DELETE", + }, + wantStatus: http.StatusForbidden, + wantBodyContains: errStrAccessForbidden, + }, + { + name: "preflight does not match example1 rule because of method", + method: http.MethodOptions, + url: objectURL, + headers: map[string]string{ + "Origin": "http://www.example1.com", + "Access-Control-Request-Method": "POST", + }, + wantStatus: http.StatusForbidden, + wantBodyContains: errStrAccessForbidden, + }, + { + name: "s3 processes cors rules even when request is not preflight if cors headers present test get", + method: http.MethodGet, + url: objectURL, + headers: map[string]string{ + "Origin": "http://www.example1.com", + "Access-Control-Request-Headers": "x-another-header,x-could-be-anything", + "Access-Control-Request-Method": "PUT", + }, + wantStatus: http.StatusOK, + wantHeaders: map[string]string{ + "Access-Control-Allow-Credentials": "true", + "Access-Control-Allow-Origin": "http://www.example1.com", + "Access-Control-Expose-Headers": "x-amz-server-side-encryption,x-amz-request-id", + // S3 additionally sets the following headers here, MinIO follows fetch spec and does not: + // "Access-Control-Allow-Headers": "x-another-header,x-could-be-anything", + // "Access-Control-Allow-Methods": "PUT", + // "Access-Control-Max-Age": "3600", + }, + }, + { + name: "s3 processes cors rules even when request is not preflight if cors headers present test put", + method: http.MethodPut, + url: objectURL, + headers: map[string]string{ + "Origin": "http://www.example1.com", + "Access-Control-Request-Method": "GET", + }, + wantStatus: http.StatusOK, + wantHeaders: map[string]string{ + "Access-Control-Allow-Credentials": "", + "Access-Control-Allow-Origin": "*", + "Access-Control-Expose-Headers": "x-amz-request-id,x-amz-server-side-encryption", + // S3 additionally sets the following headers here, MinIO follows fetch spec and does not: + // "Access-Control-Allow-Headers": "x-another-header,x-could-be-anything", + // "Access-Control-Allow-Methods": "PUT", + // "Access-Control-Max-Age": "3600", + }, + }, + { + name: "s3 processes cors rules even when request is not preflight but there is no rule match", + method: http.MethodGet, + url: objectURL, + headers: map[string]string{ + "Origin": "http://www.example1.com", + "Access-Control-Request-Headers": "x-another-header,x-could-be-anything", + 
"Access-Control-Request-Method": "DELETE", + }, + wantStatus: http.StatusOK, + wantHeaders: map[string]string{ + "Access-Control-Allow-Methods": "", + "Access-Control-Allow-Origin": "", + "Access-Control-Allow-Headers": "", + "Access-Control-Allow-Credentials": "", + "Access-Control-Expose-Headers": "", + "Access-Control-Max-Age": "", + }, + }, + { + name: "get request matches wildcard origin rule and returns cors headers", + method: http.MethodGet, + url: objectURL, + headers: map[string]string{ + "Origin": "http://www.example1.com", + }, + wantStatus: http.StatusOK, + wantHeaders: map[string]string{ + "Access-Control-Allow-Credentials": "", + "Access-Control-Allow-Origin": "*", + "Access-Control-Allow-Headers": "", + "Access-Control-Expose-Headers": "x-amz-request-id,X-AMZ-server-side-encryption", + // S3 returns the following headers, MinIO follows fetch spec and does not: + // "Access-Control-Max-Age": "3600", + // "Access-Control-Allow-Methods": "GET", + }, + }, + { + name: "head request does not match rule and returns no cors headers", + method: http.MethodHead, + url: objectURL, + headers: map[string]string{ + "Origin": "http://www.nomatchingdomainfound.com", + }, + wantStatus: http.StatusOK, + wantHeaders: map[string]string{ + "Access-Control-Allow-Credentials": "", + "Access-Control-Allow-Methods": "", + "Access-Control-Allow-Origin": "", + "Access-Control-Allow-Headers": "", + "Access-Control-Expose-Headers": "", + "Access-Control-Max-Age": "", + }, + }, + { + name: "put request with origin does not match rule and returns no cors headers", + method: http.MethodPut, + url: objectURL, + headers: map[string]string{ + "Origin": "http://www.nomatchingdomainfound.com", + }, + wantStatus: http.StatusOK, + wantHeaders: map[string]string{ + "Access-Control-Allow-Credentials": "", + "Access-Control-Allow-Methods": "", + "Access-Control-Allow-Origin": "", + "Access-Control-Allow-Headers": "", + "Access-Control-Expose-Headers": "", + "Access-Control-Max-Age": "", + }, + }, + { + name: "put request with no origin does not match rule and returns no cors headers", + method: http.MethodPut, + url: objectURL, + headers: map[string]string{}, + wantStatus: http.StatusOK, + wantHeaders: map[string]string{ + "Access-Control-Allow-Credentials": "", + "Access-Control-Allow-Methods": "", + "Access-Control-Allow-Origin": "", + "Access-Control-Allow-Headers": "", + "Access-Control-Expose-Headers": "", + "Access-Control-Max-Age": "", + }, + }, + { + name: "preflight for delete request with wildcard origin does not match", + method: http.MethodOptions, + url: objectURL, + headers: map[string]string{ + "Origin": "http://www.notsecureexample.com", + "Access-Control-Request-Method": "DELETE", + }, + wantStatus: http.StatusForbidden, + wantBodyContains: errStrAccessForbidden, + }, + { + name: "preflight for delete request with wildcard https origin matches secureexample", + method: http.MethodOptions, + url: objectURL, + headers: map[string]string{ + "Origin": "https://www.secureexample.com", + "Access-Control-Request-Method": "DELETE", + }, + wantStatus: http.StatusOK, + wantHeaders: map[string]string{ + "Access-Control-Allow-Credentials": "true", + "Access-Control-Allow-Methods": "DELETE", + "Access-Control-Allow-Origin": "https://www.secureexample.com", + "Access-Control-Allow-Headers": "", + "Access-Control-Expose-Headers": "", + "Access-Control-Max-Age": "", + }, + }, + { + name: "preflight for delete request matches secureexample with wildcard https origin and request headers", + method: 
http.MethodOptions, + url: objectURL, + headers: map[string]string{ + "Origin": "https://www.secureexample.com", + "Access-Control-Request-Method": "DELETE", + "Access-Control-Request-Headers": "x-abc-1,x-abc-second,x-def-1", + }, + wantStatus: http.StatusOK, + wantHeaders: map[string]string{ + "Access-Control-Allow-Credentials": "true", + "Access-Control-Allow-Methods": "DELETE", + "Access-Control-Allow-Origin": "https://www.secureexample.com", + "Access-Control-Allow-Headers": "x-abc-1,x-abc-second,x-def-1", + "Access-Control-Expose-Headers": "", + "Access-Control-Max-Age": "", + }, + }, + { + name: "preflight for delete request matches secureexample rejected because request header does not match", + method: http.MethodOptions, + url: objectURL, + headers: map[string]string{ + "Origin": "https://www.secureexample.com", + "Access-Control-Request-Method": "DELETE", + "Access-Control-Request-Headers": "x-abc-1,x-abc-second,x-def-1,x-does-not-match", + }, + wantStatus: http.StatusForbidden, + wantBodyContains: errStrAccessForbidden, + }, + { + name: "preflight with https origin is documented by s3 as matching but it does not match", + method: http.MethodOptions, + url: objectURL, + headers: map[string]string{ + "Origin": "https://www.securebutdoesnotmatch.com", + "Access-Control-Request-Method": "PUT", + }, + wantStatus: http.StatusForbidden, + wantBodyContains: errStrAccessForbidden, + }, + { + name: "put no origin no match returns no cors headers", + method: http.MethodPut, + url: objectURL, + headers: map[string]string{}, + wantStatus: http.StatusOK, + + wantHeaders: map[string]string{ + "Access-Control-Allow-Credentials": "", + "Access-Control-Allow-Methods": "", + "Access-Control-Allow-Origin": "", + "Access-Control-Allow-Headers": "", + "Access-Control-Expose-Headers": "", + "Access-Control-Max-Age": "", + }, + }, + { + name: "put with origin match example1 returns cors headers", + method: http.MethodPut, + url: objectURL, + headers: map[string]string{ + "Origin": "http://www.example1.com", + }, + wantStatus: http.StatusOK, + + wantHeaders: map[string]string{ + "Access-Control-Allow-Credentials": "true", + "Access-Control-Allow-Origin": "http://www.example1.com", + "Access-Control-Allow-Headers": "", + "Access-Control-Expose-Headers": "x-amz-server-side-encryption,x-amz-request-id", + // S3 returns the following headers, MinIO follows fetch spec and does not: + // "Access-Control-Max-Age": "3600", + // "Access-Control-Allow-Methods": "PUT", + }, + }, + { + name: "put with origin and header match example1 returns cors headers", + method: http.MethodPut, + url: objectURL, + headers: map[string]string{ + "Origin": "http://www.example1.com", + "x-could-be-anything": "myvalue", + }, + wantStatus: http.StatusOK, + + wantHeaders: map[string]string{ + "Access-Control-Allow-Credentials": "true", + "Access-Control-Allow-Origin": "http://www.example1.com", + "Access-Control-Allow-Headers": "", + "Access-Control-Expose-Headers": "x-amz-server-side-encryption,x-amz-request-id", + // S3 returns the following headers, MinIO follows fetch spec and does not: + // "Access-Control-Max-Age": "3600", + // "Access-Control-Allow-Methods": "PUT", + }, + }, + { + name: "put no match found returns no cors headers", + method: http.MethodPut, + url: objectURL, + headers: map[string]string{ + "Origin": "http://www.unmatchingdomain.com", + }, + wantStatus: http.StatusOK, + + wantHeaders: map[string]string{ + "Access-Control-Allow-Credentials": "", + "Access-Control-Allow-Methods": "", + 
"Access-Control-Allow-Origin": "", + "Access-Control-Allow-Headers": "", + "Access-Control-Expose-Headers": "", + "Access-Control-Max-Age": "", + }, + }, + { + name: "put with origin match example3 returns cors headers", + method: http.MethodPut, + url: objectURL, + headers: map[string]string{ + "Origin": "http://www.example3.com", + "X-My-Special-Header": "myvalue", + }, + wantStatus: http.StatusOK, + + wantHeaders: map[string]string{ + "Access-Control-Allow-Credentials": "true", + "Access-Control-Allow-Origin": "http://www.example3.com", + "Access-Control-Allow-Headers": "", + "Access-Control-Expose-Headers": "", + // S3 returns the following headers, MinIO follows fetch spec and does not: + // "Access-Control-Max-Age": "10", + // "Access-Control-Allow-Methods": "PUT", + }, + }, + { + name: "preflight matches example1 rule headers case is incorrect", + method: http.MethodOptions, + url: objectURL, + headers: map[string]string{ + "Origin": "http://www.example1.com", + "Access-Control-Request-Method": "PUT", + // Fetch standard guarantees that these are sent lowercase, here we test what happens when they are not. + "Access-Control-Request-Headers": "X-Another-Header,X-Could-Be-Anything", + }, + wantStatus: http.StatusOK, + wantHeaders: map[string]string{ + "Access-Control-Allow-Origin": "http://www.example1.com", + "Access-Control-Allow-Methods": "PUT", + "Access-Control-Allow-Headers": "x-another-header,x-could-be-anything", + "Access-Control-Allow-Credentials": "true", + "Access-Control-Max-Age": "3600", + "Content-Length": "0", + // S3 returns the following headers, MinIO follows fetch spec and does not: + // "Access-Control-Expose-Headers": "x-amz-server-side-encryption,x-amz-request-id", + }, + }, + { + name: "preflight matches example1 rule headers are not sorted", + method: http.MethodOptions, + url: objectURL, + headers: map[string]string{ + "Origin": "http://www.example1.com", + "Access-Control-Request-Method": "PUT", + // Fetch standard guarantees that these are sorted, test what happens when they are not. 
+ "Access-Control-Request-Headers": "a-customer-header,b-should-be-last", + }, + wantStatus: http.StatusOK, + wantHeaders: map[string]string{ + "Access-Control-Allow-Origin": "http://www.example1.com", + "Access-Control-Allow-Methods": "PUT", + "Access-Control-Allow-Headers": "a-customer-header,b-should-be-last", + "Access-Control-Allow-Credentials": "true", + "Access-Control-Max-Age": "3600", + "Content-Length": "0", + // S3 returns the following headers, MinIO follows fetch spec and does not: + // "Access-Control-Expose-Headers": "x-amz-server-side-encryption,x-amz-request-id", + }, + }, + { + name: "preflight with case sensitivity in origin matches uppercase", + method: http.MethodOptions, + url: objectURL, + headers: map[string]string{ + "Origin": "http://UPPERCASEEXAMPLE.com", + "Access-Control-Request-Method": "DELETE", + }, + wantStatus: http.StatusOK, + wantHeaders: map[string]string{ + "Access-Control-Allow-Credentials": "true", + "Access-Control-Allow-Methods": "DELETE", + "Access-Control-Allow-Origin": "http://UPPERCASEEXAMPLE.com", + "Access-Control-Allow-Headers": "", + "Access-Control-Expose-Headers": "", + "Access-Control-Max-Age": "", + }, + }, + { + name: "preflight with case sensitivity in origin does not match when lowercase", + method: http.MethodOptions, + url: objectURL, + headers: map[string]string{ + "Origin": "http://uppercaseexample.com", + "Access-Control-Request-Method": "DELETE", + }, + wantStatus: http.StatusForbidden, + wantBodyContains: errStrAccessForbidden, + }, + { + name: "preflight match upper case with unknown header but no header restrictions", + method: http.MethodOptions, + url: objectURL, + headers: map[string]string{ + "Origin": "http://UPPERCASEEXAMPLE.com", + "Access-Control-Request-Method": "DELETE", + "Access-Control-Request-Headers": "x-unknown-1", + }, + wantStatus: http.StatusForbidden, + wantBodyContains: errStrAccessForbidden, + }, + { + name: "preflight for delete request matches multiplemethodstest.com origin and request headers", + method: http.MethodOptions, + url: objectURL, + headers: map[string]string{ + "Origin": "http://multiplemethodstest.com", + "Access-Control-Request-Method": "DELETE", + "Access-Control-Request-Headers": "x-abc-1", + }, + wantStatus: http.StatusOK, + wantHeaders: map[string]string{ + "Access-Control-Allow-Credentials": "true", + "Access-Control-Allow-Origin": "http://multiplemethodstest.com", + "Access-Control-Allow-Headers": "x-abc-1", + "Access-Control-Expose-Headers": "", + "Access-Control-Max-Age": "", + // S3 returns POST, PUT, DELETE here, MinIO does not as spec does not require it. 
+ // "Access-Control-Allow-Methods": "DELETE", + }, + }, + { + name: "delete request goes ahead because cors is only for browsers and does not block on the server side", + method: http.MethodDelete, + url: objectURL, + headers: map[string]string{ + "Origin": "http://www.justrandom.com", + }, + wantStatus: http.StatusNoContent, + }, + } + + for i, test := range testCases { + testName := fmt.Sprintf("%s_%d_%s", testName, i+1, strings.ReplaceAll(test.name, " ", "_")) + + // Apply the CORS rules + if test.applyCorsRules != nil { + corsConfig := &cors.Config{ + CORSRules: test.applyCorsRules, + } + err = c.SetBucketCors(ctx, bucketName, corsConfig) + if err != nil { + logFailure(testName, function, args, startTime, "", "SetBucketCors failed to apply", err) + return + } + } + + // Make request + if test.method != "" && test.url != "" { + req, err := http.NewRequestWithContext(ctx, test.method, test.url, nil) + if err != nil { + logFailure(testName, function, args, startTime, "", "HTTP request creation failed", err) + return + } + req.Header.Set("User-Agent", "MinIO-go-FunctionalTest/"+appVersion) + + for k, v := range test.headers { + req.Header.Set(k, v) + } + resp, err := httpClient.Do(req) + if err != nil { + logFailure(testName, function, args, startTime, "", "HTTP request failed", err) + return + } + defer resp.Body.Close() + + // Check returned status code + if resp.StatusCode != test.wantStatus { + errStr := fmt.Sprintf(" incorrect status code in response, want: %d, got: %d", test.wantStatus, resp.StatusCode) + logFailure(testName, function, args, startTime, "", errStr, nil) + return + } + + // Check returned body + if test.wantBodyContains != "" { + body, err := io.ReadAll(resp.Body) + if err != nil { + logFailure(testName, function, args, startTime, "", "Failed to read response body", err) + return + } + if !strings.Contains(string(body), test.wantBodyContains) { + errStr := fmt.Sprintf(" incorrect body in response, want: %s, in got: %s", test.wantBodyContains, string(body)) + logFailure(testName, function, args, startTime, "", errStr, nil) + return + } + } + + // Check returned response headers + for k, v := range test.wantHeaders { + gotVal := resp.Header.Get(k) + if k == "Access-Control-Expose-Headers" { + // MinIO returns this in canonical form, S3 does not. + gotVal = strings.ToLower(gotVal) + v = strings.ToLower(v) + } + // Remove all spaces, S3 adds spaces after CSV values in headers, MinIO does not. + gotVal = strings.ReplaceAll(gotVal, " ", "") + if gotVal != v { + errStr := fmt.Sprintf(" incorrect header in response, want: %s: '%s', got: '%s'", k, v, gotVal) + logFailure(testName, function, args, startTime, "", errStr, nil) + return + } + } + } + logSuccess(testName, function, args, startTime) + } + logSuccess(testName, function, args, startTime) +} + +func testCorsSetGetDelete() { + ctx := context.Background() + startTime := time.Now() + testName := getFuncName() + function := "SetBucketCors(bucketName, cors)" + args := map[string]interface{}{ + "bucketName": "", + "cors": "", + } + + // Instantiate new minio client object + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logFailure(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. 
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(ctx, bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logFailure(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + defer cleanupBucket(bucketName, c) + + // Set the CORS rules on the new bucket + corsRules := []cors.Rule{ + { + AllowedOrigin: []string{"http://www.example1.com"}, + AllowedMethod: []string{"PUT"}, + AllowedHeader: []string{"*"}, + }, + { + AllowedOrigin: []string{"http://www.example2.com"}, + AllowedMethod: []string{"POST"}, + AllowedHeader: []string{"X-My-Special-Header"}, + }, + { + AllowedOrigin: []string{"*"}, + AllowedMethod: []string{"GET"}, + AllowedHeader: []string{"*"}, + }, + } + corsConfig := cors.NewConfig(corsRules) + err = c.SetBucketCors(ctx, bucketName, corsConfig) + if err != nil { + logFailure(testName, function, args, startTime, "", "SetBucketCors failed to apply", err) + return + } + + // Get the rules and check they match what we set + gotCorsConfig, err := c.GetBucketCors(ctx, bucketName) + if err != nil { + logFailure(testName, function, args, startTime, "", "GetBucketCors failed", err) + return + } + if !reflect.DeepEqual(corsConfig, gotCorsConfig) { + msg := fmt.Sprintf("GetBucketCors returned unexpected rules, expected: %+v, got: %+v", corsConfig, gotCorsConfig) + logFailure(testName, function, args, startTime, "", msg, nil) + return + } + + // Delete the rules + err = c.SetBucketCors(ctx, bucketName, nil) + if err != nil { + logFailure(testName, function, args, startTime, "", "SetBucketCors failed to delete", err) + return + } + + // Get the rules and check they are now empty + gotCorsConfig, err = c.GetBucketCors(ctx, bucketName) + if err != nil { + logFailure(testName, function, args, startTime, "", "GetBucketCors failed", err) + return + } + if gotCorsConfig != nil { + logFailure(testName, function, args, startTime, "", "GetBucketCors returned unexpected rules", nil) + return + } + + logSuccess(testName, function, args, startTime) +} + // Test deleting multiple objects with object retention set in Governance mode func testRemoveObjects() { // initialize logging params @@ -13627,6 +14457,245 @@ func testRemoveObjects() { logSuccess(testName, function, args, startTime) } +// Test get bucket tags +func testGetBucketTagging() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "GetBucketTagging(bucketName)" + args := map[string]interface{}{ + "bucketName": "", + } + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. 
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + _, err = c.GetBucketTagging(context.Background(), bucketName) + if minio.ToErrorResponse(err).Code != "NoSuchTagSet" { + logError(testName, function, args, startTime, "", "Invalid error from server failed", err) + return + } + + if err = cleanupVersionedBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "CleanupBucket failed", err) + return + } + + logSuccess(testName, function, args, startTime) +} + +// Test setting tags for bucket +func testSetBucketTagging() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "SetBucketTagging(bucketName, tags)" + args := map[string]interface{}{ + "bucketName": "", + "tags": "", + } + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + _, err = c.GetBucketTagging(context.Background(), bucketName) + if minio.ToErrorResponse(err).Code != "NoSuchTagSet" { + logError(testName, function, args, startTime, "", "Invalid error from server", err) + return + } + + tag := randString(60, rand.NewSource(time.Now().UnixNano()), "") + expectedValue := randString(60, rand.NewSource(time.Now().UnixNano()), "") + + t, err := tags.MapToBucketTags(map[string]string{ + tag: expectedValue, + }) + args["tags"] = t.String() + if err != nil { + logError(testName, function, args, startTime, "", "tags.MapToBucketTags failed", err) + return + } + + err = c.SetBucketTagging(context.Background(), bucketName, t) + if err != nil { + logError(testName, function, args, startTime, "", "SetBucketTagging failed", err) + return + } + + tagging, err := c.GetBucketTagging(context.Background(), bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "GetBucketTagging failed", err) + return + } + + if tagging.ToMap()[tag] != expectedValue { + msg := fmt.Sprintf("Tag %s; got value %s; wanted %s", tag, tagging.ToMap()[tag], expectedValue) + logError(testName, function, args, startTime, "", msg, err) + return + } + + // Delete all objects and buckets + if err = cleanupVersionedBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "CleanupBucket failed", err) + return + } + + logSuccess(testName, function, args, startTime) +} + +// Test removing bucket tags +func testRemoveBucketTagging() { + // initialize logging params + startTime := time.Now() 
+ testName := getFuncName() + function := "RemoveBucketTagging(bucketName)" + args := map[string]interface{}{ + "bucketName": "", + } + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + _, err = c.GetBucketTagging(context.Background(), bucketName) + if minio.ToErrorResponse(err).Code != "NoSuchTagSet" { + logError(testName, function, args, startTime, "", "Invalid error from server", err) + return + } + + tag := randString(60, rand.NewSource(time.Now().UnixNano()), "") + expectedValue := randString(60, rand.NewSource(time.Now().UnixNano()), "") + + t, err := tags.MapToBucketTags(map[string]string{ + tag: expectedValue, + }) + if err != nil { + logError(testName, function, args, startTime, "", "tags.MapToBucketTags failed", err) + return + } + + err = c.SetBucketTagging(context.Background(), bucketName, t) + if err != nil { + logError(testName, function, args, startTime, "", "SetBucketTagging failed", err) + return + } + + tagging, err := c.GetBucketTagging(context.Background(), bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "GetBucketTagging failed", err) + return + } + + if tagging.ToMap()[tag] != expectedValue { + msg := fmt.Sprintf("Tag %s; got value %s; wanted %s", tag, tagging.ToMap()[tag], expectedValue) + logError(testName, function, args, startTime, "", msg, err) + return + } + + err = c.RemoveBucketTagging(context.Background(), bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "RemoveBucketTagging failed", err) + return + } + + _, err = c.GetBucketTagging(context.Background(), bucketName) + if minio.ToErrorResponse(err).Code != "NoSuchTagSet" { + logError(testName, function, args, startTime, "", "Invalid error from server", err) + return + } + + // Delete all objects and buckets + if err = cleanupVersionedBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "CleanupBucket failed", err) + return + } + + logSuccess(testName, function, args, startTime) +} + // Convert string to bool and always return false if any error func mustParseBool(str string) bool { b, err := strconv.ParseBool(str) @@ -13660,6 +14729,8 @@ func main() { // execute tests if isFullMode() { + testCorsSetGetDelete() + testCors() testListMultipartUpload() testGetObjectAttributes() testGetObjectAttributesErrorCases() @@ -13731,6 +14802,9 @@ func main() { testObjectTaggingWithVersioning() testTrailingChecksums() testPutObjectWithAutomaticChecksums() + testGetBucketTagging() + testSetBucketTagging() + testRemoveBucketTagging() // SSE-C 
tests will only work over TLS connection. if tls { diff --git a/vendor/github.com/minio/minio-go/v7/pkg/cors/cors.go b/vendor/github.com/minio/minio-go/v7/pkg/cors/cors.go new file mode 100644 index 0000000000..e71864ee93 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/cors/cors.go @@ -0,0 +1,91 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2024 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package cors + +import ( + "encoding/xml" + "fmt" + "io" + "strings" + + "github.com/dustin/go-humanize" +) + +const defaultXMLNS = "http://s3.amazonaws.com/doc/2006-03-01/" + +// Config is the container for a CORS configuration for a bucket. +type Config struct { + XMLNS string `xml:"xmlns,attr,omitempty"` + XMLName xml.Name `xml:"CORSConfiguration"` + CORSRules []Rule `xml:"CORSRule"` +} + +// Rule is a single rule in a CORS configuration. +type Rule struct { + AllowedHeader []string `xml:"AllowedHeader,omitempty"` + AllowedMethod []string `xml:"AllowedMethod,omitempty"` + AllowedOrigin []string `xml:"AllowedOrigin,omitempty"` + ExposeHeader []string `xml:"ExposeHeader,omitempty"` + ID string `xml:"ID,omitempty"` + MaxAgeSeconds int `xml:"MaxAgeSeconds,omitempty"` +} + +// NewConfig creates a new CORS configuration with the given rules. +func NewConfig(rules []Rule) *Config { + return &Config{ + XMLNS: defaultXMLNS, + XMLName: xml.Name{ + Local: "CORSConfiguration", + Space: defaultXMLNS, + }, + CORSRules: rules, + } +} + +// ParseBucketCorsConfig parses a CORS configuration in XML from an io.Reader. +func ParseBucketCorsConfig(reader io.Reader) (*Config, error) { + var c Config + + // Max size of cors document is 64KiB according to https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketCors.html + // This limiter is just for safety so has a max of 128KiB + err := xml.NewDecoder(io.LimitReader(reader, 128*humanize.KiByte)).Decode(&c) + if err != nil { + return nil, fmt.Errorf("decoding xml: %w", err) + } + if c.XMLNS == "" { + c.XMLNS = defaultXMLNS + } + for i, rule := range c.CORSRules { + for j, method := range rule.AllowedMethod { + c.CORSRules[i].AllowedMethod[j] = strings.ToUpper(method) + } + } + return &c, nil +} + +// ToXML marshals the CORS configuration to XML. 
+func (c Config) ToXML() ([]byte, error) { + if c.XMLNS == "" { + c.XMLNS = defaultXMLNS + } + data, err := xml.Marshal(&c) + if err != nil { + return nil, fmt.Errorf("marshaling xml: %w", err) + } + return append([]byte(xml.Header), data...), nil +} diff --git a/vendor/github.com/minio/minio-go/v7/s3-error.go b/vendor/github.com/minio/minio-go/v7/s3-error.go index f365157eea..f7fad19f6a 100644 --- a/vendor/github.com/minio/minio-go/v7/s3-error.go +++ b/vendor/github.com/minio/minio-go/v7/s3-error.go @@ -57,5 +57,6 @@ var s3ErrorResponseMap = map[string]string{ "BucketAlreadyOwnedByYou": "Your previous request to create the named bucket succeeded and you already own it.", "InvalidDuration": "Duration provided in the request is invalid.", "XAmzContentSHA256Mismatch": "The provided 'x-amz-content-sha256' header does not match what was computed.", + "NoSuchCORSConfiguration": "The specified bucket does not have a CORS configuration.", // Add new API errors here. } diff --git a/vendor/github.com/prometheus/client_golang/NOTICE b/vendor/github.com/prometheus/client_golang/NOTICE index dd878a30ee..b9cc55abbb 100644 --- a/vendor/github.com/prometheus/client_golang/NOTICE +++ b/vendor/github.com/prometheus/client_golang/NOTICE @@ -16,8 +16,3 @@ Go support for Protocol Buffers - Google's data interchange format http://github.com/golang/protobuf/ Copyright 2010 The Go Authors See source code for license details. - -Support for streaming Protocol Buffer messages for the Go language (golang). -https://github.com/matttproud/golang_protobuf_extensions -Copyright 2013 Matt T. Proud -Licensed under the Apache License, Version 2.0 diff --git a/vendor/github.com/prometheus/client_golang/api/prometheus/v1/api.go b/vendor/github.com/prometheus/client_golang/api/prometheus/v1/api.go index 1cfe8d863c..cddf027fda 100644 --- a/vendor/github.com/prometheus/client_golang/api/prometheus/v1/api.go +++ b/vendor/github.com/prometheus/client_golang/api/prometheus/v1/api.go @@ -475,9 +475,9 @@ type API interface { // Flags returns the flag values that Prometheus was launched with. Flags(ctx context.Context) (FlagsResult, error) // LabelNames returns the unique label names present in the block in sorted order by given time range and matchers. - LabelNames(ctx context.Context, matches []string, startTime, endTime time.Time) ([]string, Warnings, error) + LabelNames(ctx context.Context, matches []string, startTime, endTime time.Time, opts ...Option) ([]string, Warnings, error) // LabelValues performs a query for the values of the given label, time range and matchers. - LabelValues(ctx context.Context, label string, matches []string, startTime, endTime time.Time) (model.LabelValues, Warnings, error) + LabelValues(ctx context.Context, label string, matches []string, startTime, endTime time.Time, opts ...Option) (model.LabelValues, Warnings, error) // Query performs a query for the given time. Query(ctx context.Context, query string, ts time.Time, opts ...Option) (model.Value, Warnings, error) // QueryRange performs a query for the given range. @@ -489,7 +489,7 @@ type API interface { // Runtimeinfo returns the various runtime information properties about the Prometheus server. Runtimeinfo(ctx context.Context) (RuntimeinfoResult, error) // Series finds series by label matchers. 
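Stepping back to the cors package added above: its helpers round-trip between Go values and the XML document that Set/GetBucketCors exchange with S3. A minimal sketch, assuming only what the vendored file shows (ToXML prepends the XML header; ParseBucketCorsConfig reads at most 128 KiB and upper-cases allowed methods):

package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/minio/minio-go/v7/pkg/cors"
)

func main() {
	cfg := cors.NewConfig([]cors.Rule{{
		AllowedOrigin: []string{"http://www.example1.com"},
		AllowedMethod: []string{"put"}, // normalized to upper case when parsed back
		MaxAgeSeconds: 3600,
	}})

	// Marshal to the S3 CORSConfiguration document, XML header included.
	raw, err := cfg.ToXML()
	if err != nil {
		log.Fatalln(err)
	}
	fmt.Println(string(raw))

	// Parse it back; AllowedMethod comes out as "PUT".
	parsed, err := cors.ParseBucketCorsConfig(bytes.NewReader(raw))
	if err != nil {
		log.Fatalln(err)
	}
	fmt.Println(parsed.CORSRules[0].AllowedMethod[0])
}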
- Series(ctx context.Context, matches []string, startTime, endTime time.Time) ([]model.LabelSet, Warnings, error) + Series(ctx context.Context, matches []string, startTime, endTime time.Time, opts ...Option) ([]model.LabelSet, Warnings, error) // Snapshot creates a snapshot of all current data into snapshots/- // under the TSDB's data directory and returns the directory as response. Snapshot(ctx context.Context, skipHead bool) (SnapshotResult, error) @@ -502,7 +502,7 @@ type API interface { // Metadata returns metadata about metrics currently scraped by the metric name. Metadata(ctx context.Context, metric, limit string) (map[string][]Metadata, error) // TSDB returns the cardinality statistics. - TSDB(ctx context.Context) (TSDBResult, error) + TSDB(ctx context.Context, opts ...Option) (TSDBResult, error) // WalReplay returns the current replay status of the wal. WalReplay(ctx context.Context) (WalReplayStatus, error) } @@ -1024,9 +1024,10 @@ func (h *httpAPI) Runtimeinfo(ctx context.Context) (RuntimeinfoResult, error) { return res, err } -func (h *httpAPI) LabelNames(ctx context.Context, matches []string, startTime, endTime time.Time) ([]string, Warnings, error) { +func (h *httpAPI) LabelNames(ctx context.Context, matches []string, startTime, endTime time.Time, opts ...Option) ([]string, Warnings, error) { u := h.client.URL(epLabels, nil) - q := u.Query() + q := addOptionalURLParams(u.Query(), opts) + if !startTime.IsZero() { q.Set("start", formatTime(startTime)) } @@ -1046,9 +1047,10 @@ func (h *httpAPI) LabelNames(ctx context.Context, matches []string, startTime, e return labelNames, w, err } -func (h *httpAPI) LabelValues(ctx context.Context, label string, matches []string, startTime, endTime time.Time) (model.LabelValues, Warnings, error) { +func (h *httpAPI) LabelValues(ctx context.Context, label string, matches []string, startTime, endTime time.Time, opts ...Option) (model.LabelValues, Warnings, error) { u := h.client.URL(epLabelValues, map[string]string{"name": label}) - q := u.Query() + q := addOptionalURLParams(u.Query(), opts) + if !startTime.IsZero() { q.Set("start", formatTime(startTime)) } @@ -1076,6 +1078,7 @@ func (h *httpAPI) LabelValues(ctx context.Context, label string, matches []strin type apiOptions struct { timeout time.Duration + limit uint64 } type Option func(c *apiOptions) @@ -1088,20 +1091,35 @@ func WithTimeout(timeout time.Duration) Option { } } -func (h *httpAPI) Query(ctx context.Context, query string, ts time.Time, opts ...Option) (model.Value, Warnings, error) { - u := h.client.URL(epQuery, nil) - q := u.Query() +// WithLimit provides an optional maximum number of returned entries for APIs that support limit parameter +// e.g. 
https://prometheus.io/docs/prometheus/latest/querying/api/#instant-querie:~:text=%3A%20End%20timestamp.-,limit%3D%3Cnumber%3E,-%3A%20Maximum%20number%20of +func WithLimit(limit uint64) Option { + return func(o *apiOptions) { + o.limit = limit + } +} +func addOptionalURLParams(q url.Values, opts []Option) url.Values { opt := &apiOptions{} for _, o := range opts { o(opt) } - d := opt.timeout - if d > 0 { - q.Set("timeout", d.String()) + if opt.timeout > 0 { + q.Set("timeout", opt.timeout.String()) } + if opt.limit > 0 { + q.Set("limit", strconv.FormatUint(opt.limit, 10)) + } + + return q +} + +func (h *httpAPI) Query(ctx context.Context, query string, ts time.Time, opts ...Option) (model.Value, Warnings, error) { + u := h.client.URL(epQuery, nil) + q := addOptionalURLParams(u.Query(), opts) + q.Set("query", query) if !ts.IsZero() { q.Set("time", formatTime(ts)) @@ -1118,36 +1136,25 @@ func (h *httpAPI) Query(ctx context.Context, query string, ts time.Time, opts .. func (h *httpAPI) QueryRange(ctx context.Context, query string, r Range, opts ...Option) (model.Value, Warnings, error) { u := h.client.URL(epQueryRange, nil) - q := u.Query() + q := addOptionalURLParams(u.Query(), opts) q.Set("query", query) q.Set("start", formatTime(r.Start)) q.Set("end", formatTime(r.End)) q.Set("step", strconv.FormatFloat(r.Step.Seconds(), 'f', -1, 64)) - opt := &apiOptions{} - for _, o := range opts { - o(opt) - } - - d := opt.timeout - if d > 0 { - q.Set("timeout", d.String()) - } - _, body, warnings, err := h.client.DoGetFallback(ctx, u, q) if err != nil { return nil, warnings, err } var qres queryResult - return qres.v, warnings, json.Unmarshal(body, &qres) } -func (h *httpAPI) Series(ctx context.Context, matches []string, startTime, endTime time.Time) ([]model.LabelSet, Warnings, error) { +func (h *httpAPI) Series(ctx context.Context, matches []string, startTime, endTime time.Time, opts ...Option) ([]model.LabelSet, Warnings, error) { u := h.client.URL(epSeries, nil) - q := u.Query() + q := addOptionalURLParams(u.Query(), opts) for _, m := range matches { q.Add("match[]", m) @@ -1166,8 +1173,7 @@ func (h *httpAPI) Series(ctx context.Context, matches []string, startTime, endTi } var mset []model.LabelSet - err = json.Unmarshal(body, &mset) - return mset, warnings, err + return mset, warnings, json.Unmarshal(body, &mset) } func (h *httpAPI) Snapshot(ctx context.Context, skipHead bool) (SnapshotResult, error) { @@ -1278,8 +1284,10 @@ func (h *httpAPI) Metadata(ctx context.Context, metric, limit string) (map[strin return res, err } -func (h *httpAPI) TSDB(ctx context.Context) (TSDBResult, error) { +func (h *httpAPI) TSDB(ctx context.Context, opts ...Option) (TSDBResult, error) { u := h.client.URL(epTSDB, nil) + q := addOptionalURLParams(u.Query(), opts) + u.RawQuery = q.Encode() req, err := http.NewRequest(http.MethodGet, u.String(), nil) if err != nil { diff --git a/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/LICENSE b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/LICENSE new file mode 100644 index 0000000000..65d761bc9f --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2013 The Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header/header.go b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header/header.go new file mode 100644 index 0000000000..8547c8dfd1 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header/header.go @@ -0,0 +1,145 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file or at +// https://developers.google.com/open-source/licenses/bsd. + +// Package header provides functions for parsing HTTP headers. +package header + +import ( + "net/http" + "strings" +) + +// Octet types from RFC 2616. +var octetTypes [256]octetType + +type octetType byte + +const ( + isToken octetType = 1 << iota + isSpace +) + +func init() { + // OCTET = + // CHAR = + // CTL = + // CR = + // LF = + // SP = + // HT = + // <"> = + // CRLF = CR LF + // LWS = [CRLF] 1*( SP | HT ) + // TEXT = + // separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <"> + // | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT + // token = 1* + // qdtext = > + + for c := 0; c < 256; c++ { + var t octetType + isCtl := c <= 31 || c == 127 + isChar := 0 <= c && c <= 127 + isSeparator := strings.ContainsRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) + if strings.ContainsRune(" \t\r\n", rune(c)) { + t |= isSpace + } + if isChar && !isCtl && !isSeparator { + t |= isToken + } + octetTypes[c] = t + } +} + +// AcceptSpec describes an Accept* header. +type AcceptSpec struct { + Value string + Q float64 +} + +// ParseAccept parses Accept* headers. 
+func ParseAccept(header http.Header, key string) (specs []AcceptSpec) { +loop: + for _, s := range header[key] { + for { + var spec AcceptSpec + spec.Value, s = expectTokenSlash(s) + if spec.Value == "" { + continue loop + } + spec.Q = 1.0 + s = skipSpace(s) + if strings.HasPrefix(s, ";") { + s = skipSpace(s[1:]) + if !strings.HasPrefix(s, "q=") { + continue loop + } + spec.Q, s = expectQuality(s[2:]) + if spec.Q < 0.0 { + continue loop + } + } + specs = append(specs, spec) + s = skipSpace(s) + if !strings.HasPrefix(s, ",") { + continue loop + } + s = skipSpace(s[1:]) + } + } + return +} + +func skipSpace(s string) (rest string) { + i := 0 + for ; i < len(s); i++ { + if octetTypes[s[i]]&isSpace == 0 { + break + } + } + return s[i:] +} + +func expectTokenSlash(s string) (token, rest string) { + i := 0 + for ; i < len(s); i++ { + b := s[i] + if (octetTypes[b]&isToken == 0) && b != '/' { + break + } + } + return s[:i], s[i:] +} + +func expectQuality(s string) (q float64, rest string) { + switch { + case len(s) == 0: + return -1, "" + case s[0] == '0': + q = 0 + case s[0] == '1': + q = 1 + default: + return -1, "" + } + s = s[1:] + if !strings.HasPrefix(s, ".") { + return q, s + } + s = s[1:] + i := 0 + n := 0 + d := 1 + for ; i < len(s); i++ { + b := s[i] + if b < '0' || b > '9' { + break + } + n = n*10 + int(b) - '0' + d *= 10 + } + return q + float64(n)/float64(d), s[i:] +} diff --git a/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/negotiate.go b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/negotiate.go new file mode 100644 index 0000000000..2e45780b74 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/negotiate.go @@ -0,0 +1,36 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file or at +// https://developers.google.com/open-source/licenses/bsd. + +package httputil + +import ( + "net/http" + + "github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header" +) + +// NegotiateContentEncoding returns the best offered content encoding for the +// request's Accept-Encoding header. If two offers match with equal weight and +// then the offer earlier in the list is preferred. If no offers are +// acceptable, then "" is returned. +func NegotiateContentEncoding(r *http.Request, offers []string) string { + bestOffer := "identity" + bestQ := -1.0 + specs := header.ParseAccept(r.Header, "Accept-Encoding") + for _, offer := range offers { + for _, spec := range specs { + if spec.Q > bestQ && + (spec.Value == "*" || spec.Value == offer) { + bestQ = spec.Q + bestOffer = offer + } + } + } + if bestQ == 0 { + bestOffer = "" + } + return bestOffer +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_latest.go b/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_latest.go index bcfa4fa10e..cc4ef1077e 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_latest.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_latest.go @@ -37,6 +37,9 @@ var ( // MetricsScheduler allows only scheduler metrics to be collected from Go runtime. // e.g. 
go_sched_goroutines_goroutines MetricsScheduler = GoRuntimeMetricsRule{regexp.MustCompile(`^/sched/.*`)} + // MetricsDebug allows only debug metrics to be collected from Go runtime. + // e.g. go_godebug_non_default_behavior_gocachetest_events_total + MetricsDebug = GoRuntimeMetricsRule{regexp.MustCompile(`^/godebug/.*`)} ) // WithGoCollectorMemStatsMetricsDisabled disables metrics that is gathered in runtime.MemStats structure such as: @@ -44,7 +47,6 @@ var ( // go_memstats_alloc_bytes // go_memstats_alloc_bytes_total // go_memstats_sys_bytes -// go_memstats_lookups_total // go_memstats_mallocs_total // go_memstats_frees_total // go_memstats_heap_alloc_bytes diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go index ad9a71a5e0..520cbd7d41 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go @@ -22,13 +22,13 @@ import ( // goRuntimeMemStats provides the metrics initially provided by runtime.ReadMemStats. // From Go 1.17 those similar (and better) statistics are provided by runtime/metrics, so // while eval closure works on runtime.MemStats, the struct from Go 1.17+ is -// populated using runtime/metrics. +// populated using runtime/metrics. Those are the defaults we can't alter. func goRuntimeMemStats() memStatsMetrics { return memStatsMetrics{ { desc: NewDesc( memstatNamespace("alloc_bytes"), - "Number of bytes allocated and still in use.", + "Number of bytes allocated in heap and currently in use. Equals to /memory/classes/heap/objects:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.Alloc) }, @@ -36,7 +36,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("alloc_bytes_total"), - "Total number of bytes allocated, even if freed.", + "Total number of bytes allocated in heap until now, even if released already. Equals to /gc/heap/allocs:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.TotalAlloc) }, @@ -44,23 +44,16 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("sys_bytes"), - "Number of bytes obtained from system.", + "Number of bytes obtained from system. Equals to /memory/classes/total:byte.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.Sys) }, valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("lookups_total"), - "Total number of pointer lookups.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.Lookups) }, - valType: CounterValue, }, { desc: NewDesc( memstatNamespace("mallocs_total"), - "Total number of mallocs.", + // TODO(bwplotka): We could add go_memstats_heap_objects, probably useful for discovery. Let's gather more feedback, kind of a waste of bytes for everybody for compatibility reasons to keep both, and we can't really rename/remove useful metric. + "Total number of heap objects allocated, both live and gc-ed. Semantically a counter version for go_memstats_heap_objects gauge. Equals to /gc/heap/allocs:objects + /gc/heap/tiny/allocs:objects.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.Mallocs) }, @@ -68,7 +61,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("frees_total"), - "Total number of frees.", + "Total number of heap objects frees. 
Equals to /gc/heap/frees:objects + /gc/heap/tiny/allocs:objects.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.Frees) }, @@ -76,7 +69,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("heap_alloc_bytes"), - "Number of heap bytes allocated and still in use.", + "Number of heap bytes allocated and currently in use, same as go_memstats_alloc_bytes. Equals to /memory/classes/heap/objects:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapAlloc) }, @@ -84,7 +77,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("heap_sys_bytes"), - "Number of heap bytes obtained from system.", + "Number of heap bytes obtained from system. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes + /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapSys) }, @@ -92,7 +85,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("heap_idle_bytes"), - "Number of heap bytes waiting to be used.", + "Number of heap bytes waiting to be used. Equals to /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapIdle) }, @@ -100,7 +93,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("heap_inuse_bytes"), - "Number of heap bytes that are in use.", + "Number of heap bytes that are in use. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapInuse) }, @@ -108,7 +101,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("heap_released_bytes"), - "Number of heap bytes released to OS.", + "Number of heap bytes released to OS. Equals to /memory/classes/heap/released:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) }, @@ -116,7 +109,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("heap_objects"), - "Number of allocated objects.", + "Number of currently allocated objects. Equals to /gc/heap/objects:objects.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapObjects) }, @@ -124,7 +117,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("stack_inuse_bytes"), - "Number of bytes in use by the stack allocator.", + "Number of bytes obtained from system for stack allocator in non-CGO environments. Equals to /memory/classes/heap/stacks:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackInuse) }, @@ -132,7 +125,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("stack_sys_bytes"), - "Number of bytes obtained from system for stack allocator.", + "Number of bytes obtained from system for stack allocator. Equals to /memory/classes/heap/stacks:bytes + /memory/classes/os-stacks:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackSys) }, @@ -140,7 +133,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("mspan_inuse_bytes"), - "Number of bytes in use by mspan structures.", + "Number of bytes in use by mspan structures. 
Equals to /memory/classes/metadata/mspan/inuse:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanInuse) }, @@ -148,7 +141,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("mspan_sys_bytes"), - "Number of bytes used for mspan structures obtained from system.", + "Number of bytes used for mspan structures obtained from system. Equals to /memory/classes/metadata/mspan/inuse:bytes + /memory/classes/metadata/mspan/free:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanSys) }, @@ -156,7 +149,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("mcache_inuse_bytes"), - "Number of bytes in use by mcache structures.", + "Number of bytes in use by mcache structures. Equals to /memory/classes/metadata/mcache/inuse:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheInuse) }, @@ -164,7 +157,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("mcache_sys_bytes"), - "Number of bytes used for mcache structures obtained from system.", + "Number of bytes used for mcache structures obtained from system. Equals to /memory/classes/metadata/mcache/inuse:bytes + /memory/classes/metadata/mcache/free:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheSys) }, @@ -172,7 +165,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("buck_hash_sys_bytes"), - "Number of bytes used by the profiling bucket hash table.", + "Number of bytes used by the profiling bucket hash table. Equals to /memory/classes/profiling/buckets:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.BuckHashSys) }, @@ -180,7 +173,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("gc_sys_bytes"), - "Number of bytes used for garbage collection system metadata.", + "Number of bytes used for garbage collection system metadata. Equals to /memory/classes/metadata/other:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.GCSys) }, @@ -188,7 +181,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("other_sys_bytes"), - "Number of bytes used for other system allocations.", + "Number of bytes used for other system allocations. Equals to /memory/classes/other:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.OtherSys) }, @@ -196,7 +189,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("next_gc_bytes"), - "Number of heap bytes when next garbage collection will take place.", + "Number of heap bytes when next garbage collection will take place. 
Equals to /gc/heap/goal:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) }, @@ -225,7 +218,7 @@ func newBaseGoCollector() baseGoCollector { nil, nil), gcDesc: NewDesc( "go_gc_duration_seconds", - "A summary of the pause duration of garbage collection cycles.", + "A summary of the wall-time pause (stop-the-world) duration in garbage collection cycles.", nil, nil), gcLastTimeDesc: NewDesc( "go_memstats_last_gc_time_seconds", diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go index 2d8d9f64f4..5117464172 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go @@ -17,6 +17,7 @@ package prometheus import ( + "fmt" "math" "runtime" "runtime/metrics" @@ -153,7 +154,8 @@ func defaultGoCollectorOptions() internal.GoCollectorOptions { "/gc/heap/frees-by-size:bytes": goGCHeapFreesBytes, }, RuntimeMetricRules: []internal.GoCollectorRule{ - //{Matcher: regexp.MustCompile("")}, + // Recommended metrics we want by default from runtime/metrics. + {Matcher: internal.GoCollectorDefaultRuntimeMetrics}, }, } } @@ -203,6 +205,7 @@ func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector { // to fail here. This condition is tested in TestExpectedRuntimeMetrics. continue } + help := attachOriginalName(d.Description.Description, d.Name) sampleBuf = append(sampleBuf, metrics.Sample{Name: d.Name}) sampleMap[d.Name] = &sampleBuf[len(sampleBuf)-1] @@ -214,7 +217,7 @@ func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector { m = newBatchHistogram( NewDesc( BuildFQName(namespace, subsystem, name), - d.Description.Description, + help, nil, nil, ), @@ -226,7 +229,7 @@ func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector { Namespace: namespace, Subsystem: subsystem, Name: name, - Help: d.Description.Description, + Help: help, }, ) } else { @@ -234,7 +237,7 @@ func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector { Namespace: namespace, Subsystem: subsystem, Name: name, - Help: d.Description.Description, + Help: help, }) } metricSet = append(metricSet, m) @@ -284,6 +287,10 @@ func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector { } } +func attachOriginalName(desc, origName string) string { + return fmt.Sprintf("%s Sourced from %s", desc, origName) +} + // Describe returns all descriptions of the collector. func (c *goCollector) Describe(ch chan<- *Desc) { c.base.Describe(ch) @@ -376,13 +383,13 @@ func unwrapScalarRMValue(v metrics.Value) float64 { // // This should never happen because we always populate our metric // set from the runtime/metrics package. - panic("unexpected unsupported metric") + panic("unexpected bad kind metric") default: // Unsupported metric kind. // // This should never happen because we check for this during initialization // and flag and filter metrics whose kinds we don't understand. 
- panic("unexpected unsupported metric kind") + panic(fmt.Sprintf("unexpected unsupported metric: %v", v.Kind())) } } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go index b5c8bcb395..8d35f2d8ae 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go @@ -440,7 +440,7 @@ type HistogramOpts struct { // constant (or any negative float value). NativeHistogramZeroThreshold float64 - // The remaining fields define a strategy to limit the number of + // The next three fields define a strategy to limit the number of // populated sparse buckets. If NativeHistogramMaxBucketNumber is left // at zero, the number of buckets is not limited. (Note that this might // lead to unbounded memory consumption if the values observed by the @@ -473,6 +473,22 @@ type HistogramOpts struct { NativeHistogramMinResetDuration time.Duration NativeHistogramMaxZeroThreshold float64 + // NativeHistogramMaxExemplars limits the number of exemplars + // that are kept in memory for each native histogram. If you leave it at + // zero, a default value of 10 is used. If no exemplars should be kept specifically + // for native histograms, set it to a negative value. (Scrapers can + // still use the exemplars exposed for classic buckets, which are managed + // independently.) + NativeHistogramMaxExemplars int + // NativeHistogramExemplarTTL is only checked once + // NativeHistogramMaxExemplars is exceeded. In that case, the + // oldest exemplar is removed if it is older than NativeHistogramExemplarTTL. + // Otherwise, the older exemplar in the pair of exemplars that are closest + // together (on an exponential scale) is removed. + // If NativeHistogramExemplarTTL is left at its zero value, a default value of + // 5m is used. To always delete the oldest exemplar, set it to a negative value. + NativeHistogramExemplarTTL time.Duration + // now is for testing purposes, by default it's time.Now. now func() time.Time @@ -532,6 +548,7 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr if opts.afterFunc == nil { opts.afterFunc = time.AfterFunc } + h := &histogram{ desc: desc, upperBounds: opts.Buckets, @@ -556,6 +573,7 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr h.nativeHistogramZeroThreshold = DefNativeHistogramZeroThreshold } // Leave h.nativeHistogramZeroThreshold at 0 otherwise. h.nativeHistogramSchema = pickSchema(opts.NativeHistogramBucketFactor) + h.nativeExemplars = makeNativeExemplars(opts.NativeHistogramExemplarTTL, opts.NativeHistogramMaxExemplars) } for i, upperBound := range h.upperBounds { if i < len(h.upperBounds)-1 { @@ -725,7 +743,8 @@ type histogram struct { // resetScheduled is protected by mtx. It is true if a reset is // scheduled for a later time (when nativeHistogramMinResetDuration has // passed). - resetScheduled bool + resetScheduled bool + nativeExemplars nativeExemplars // now is for testing purposes, by default it's time.Now. now func() time.Time @@ -742,6 +761,9 @@ func (h *histogram) Observe(v float64) { h.observe(v, h.findBucket(v)) } +// ObserveWithExemplar should not be called in a high-frequency setting +// for a native histogram with configured exemplars. For this case, +// the implementation isn't lock-free and might suffer from lock contention. 
func (h *histogram) ObserveWithExemplar(v float64, e Labels) { i := h.findBucket(v) h.observe(v, i) @@ -821,6 +843,15 @@ func (h *histogram) Write(out *dto.Metric) error { Length: proto.Uint32(0), }} } + + // If exemplars are not configured, the cap will be 0. + // So append is not needed in this case. + if cap(h.nativeExemplars.exemplars) > 0 { + h.nativeExemplars.Lock() + his.Exemplars = append(his.Exemplars, h.nativeExemplars.exemplars...) + h.nativeExemplars.Unlock() + } + } addAndResetCounts(hotCounts, coldCounts) return nil @@ -1091,8 +1122,10 @@ func (h *histogram) resetCounts(counts *histogramCounts) { deleteSyncMap(&counts.nativeHistogramBucketsPositive) } -// updateExemplar replaces the exemplar for the provided bucket. With empty -// labels, it's a no-op. It panics if any of the labels is invalid. +// updateExemplar replaces the exemplar for the provided classic bucket. +// With empty labels, it's a no-op. It panics if any of the labels is invalid. +// If histogram is native, the exemplar will be cached into nativeExemplars, +// which has a limit, and will remove one exemplar when limit is reached. func (h *histogram) updateExemplar(v float64, bucket int, l Labels) { if l == nil { return @@ -1102,6 +1135,10 @@ func (h *histogram) updateExemplar(v float64, bucket int, l Labels) { panic(err) } h.exemplars[bucket].Store(e) + doSparse := h.nativeHistogramSchema > math.MinInt32 && !math.IsNaN(v) + if doSparse { + h.nativeExemplars.addExemplar(e) + } } // HistogramVec is a Collector that bundles a set of Histograms that all share the @@ -1336,6 +1373,48 @@ func MustNewConstHistogram( return m } +// NewConstHistogramWithCreatedTimestamp does the same thing as NewConstHistogram but sets the created timestamp. +func NewConstHistogramWithCreatedTimestamp( + desc *Desc, + count uint64, + sum float64, + buckets map[float64]uint64, + ct time.Time, + labelValues ...string, +) (Metric, error) { + if desc.err != nil { + return nil, desc.err + } + if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil { + return nil, err + } + return &constHistogram{ + desc: desc, + count: count, + sum: sum, + buckets: buckets, + labelPairs: MakeLabelPairs(desc, labelValues), + createdTs: timestamppb.New(ct), + }, nil +} + +// MustNewConstHistogramWithCreatedTimestamp is a version of NewConstHistogramWithCreatedTimestamp that panics where +// NewConstHistogramWithCreatedTimestamp would have returned an error. +func MustNewConstHistogramWithCreatedTimestamp( + desc *Desc, + count uint64, + sum float64, + buckets map[float64]uint64, + ct time.Time, + labelValues ...string, +) Metric { + m, err := NewConstHistogramWithCreatedTimestamp(desc, count, sum, buckets, ct, labelValues...) 
+	if err != nil {
+		panic(err)
+	}
+	return m
+}
+
 type buckSort []*dto.Bucket
 
 func (s buckSort) Len() int {
@@ -1575,3 +1654,142 @@ func addAndResetCounts(hot, cold *histogramCounts) {
 	atomic.AddUint64(&hot.nativeHistogramZeroBucket, atomic.LoadUint64(&cold.nativeHistogramZeroBucket))
 	atomic.StoreUint64(&cold.nativeHistogramZeroBucket, 0)
 }
+
+type nativeExemplars struct {
+	sync.Mutex
+
+	ttl       time.Duration
+	exemplars []*dto.Exemplar
+}
+
+func makeNativeExemplars(ttl time.Duration, maxCount int) nativeExemplars {
+	if ttl == 0 {
+		ttl = 5 * time.Minute
+	}
+
+	if maxCount == 0 {
+		maxCount = 10
+	}
+
+	if maxCount < 0 {
+		maxCount = 0
+	}
+
+	return nativeExemplars{
+		ttl:       ttl,
+		exemplars: make([]*dto.Exemplar, 0, maxCount),
+	}
+}
+
+func (n *nativeExemplars) addExemplar(e *dto.Exemplar) {
+	if cap(n.exemplars) == 0 {
+		return
+	}
+
+	n.Lock()
+	defer n.Unlock()
+
+	// The index at which to insert the new exemplar.
+	var nIdx int = -1
+
+	// While the number of exemplars is still below cap(n.exemplars),
+	// insert the new exemplar directly.
+	if len(n.exemplars) < cap(n.exemplars) {
+		for nIdx = 0; nIdx < len(n.exemplars); nIdx++ {
+			if *e.Value < *n.exemplars[nIdx].Value {
+				break
+			}
+		}
+		n.exemplars = append(n.exemplars[:nIdx], append([]*dto.Exemplar{e}, n.exemplars[nIdx:]...)...)
+		return
+	}
+
+	// When the number of exemplars exceeds the limit, remove one exemplar.
+	var (
+		rIdx int // The index of the exemplar to remove.
+
+		ot    = time.Now() // Oldest timestamp seen.
+		otIdx = -1         // Index of the exemplar with the oldest timestamp.
+
+		md    = -1.0  // Logarithm of the delta of the closest pair of exemplars.
+		mdIdx = -1    // Index of the older exemplar within the closest pair.
+		cLog  float64 // Logarithm of the current exemplar.
+		pLog  float64 // Logarithm of the previous exemplar.
+	)
+
+	for i, exemplar := range n.exemplars {
+		// Find the exemplar with the oldest timestamp.
+		if otIdx == -1 || exemplar.Timestamp.AsTime().Before(ot) {
+			ot = exemplar.Timestamp.AsTime()
+			otIdx = i
+		}
+
+		// Find the index at which to insert the new exemplar.
+		if *e.Value <= *exemplar.Value && nIdx == -1 {
+			nIdx = i
+		}
+
+		// Find the two closest exemplars and pick the one with the older timestamp.
+		pLog = cLog
+		cLog = math.Log(exemplar.GetValue())
+		if i == 0 {
+			continue
+		}
+		diff := math.Abs(cLog - pLog)
+		if md == -1 || diff < md {
+			md = diff
+			if n.exemplars[i].Timestamp.AsTime().Before(n.exemplars[i-1].Timestamp.AsTime()) {
+				mdIdx = i
+			} else {
+				mdIdx = i - 1
+			}
+		}
+
+	}
+
+	// If all existing exemplars are smaller than the new exemplar,
+	// then the new exemplar should be inserted at the end.
+	if nIdx == -1 {
+		nIdx = len(n.exemplars)
+	}
+
+	if otIdx != -1 && e.Timestamp.AsTime().Sub(ot) > n.ttl {
+		rIdx = otIdx
+	} else {
+		// In the previous for loop, when calculating the closest pair of exemplars,
+		// we did not take into account the newly inserted exemplar.
+		// So we need to calculate with the newly inserted exemplar again.
+ elog := math.Log(e.GetValue()) + if nIdx > 0 { + diff := math.Abs(elog - math.Log(n.exemplars[nIdx-1].GetValue())) + if diff < md { + md = diff + mdIdx = nIdx + if n.exemplars[nIdx-1].Timestamp.AsTime().Before(e.Timestamp.AsTime()) { + mdIdx = nIdx - 1 + } + } + } + if nIdx < len(n.exemplars) { + diff := math.Abs(math.Log(n.exemplars[nIdx].GetValue()) - elog) + if diff < md { + mdIdx = nIdx + if n.exemplars[nIdx].Timestamp.AsTime().Before(e.Timestamp.AsTime()) { + mdIdx = nIdx + } + } + } + rIdx = mdIdx + } + + // Adjust the slice according to rIdx and nIdx. + switch { + case rIdx == nIdx: + n.exemplars[nIdx] = e + case rIdx < nIdx: + n.exemplars = append(n.exemplars[:rIdx], append(n.exemplars[rIdx+1:nIdx], append([]*dto.Exemplar{e}, n.exemplars[nIdx:]...)...)...) + case rIdx > nIdx: + n.exemplars = append(n.exemplars[:nIdx], append([]*dto.Exemplar{e}, append(n.exemplars[nIdx:rIdx], n.exemplars[rIdx+1:]...)...)...) + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go index 723b45d644..a4fa6eabd7 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go @@ -30,3 +30,5 @@ type GoCollectorOptions struct { RuntimeMetricSumForHist map[string]string RuntimeMetricRules []GoCollectorRule } + +var GoCollectorDefaultRuntimeMetrics = regexp.MustCompile(`/gc/gogc:percent|/gc/gomemlimit:bytes|/sched/gomaxprocs:threads`) diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go index f018e57237..9d9b81ab44 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/metric.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/metric.go @@ -234,7 +234,7 @@ func NewMetricWithExemplars(m Metric, exemplars ...Exemplar) (Metric, error) { ) for i, e := range exemplars { ts := e.Timestamp - if ts == (time.Time{}) { + if ts.IsZero() { ts = now } exs[i], err = newExemplar(e.Value, ts, e.Labels) diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go index 8548dd18ed..62a4e7ad9a 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go @@ -22,14 +22,15 @@ import ( ) type processCollector struct { - collectFn func(chan<- Metric) - pidFn func() (int, error) - reportErrors bool - cpuTotal *Desc - openFDs, maxFDs *Desc - vsize, maxVsize *Desc - rss *Desc - startTime *Desc + collectFn func(chan<- Metric) + pidFn func() (int, error) + reportErrors bool + cpuTotal *Desc + openFDs, maxFDs *Desc + vsize, maxVsize *Desc + rss *Desc + startTime *Desc + inBytes, outBytes *Desc } // ProcessCollectorOpts defines the behavior of a process metrics collector @@ -100,6 +101,16 @@ func NewProcessCollector(opts ProcessCollectorOpts) Collector { "Start time of the process since unix epoch in seconds.", nil, nil, ), + inBytes: NewDesc( + ns+"process_network_receive_bytes_total", + "Number of bytes received by the process over the network.", + nil, nil, + ), + outBytes: NewDesc( + ns+"process_network_transmit_bytes_total", + "Number of bytes sent by the process over the network.", + nil, nil, + ), } if opts.PidFn == nil { @@ -129,6 +140,8 @@ func 
(c *processCollector) Describe(ch chan<- *Desc) { ch <- c.maxVsize ch <- c.rss ch <- c.startTime + ch <- c.inBytes + ch <- c.outBytes } // Collect returns the current state of all metrics of the collector. diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go index 8c1136ceea..14d56d2d06 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go @@ -63,4 +63,18 @@ func (c *processCollector) processCollect(ch chan<- Metric) { } else { c.reportError(ch, nil, err) } + + if netstat, err := p.Netstat(); err == nil { + var inOctets, outOctets float64 + if netstat.IpExt.InOctets != nil { + inOctets = *netstat.IpExt.InOctets + } + if netstat.IpExt.OutOctets != nil { + outOctets = *netstat.IpExt.OutOctets + } + ch <- MustNewConstMetric(c.inBytes, CounterValue, inOctets) + ch <- MustNewConstMetric(c.outBytes, CounterValue, outOctets) + } else { + c.reportError(ch, nil, err) + } } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go index 9819917b83..315eab5f17 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go @@ -76,6 +76,12 @@ func (r *responseWriterDelegator) Write(b []byte) (int, error) { return n, err } +// Unwrap lets http.ResponseController get the underlying http.ResponseWriter, +// by implementing the [rwUnwrapper](https://cs.opensource.google/go/go/+/refs/tags/go1.21.4:src/net/http/responsecontroller.go;l=42-44) interface. +func (r *responseWriterDelegator) Unwrap() http.ResponseWriter { + return r.ResponseWriter +} + type ( closeNotifierDelegator struct{ *responseWriterDelegator } flusherDelegator struct{ *responseWriterDelegator } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go index 09b8d2fbea..e598e66e68 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go @@ -38,12 +38,13 @@ import ( "io" "net/http" "strconv" - "strings" "sync" "time" + "github.com/klauspost/compress/zstd" "github.com/prometheus/common/expfmt" + "github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil" "github.com/prometheus/client_golang/prometheus" ) @@ -54,6 +55,18 @@ const ( processStartTimeHeader = "Process-Start-Time-Unix" ) +// Compression represents the content encodings handlers support for the HTTP +// responses. +type Compression string + +const ( + Identity Compression = "identity" + Gzip Compression = "gzip" + Zstd Compression = "zstd" +) + +var defaultCompressionFormats = []Compression{Identity, Gzip, Zstd} + var gzipPool = sync.Pool{ New: func() interface{} { return gzip.NewWriter(nil) @@ -122,6 +135,18 @@ func HandlerForTransactional(reg prometheus.TransactionalGatherer, opts HandlerO } } + // Select compression formats to offer based on default or user choice. 
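
Before the negotiation code continues below, a short sketch of how a caller might opt a scrape endpoint into the new encodings; the registry and address are placeholders:

    package main

    import (
    	"net/http"

    	"github.com/prometheus/client_golang/prometheus"
    	"github.com/prometheus/client_golang/prometheus/promhttp"
    )

    func main() {
    	reg := prometheus.NewRegistry()
    	// Offer zstd and gzip; the handler picks one based on the request's
    	// Accept-Encoding header and falls back to identity otherwise.
    	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{
    		OfferedCompressions: []promhttp.Compression{promhttp.Zstd, promhttp.Gzip, promhttp.Identity},
    	}))
    	_ = http.ListenAndServe(":8080", nil)
    }
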
+	var compressions []string
+	if !opts.DisableCompression {
+		offers := defaultCompressionFormats
+		if len(opts.OfferedCompressions) > 0 {
+			offers = opts.OfferedCompressions
+		}
+		for _, comp := range offers {
+			compressions = append(compressions, string(comp))
+		}
+	}
+
 	h := http.HandlerFunc(func(rsp http.ResponseWriter, req *http.Request) {
 		if !opts.ProcessStartTime.IsZero() {
 			rsp.Header().Set(processStartTimeHeader, strconv.FormatInt(opts.ProcessStartTime.Unix(), 10))
@@ -165,21 +190,23 @@
 		} else {
 			contentType = expfmt.Negotiate(req.Header)
 		}
-		header := rsp.Header()
-		header.Set(contentTypeHeader, string(contentType))
+		rsp.Header().Set(contentTypeHeader, string(contentType))
 
-		w := io.Writer(rsp)
-		if !opts.DisableCompression && gzipAccepted(req.Header) {
-			header.Set(contentEncodingHeader, "gzip")
-			gz := gzipPool.Get().(*gzip.Writer)
-			defer gzipPool.Put(gz)
+		w, encodingHeader, closeWriter, err := negotiateEncodingWriter(req, rsp, compressions)
+		if err != nil {
+			if opts.ErrorLog != nil {
+				opts.ErrorLog.Println("error getting writer", err)
+			}
+			w = io.Writer(rsp)
+			encodingHeader = string(Identity)
+		}
 
-			gz.Reset(w)
-			defer gz.Close()
+		defer closeWriter()
 
-			w = gz
+		// Set Content-Encoding only when data is compressed.
+		if encodingHeader != string(Identity) {
+			rsp.Header().Set(contentEncodingHeader, encodingHeader)
 		}
-
 		enc := expfmt.NewEncoder(w, contentType)
 
 		// handleError handles the error according to opts.ErrorHandling
@@ -343,9 +370,19 @@ type HandlerOpts struct {
 	// no effect on the HTTP status code because ErrorHandling is set to
 	// ContinueOnError.
 	Registry prometheus.Registerer
-	// If DisableCompression is true, the handler will never compress the
-	// response, even if requested by the client.
+	// DisableCompression disables the response encoding (compression) and
+	// encoding negotiation. If true, the handler will
+	// never compress the response, even if requested
+	// by the client and the OfferedCompressions field is set.
 	DisableCompression bool
+	// OfferedCompressions is a set of encodings (compressions) the handler
+	// will try to offer when negotiating with the client. This defaults to
+	// identity, gzip and zstd.
+	// NOTE: If the handler can't agree with the client on an encoding, or
+	// if unsupported or empty encodings are set in OfferedCompressions,
+	// the handler always falls back to no compression (identity), for
+	// compatibility reasons. In such cases ErrorLog will be used if set.
+	OfferedCompressions []Compression
 	// The number of concurrent HTTP requests is limited to
 	// MaxRequestsInFlight. Additional requests are responded to with 503
 	// Service Unavailable and a suitable message in the body. If
@@ -381,19 +418,6 @@ type HandlerOpts struct {
 	ProcessStartTime time.Time
 }
 
-// gzipAccepted returns whether the client will accept gzip-encoded content.
-func gzipAccepted(header http.Header) bool {
-	a := header.Get(acceptEncodingHeader)
-	parts := strings.Split(a, ",")
-	for _, part := range parts {
-		part = strings.TrimSpace(part)
-		if part == "gzip" || strings.HasPrefix(part, "gzip;") {
-			return true
-		}
-	}
-	return false
-}
-
 // httpError removes any content-encoding header and then calls http.Error with
 // the provided error and http.StatusInternalServerError. Error contents is
 // supposed to be uncompressed plain text.
Same as with a plain http.Error, this
@@ -406,3 +430,38 @@ func httpError(rsp http.ResponseWriter, err error) {
 		http.StatusInternalServerError,
 	)
 }
+
+// negotiateEncodingWriter reads the Accept-Encoding header from a request and
+// selects the right compression based on an allow-list of supported
+// compressions. It returns a writer implementing the compression and the
+// correct value that the caller can set in the response header.
+func negotiateEncodingWriter(r *http.Request, rw io.Writer, compressions []string) (_ io.Writer, encodingHeaderValue string, closeWriter func(), _ error) {
+	if len(compressions) == 0 {
+		return rw, string(Identity), func() {}, nil
+	}
+
+	// TODO(mrueg): Replace internal/github.com/gddo once https://github.com/golang/go/issues/19307 is implemented.
+	selected := httputil.NegotiateContentEncoding(r, compressions)
+
+	switch selected {
+	case "zstd":
+		// TODO(mrueg): Replace klauspost/compress with stdlib implementation once https://github.com/golang/go/issues/62513 is implemented.
+		z, err := zstd.NewWriter(rw, zstd.WithEncoderLevel(zstd.SpeedFastest))
+		if err != nil {
+			return nil, "", func() {}, err
+		}
+
+		z.Reset(rw)
+		return z, selected, func() { _ = z.Close() }, nil
+	case "gzip":
+		gz := gzipPool.Get().(*gzip.Writer)
+		gz.Reset(rw)
+		return gz, selected, func() { _ = gz.Close(); gzipPool.Put(gz) }, nil
+	case "identity":
+		// This means the content is not compressed.
+		return rw, selected, func() {}, nil
+	default:
+		// The content encoding is not implemented yet.
+		return nil, "", func() {}, fmt.Errorf("content compression format not recognized: %s. Valid formats are: %s", selected, defaultCompressionFormats)
+	}
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go
index 5e2ced25a0..c6fd2f58b7 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/registry.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/registry.go
@@ -314,16 +314,17 @@ func (r *Registry) Register(c Collector) error {
 			if dimHash != desc.dimHash {
 				return fmt.Errorf("a previously registered descriptor with the same fully-qualified name as %s has different label names or a different help string", desc)
 			}
-		} else {
-			// ...then check the new descriptors already seen.
-			if dimHash, exists := newDimHashesByName[desc.fqName]; exists {
-				if dimHash != desc.dimHash {
-					return fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc)
-				}
-			} else {
-				newDimHashesByName[desc.fqName] = desc.dimHash
+			continue
+		}
+
+		// ...then check the new descriptors already seen.
+		if dimHash, exists := newDimHashesByName[desc.fqName]; exists {
+			if dimHash != desc.dimHash {
+				return fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc)
 			}
+			continue
 		}
+		newDimHashesByName[desc.fqName] = desc.dimHash
 	}
 
 	// A Collector yielding no Desc at all is considered unchecked.
if len(newDescIDs) == 0 { diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go index 1462704446..1ab0e47965 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/summary.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/summary.go @@ -783,3 +783,45 @@ func MustNewConstSummary( } return m } + +// NewConstSummaryWithCreatedTimestamp does the same thing as NewConstSummary but sets the created timestamp. +func NewConstSummaryWithCreatedTimestamp( + desc *Desc, + count uint64, + sum float64, + quantiles map[float64]float64, + ct time.Time, + labelValues ...string, +) (Metric, error) { + if desc.err != nil { + return nil, desc.err + } + if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil { + return nil, err + } + return &constSummary{ + desc: desc, + count: count, + sum: sum, + quantiles: quantiles, + labelPairs: MakeLabelPairs(desc, labelValues), + createdTs: timestamppb.New(ct), + }, nil +} + +// MustNewConstSummaryWithCreatedTimestamp is a version of NewConstSummaryWithCreatedTimestamp that panics where +// NewConstSummaryWithCreatedTimestamp would have returned an error. +func MustNewConstSummaryWithCreatedTimestamp( + desc *Desc, + count uint64, + sum float64, + quantiles map[float64]float64, + ct time.Time, + labelValues ...string, +) Metric { + m, err := NewConstSummaryWithCreatedTimestamp(desc, count, sum, quantiles, ct, labelValues...) + if err != nil { + panic(err) + } + return m +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validation.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validation.go index f52ad9eab6..e1441598da 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validation.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validation.go @@ -30,4 +30,5 @@ var defaultValidations = []Validation{ validations.LintReservedChars, validations.LintCamelCase, validations.LintUnitAbbreviations, + validations.LintDuplicateMetric, } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/duplicate_validations.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/duplicate_validations.go new file mode 100644 index 0000000000..fdc1e62394 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/duplicate_validations.go @@ -0,0 +1,37 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package validations + +import ( + "fmt" + "reflect" + + dto "github.com/prometheus/client_model/go" +) + +// LintDuplicateMetric detects duplicate metric. 
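
Since LintDuplicateMetric now runs as part of the default validations, a sketch of how the lint surfaces a duplicate series before the function body below; the exposition sample is made up:

    package main

    import (
    	"fmt"
    	"strings"

    	"github.com/prometheus/client_golang/prometheus/testutil/promlint"
    )

    func main() {
    	exposition := "# HELP requests_total Total requests.\n" +
    		"# TYPE requests_total counter\n" +
    		"requests_total{path=\"/\"} 1\n" +
    		"requests_total{path=\"/\"} 2\n"

    	problems, err := promlint.New(strings.NewReader(exposition)).Lint()
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(problems) // expect a "metric not unique" problem for requests_total
    }
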
+func LintDuplicateMetric(mf *dto.MetricFamily) []error { + var problems []error + + for i, m := range mf.Metric { + for _, k := range mf.Metric[i+1:] { + if reflect.DeepEqual(m.Label, k.Label) { + problems = append(problems, fmt.Errorf("metric not unique")) + break + } + } + } + + return problems +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/generic_name_validations.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/generic_name_validations.go index bc8dbd1e16..de52cfee44 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/generic_name_validations.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/generic_name_validations.go @@ -44,21 +44,21 @@ func LintMetricUnits(mf *dto.MetricFamily) []error { return problems } -// LintMetricTypeInName detects when metric types are included in the metric name. +// LintMetricTypeInName detects when the metric type is included in the metric name. func LintMetricTypeInName(mf *dto.MetricFamily) []error { + if mf.GetType() == dto.MetricType_UNTYPED { + return nil + } + var problems []error - n := strings.ToLower(mf.GetName()) - for i, t := range dto.MetricType_name { - if i == int32(dto.MetricType_UNTYPED) { - continue - } + n := strings.ToLower(mf.GetName()) + typename := strings.ToLower(mf.GetType().String()) - typename := strings.ToLower(t) - if strings.Contains(n, "_"+typename+"_") || strings.HasSuffix(n, "_"+typename) { - problems = append(problems, fmt.Errorf(`metric name should not include type '%s'`, typename)) - } + if strings.Contains(n, "_"+typename+"_") || strings.HasSuffix(n, "_"+typename) { + problems = append(problems, fmt.Errorf(`metric name should not include type '%s'`, typename)) } + return problems } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go index 9dce15eafa..e0ac346665 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go @@ -42,9 +42,8 @@ import ( "fmt" "io" "net/http" - "reflect" - "github.com/davecgh/go-spew/spew" + "github.com/kylelemons/godebug/diff" dto "github.com/prometheus/client_model/go" "github.com/prometheus/common/expfmt" "google.golang.org/protobuf/proto" @@ -184,9 +183,8 @@ func ScrapeAndCompare(url string, expected io.Reader, metricNames ...string) err return compareMetricFamilies(scraped, wanted, metricNames...) } -// CollectAndCompare registers the provided Collector with a newly created -// pedantic Registry. It then calls GatherAndCompare with that Registry and with -// the provided metricNames. +// CollectAndCompare collects the metrics identified by `metricNames` and compares them in the Prometheus text +// exposition format to the data read from expected. func CollectAndCompare(c prometheus.Collector, expected io.Reader, metricNames ...string) error { reg := prometheus.NewPedanticRegistry() if err := reg.Register(c); err != nil { @@ -222,6 +220,31 @@ func TransactionalGatherAndCompare(g prometheus.TransactionalGatherer, expected return compareMetricFamilies(got, wanted, metricNames...) } +// CollectAndFormat collects the metrics identified by `metricNames` and returns them in the given format. 
+func CollectAndFormat(c prometheus.Collector, format expfmt.FormatType, metricNames ...string) ([]byte, error) { + reg := prometheus.NewPedanticRegistry() + if err := reg.Register(c); err != nil { + return nil, fmt.Errorf("registering collector failed: %w", err) + } + + gotFiltered, err := reg.Gather() + if err != nil { + return nil, fmt.Errorf("gathering metrics failed: %w", err) + } + + gotFiltered = filterMetrics(gotFiltered, metricNames) + + var gotFormatted bytes.Buffer + enc := expfmt.NewEncoder(&gotFormatted, expfmt.NewFormat(format)) + for _, mf := range gotFiltered { + if err := enc.Encode(mf); err != nil { + return nil, fmt.Errorf("encoding gathered metrics failed: %w", err) + } + } + + return gotFormatted.Bytes(), nil +} + // convertReaderToMetricFamily would read from a io.Reader object and convert it to a slice of // dto.MetricFamily. func convertReaderToMetricFamily(reader io.Reader) ([]*dto.MetricFamily, error) { @@ -254,6 +277,15 @@ func compareMetricFamilies(got, expected []*dto.MetricFamily, metricNames ...str if metricNames != nil { got = filterMetrics(got, metricNames) expected = filterMetrics(expected, metricNames) + if len(metricNames) > len(got) { + var missingMetricNames []string + for _, name := range metricNames { + if ok := hasMetricByName(got, name); !ok { + missingMetricNames = append(missingMetricNames, name) + } + } + return fmt.Errorf("expected metric name(s) not found: %v", missingMetricNames) + } } return compare(got, expected) @@ -277,73 +309,12 @@ func compare(got, want []*dto.MetricFamily) error { return fmt.Errorf("encoding expected metrics failed: %w", err) } } - if diffErr := diff(wantBuf, gotBuf); diffErr != "" { + if diffErr := diff.Diff(gotBuf.String(), wantBuf.String()); diffErr != "" { return fmt.Errorf(diffErr) } return nil } -// diff returns a diff of both values as long as both are of the same type and -// are a struct, map, slice, array or string. Otherwise it returns an empty string. 
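
For the CollectAndFormat helper added above, a usage sketch in a test; the collector and metric name are hypothetical:

    package mypkg

    import (
    	"testing"

    	"github.com/prometheus/client_golang/prometheus"
    	"github.com/prometheus/client_golang/prometheus/testutil"
    	"github.com/prometheus/common/expfmt"
    )

    func TestSnapshotFormat(t *testing.T) {
    	c := prometheus.NewCounter(prometheus.CounterOpts{
    		Name: "jobs_done_total",
    		Help: "Completed jobs.",
    	})
    	c.Inc()

    	// Render just this metric in the plain-text exposition format.
    	out, err := testutil.CollectAndFormat(c, expfmt.TypeTextPlain, "jobs_done_total")
    	if err != nil {
    		t.Fatal(err)
    	}
    	t.Logf("%s", out)
    }
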
-func diff(expected, actual interface{}) string { - if expected == nil || actual == nil { - return "" - } - - et, ek := typeAndKind(expected) - at, _ := typeAndKind(actual) - if et != at { - return "" - } - - if ek != reflect.Struct && ek != reflect.Map && ek != reflect.Slice && ek != reflect.Array && ek != reflect.String { - return "" - } - - var e, a string - c := spew.ConfigState{ - Indent: " ", - DisablePointerAddresses: true, - DisableCapacities: true, - SortKeys: true, - } - if et != reflect.TypeOf("") { - e = c.Sdump(expected) - a = c.Sdump(actual) - } else { - e = reflect.ValueOf(expected).String() - a = reflect.ValueOf(actual).String() - } - - diff, _ := internal.GetUnifiedDiffString(internal.UnifiedDiff{ - A: internal.SplitLines(e), - B: internal.SplitLines(a), - FromFile: "metric output does not match expectation; want", - FromDate: "", - ToFile: "got:", - ToDate: "", - Context: 1, - }) - - if diff == "" { - return "" - } - - return "\n\nDiff:\n" + diff -} - -// typeAndKind returns the type and kind of the given interface{} -func typeAndKind(v interface{}) (reflect.Type, reflect.Kind) { - t := reflect.TypeOf(v) - k := t.Kind() - - if k == reflect.Ptr { - t = t.Elem() - k = t.Kind() - } - return t, k -} - func filterMetrics(metrics []*dto.MetricFamily, names []string) []*dto.MetricFamily { var filtered []*dto.MetricFamily for _, m := range metrics { @@ -356,3 +327,12 @@ func filterMetrics(metrics []*dto.MetricFamily, names []string) []*dto.MetricFam } return filtered } + +func hasMetricByName(metrics []*dto.MetricFamily, name string) bool { + for _, mf := range metrics { + if mf.GetName() == name { + return true + } + } + return false +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/vendor/github.com/prometheus/client_golang/prometheus/vec.go index 955cfd59f8..2c808eece0 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/vec.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/vec.go @@ -507,7 +507,7 @@ func (m *metricMap) getOrCreateMetricWithLabelValues( return metric } -// getOrCreateMetricWithLabelValues retrieves the metric by hash and label value +// getOrCreateMetricWithLabels retrieves the metric by hash and label value // or creates it and returns the new one. // // This function holds the mutex. diff --git a/vendor/github.com/prometheus/common/config/headers.go b/vendor/github.com/prometheus/common/config/headers.go index 4a0be4a10e..7276742ec9 100644 --- a/vendor/github.com/prometheus/common/config/headers.go +++ b/vendor/github.com/prometheus/common/config/headers.go @@ -52,14 +52,6 @@ var reservedHeaders = map[string]struct{}{ // Headers represents the configuration for HTTP headers. type Headers struct { Headers map[string]Header `yaml:",inline"` - dir string -} - -// Header represents the configuration for a single HTTP header. -type Header struct { - Values []string `yaml:"values,omitempty" json:"values,omitempty"` - Secrets []Secret `yaml:"secrets,omitempty" json:"secrets,omitempty"` - Files []string `yaml:"files,omitempty" json:"files,omitempty"` } func (h Headers) MarshalJSON() ([]byte, error) { @@ -67,32 +59,40 @@ func (h Headers) MarshalJSON() ([]byte, error) { return json.Marshal(h.Headers) } -// SetDirectory records the directory to make headers file relative to the -// configuration file. +// SetDirectory make headers file relative to the configuration file. 
func (h *Headers) SetDirectory(dir string) { if h == nil { return } - h.dir = dir + for _, h := range h.Headers { + h.SetDirectory(dir) + } } // Validate validates the Headers config. func (h *Headers) Validate() error { - for n, header := range h.Headers { + for n := range h.Headers { if _, ok := reservedHeaders[http.CanonicalHeaderKey(n)]; ok { return fmt.Errorf("setting header %q is not allowed", http.CanonicalHeaderKey(n)) } - for _, v := range header.Files { - f := JoinDir(h.dir, v) - _, err := os.ReadFile(f) - if err != nil { - return fmt.Errorf("unable to read header %q from file %s: %w", http.CanonicalHeaderKey(n), f, err) - } - } } return nil } +// Header represents the configuration for a single HTTP header. +type Header struct { + Values []string `yaml:"values,omitempty" json:"values,omitempty"` + Secrets []Secret `yaml:"secrets,omitempty" json:"secrets,omitempty"` + Files []string `yaml:"files,omitempty" json:"files,omitempty"` +} + +// SetDirectory makes headers file relative to the configuration file. +func (h *Header) SetDirectory(dir string) { + for i := range h.Files { + h.Files[i] = JoinDir(dir, h.Files[i]) + } +} + // NewHeadersRoundTripper returns a RoundTripper that sets HTTP headers on // requests as configured. func NewHeadersRoundTripper(config *Headers, next http.RoundTripper) http.RoundTripper { @@ -121,10 +121,9 @@ func (rt *headersRoundTripper) RoundTrip(req *http.Request) (*http.Response, err req.Header.Add(n, string(v)) } for _, v := range h.Files { - f := JoinDir(rt.config.dir, v) - b, err := os.ReadFile(f) + b, err := os.ReadFile(v) if err != nil { - return nil, fmt.Errorf("unable to read headers file %s: %w", f, err) + return nil, fmt.Errorf("unable to read headers file %s: %w", v, err) } req.Header.Add(n, strings.TrimSpace(string(b))) } diff --git a/vendor/github.com/prometheus/common/config/http_config.go b/vendor/github.com/prometheus/common/config/http_config.go index 3e32013477..b640b89953 100644 --- a/vendor/github.com/prometheus/common/config/http_config.go +++ b/vendor/github.com/prometheus/common/config/http_config.go @@ -828,7 +828,7 @@ type basicAuthRoundTripper struct { // NewBasicAuthRoundTripper will apply a BASIC auth authorization header to a request unless it has // already been set. 
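
Given the reworked Headers/Header split above, a sketch of wiring a file-backed header into an HTTP client; the header name and paths are placeholders:

    package main

    import (
    	"net/http"

    	"github.com/prometheus/common/config"
    )

    func main() {
    	hdrs := &config.Headers{
    		Headers: map[string]config.Header{
    			"X-Scope-OrgID": {Files: []string{"tenant.txt"}},
    		},
    	}
    	// SetDirectory now rewrites each header's file paths relative to the
    	// configuration directory instead of remembering the directory itself.
    	hdrs.SetDirectory("/etc/prometheus")

    	client := &http.Client{Transport: config.NewHeadersRoundTripper(hdrs, http.DefaultTransport)}
    	_ = client
    }
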
-func NewBasicAuthRoundTripper(username SecretReader, password SecretReader, rt http.RoundTripper) http.RoundTripper { +func NewBasicAuthRoundTripper(username, password SecretReader, rt http.RoundTripper) http.RoundTripper { return &basicAuthRoundTripper{username, password, rt} } @@ -964,7 +964,7 @@ func (rt *oauth2RoundTripper) RoundTrip(req *http.Request) (*http.Response, erro } rt.mtx.Lock() - rt.lastSecret = secret + rt.lastSecret = newSecret rt.lastRT.Source = source if rt.client != nil { rt.client.CloseIdleConnections() diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go index 25cfaa2164..1448439b7f 100644 --- a/vendor/github.com/prometheus/common/expfmt/decode.go +++ b/vendor/github.com/prometheus/common/expfmt/decode.go @@ -45,7 +45,7 @@ func ResponseFormat(h http.Header) Format { mediatype, params, err := mime.ParseMediaType(ct) if err != nil { - return fmtUnknown + return FmtUnknown } const textType = "text/plain" @@ -53,21 +53,21 @@ func ResponseFormat(h http.Header) Format { switch mediatype { case ProtoType: if p, ok := params["proto"]; ok && p != ProtoProtocol { - return fmtUnknown + return FmtUnknown } if e, ok := params["encoding"]; ok && e != "delimited" { - return fmtUnknown + return FmtUnknown } - return fmtProtoDelim + return FmtProtoDelim case textType: if v, ok := params["version"]; ok && v != TextVersion { - return fmtUnknown + return FmtUnknown } - return fmtText + return FmtText } - return fmtUnknown + return FmtUnknown } // NewDecoder returns a new decoder based on the given input format. diff --git a/vendor/github.com/prometheus/common/expfmt/encode.go b/vendor/github.com/prometheus/common/expfmt/encode.go index ff5ef7a9d9..cf0c150c2e 100644 --- a/vendor/github.com/prometheus/common/expfmt/encode.go +++ b/vendor/github.com/prometheus/common/expfmt/encode.go @@ -77,18 +77,18 @@ func Negotiate(h http.Header) Format { if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol { switch ac.Params["encoding"] { case "delimited": - return fmtProtoDelim + escapingScheme + return FmtProtoDelim + escapingScheme case "text": - return fmtProtoText + escapingScheme + return FmtProtoText + escapingScheme case "compact-text": - return fmtProtoCompact + escapingScheme + return FmtProtoCompact + escapingScheme } } if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") { - return fmtText + escapingScheme + return FmtText + escapingScheme } } - return fmtText + escapingScheme + return FmtText + escapingScheme } // NegotiateIncludingOpenMetrics works like Negotiate but includes @@ -110,26 +110,26 @@ func NegotiateIncludingOpenMetrics(h http.Header) Format { if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol { switch ac.Params["encoding"] { case "delimited": - return fmtProtoDelim + escapingScheme + return FmtProtoDelim + escapingScheme case "text": - return fmtProtoText + escapingScheme + return FmtProtoText + escapingScheme case "compact-text": - return fmtProtoCompact + escapingScheme + return FmtProtoCompact + escapingScheme } } if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") { - return fmtText + escapingScheme + return FmtText + escapingScheme } if ac.Type+"/"+ac.SubType == OpenMetricsType && (ver == OpenMetricsVersion_0_0_1 || ver == OpenMetricsVersion_1_0_0 || ver == "") { switch ver { case OpenMetricsVersion_1_0_0: - return fmtOpenMetrics_1_0_0 + escapingScheme + return FmtOpenMetrics_1_0_0 + 
escapingScheme default: - return fmtOpenMetrics_0_0_1 + escapingScheme + return FmtOpenMetrics_0_0_1 + escapingScheme } } } - return fmtText + escapingScheme + return FmtText + escapingScheme } // NewEncoder returns a new encoder based on content type negotiation. All diff --git a/vendor/github.com/prometheus/common/expfmt/expfmt.go b/vendor/github.com/prometheus/common/expfmt/expfmt.go index 051b38cd17..14034a673a 100644 --- a/vendor/github.com/prometheus/common/expfmt/expfmt.go +++ b/vendor/github.com/prometheus/common/expfmt/expfmt.go @@ -32,24 +32,31 @@ type Format string // it on the wire, new content-type strings will have to be agreed upon and // added here. const ( - TextVersion = "0.0.4" - ProtoType = `application/vnd.google.protobuf` - ProtoProtocol = `io.prometheus.client.MetricFamily` - protoFmt = ProtoType + "; proto=" + ProtoProtocol + ";" + TextVersion = "0.0.4" + ProtoType = `application/vnd.google.protobuf` + ProtoProtocol = `io.prometheus.client.MetricFamily` + // Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoCompact) instead. + ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";" OpenMetricsType = `application/openmetrics-text` OpenMetricsVersion_0_0_1 = "0.0.1" OpenMetricsVersion_1_0_0 = "1.0.0" - // The Content-Type values for the different wire protocols. Note that these - // values are now unexported. If code was relying on comparisons to these - // constants, instead use FormatType(). - fmtUnknown Format = `` - fmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8` - fmtProtoDelim Format = protoFmt + ` encoding=delimited` - fmtProtoText Format = protoFmt + ` encoding=text` - fmtProtoCompact Format = protoFmt + ` encoding=compact-text` - fmtOpenMetrics_1_0_0 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_1_0_0 + `; charset=utf-8` - fmtOpenMetrics_0_0_1 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_0_0_1 + `; charset=utf-8` + // The Content-Type values for the different wire protocols. Do not do direct + // comparisons to these constants, instead use the comparison functions. + // Deprecated: Use expfmt.NewFormat(expfmt.TypeUnknown) instead. + FmtUnknown Format = `` + // Deprecated: Use expfmt.NewFormat(expfmt.TypeTextPlain) instead. + FmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8` + // Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoDelim) instead. + FmtProtoDelim Format = ProtoFmt + ` encoding=delimited` + // Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoText) instead. + FmtProtoText Format = ProtoFmt + ` encoding=text` + // Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoCompact) instead. + FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text` + // Deprecated: Use expfmt.NewFormat(expfmt.TypeOpenMetrics) instead. + FmtOpenMetrics_1_0_0 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_1_0_0 + `; charset=utf-8` + // Deprecated: Use expfmt.NewFormat(expfmt.TypeOpenMetrics) instead. 
+ FmtOpenMetrics_0_0_1 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_0_0_1 + `; charset=utf-8` ) const ( @@ -79,17 +86,17 @@ const ( func NewFormat(t FormatType) Format { switch t { case TypeProtoCompact: - return fmtProtoCompact + return FmtProtoCompact case TypeProtoDelim: - return fmtProtoDelim + return FmtProtoDelim case TypeProtoText: - return fmtProtoText + return FmtProtoText case TypeTextPlain: - return fmtText + return FmtText case TypeOpenMetrics: - return fmtOpenMetrics_1_0_0 + return FmtOpenMetrics_1_0_0 default: - return fmtUnknown + return FmtUnknown } } @@ -97,12 +104,12 @@ func NewFormat(t FormatType) Format { // specified version number. func NewOpenMetricsFormat(version string) (Format, error) { if version == OpenMetricsVersion_0_0_1 { - return fmtOpenMetrics_0_0_1, nil + return FmtOpenMetrics_0_0_1, nil } if version == OpenMetricsVersion_1_0_0 { - return fmtOpenMetrics_1_0_0, nil + return FmtOpenMetrics_1_0_0, nil } - return fmtUnknown, fmt.Errorf("unknown open metrics version string") + return FmtUnknown, fmt.Errorf("unknown open metrics version string") } // FormatType deduces an overall FormatType for the given format. diff --git a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go index 353c5e93f9..11c8ff4b9d 100644 --- a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go +++ b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go @@ -477,7 +477,7 @@ func writeOpenMetricsNameAndLabelPairs( if name != "" { // If the name does not pass the legacy validity check, we must put the // metric name inside the braces, quoted. - if !model.IsValidLegacyMetricName(model.LabelValue(name)) { + if !model.IsValidLegacyMetricName(name) { metricInsideBraces = true err := w.WriteByte(separator) written++ diff --git a/vendor/github.com/prometheus/common/expfmt/text_create.go b/vendor/github.com/prometheus/common/expfmt/text_create.go index f9b8265a9e..4b86434b33 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_create.go +++ b/vendor/github.com/prometheus/common/expfmt/text_create.go @@ -354,7 +354,7 @@ func writeNameAndLabelPairs( if name != "" { // If the name does not pass the legacy validity check, we must put the // metric name inside the braces. - if !model.IsValidLegacyMetricName(model.LabelValue(name)) { + if !model.IsValidLegacyMetricName(name) { metricInsideBraces = true err := w.WriteByte(separator) written++ @@ -498,7 +498,7 @@ func writeInt(w enhancedWriter, i int64) (int, error) { // writeName writes a string as-is if it complies with the legacy naming // scheme, or escapes it in double quotes if not. 
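
With the Fmt* constants now exported only for compatibility and marked deprecated, the constructor is the supported path; a sketch of encoding one metric family with it, where the family content is illustrative:

    package main

    import (
    	"bytes"

    	dto "github.com/prometheus/client_model/go"
    	"github.com/prometheus/common/expfmt"
    	"google.golang.org/protobuf/proto"
    )

    func main() {
    	// Prefer NewFormat over the deprecated FmtOpenMetrics_* constants.
    	format := expfmt.NewFormat(expfmt.TypeOpenMetrics)

    	var buf bytes.Buffer
    	enc := expfmt.NewEncoder(&buf, format)

    	mf := &dto.MetricFamily{
    		Name:   proto.String("up"),
    		Type:   dto.MetricType_GAUGE.Enum(),
    		Metric: []*dto.Metric{{Gauge: &dto.Gauge{Value: proto.Float64(1)}}},
    	}
    	_ = enc.Encode(mf)
    }
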
func writeName(w enhancedWriter, name string) (int, error) { - if model.IsValidLegacyMetricName(model.LabelValue(name)) { + if model.IsValidLegacyMetricName(name) { return w.WriteString(name) } var written int diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go index 26490211af..25db4f2151 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_parse.go +++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go @@ -22,9 +22,9 @@ import ( "math" "strconv" "strings" + "unicode/utf8" dto "github.com/prometheus/client_model/go" - "google.golang.org/protobuf/proto" "github.com/prometheus/common/model" @@ -60,6 +60,7 @@ type TextParser struct { currentMF *dto.MetricFamily currentMetric *dto.Metric currentLabelPair *dto.LabelPair + currentLabelPairs []*dto.LabelPair // Temporarily stores label pairs while parsing a metric line. // The remaining member variables are only used for summaries/histograms. currentLabels map[string]string // All labels including '__name__' but excluding 'quantile'/'le' @@ -74,6 +75,7 @@ type TextParser struct { // count and sum of that summary/histogram. currentIsSummaryCount, currentIsSummarySum bool currentIsHistogramCount, currentIsHistogramSum bool + currentMetricIsInsideBraces bool } // TextToMetricFamilies reads 'in' as the simple and flat text-based exchange @@ -137,12 +139,14 @@ func (p *TextParser) reset(in io.Reader) { } p.currentQuantile = math.NaN() p.currentBucket = math.NaN() + p.currentMF = nil } // startOfLine represents the state where the next byte read from p.buf is the // start of a line (or whitespace leading up to it). func (p *TextParser) startOfLine() stateFn { p.lineCount++ + p.currentMetricIsInsideBraces = false if p.skipBlankTab(); p.err != nil { // This is the only place that we expect to see io.EOF, // which is not an error but the signal that we are done. @@ -158,6 +162,9 @@ func (p *TextParser) startOfLine() stateFn { return p.startComment case '\n': return p.startOfLine // Empty line, start the next one. + case '{': + p.currentMetricIsInsideBraces = true + return p.readingLabels } return p.readingMetricName } @@ -275,6 +282,8 @@ func (p *TextParser) startLabelName() stateFn { return nil // Unexpected end of input. } if p.currentByte == '}' { + p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPairs...) + p.currentLabelPairs = nil if p.skipBlankTab(); p.err != nil { return nil // Unexpected end of input. } @@ -287,6 +296,38 @@ func (p *TextParser) startLabelName() stateFn { p.parseError(fmt.Sprintf("invalid label name for metric %q", p.currentMF.GetName())) return nil } + if p.skipBlankTabIfCurrentBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte != '=' { + if p.currentMetricIsInsideBraces { + if p.currentMF != nil && p.currentMF.GetName() != p.currentToken.String() { + p.parseError(fmt.Sprintf("multiple metric names %s %s", p.currentMF.GetName(), p.currentToken.String())) + return nil + } + switch p.currentByte { + case ',': + p.setOrCreateCurrentMF() + p.currentMetric = &dto.Metric{} + return p.startLabelName + case '}': + p.setOrCreateCurrentMF() + p.currentMetric = &dto.Metric{} + p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPairs...) + p.currentLabelPairs = nil + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. 
+ } + return p.readingValue + default: + p.parseError(fmt.Sprintf("unexpected end of metric name %q", p.currentByte)) + return nil + } + } + p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte)) + p.currentLabelPairs = nil + return nil + } p.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())} if p.currentLabelPair.GetName() == string(model.MetricNameLabel) { p.parseError(fmt.Sprintf("label name %q is reserved", model.MetricNameLabel)) @@ -296,23 +337,17 @@ func (p *TextParser) startLabelName() stateFn { // labels to 'real' labels. if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == model.QuantileLabel) && !(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == model.BucketLabel) { - p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPair) - } - if p.skipBlankTabIfCurrentBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - if p.currentByte != '=' { - p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte)) - return nil + p.currentLabelPairs = append(p.currentLabelPairs, p.currentLabelPair) } // Check for duplicate label names. labels := make(map[string]struct{}) - for _, l := range p.currentMetric.Label { + for _, l := range p.currentLabelPairs { lName := l.GetName() if _, exists := labels[lName]; !exists { labels[lName] = struct{}{} } else { p.parseError(fmt.Sprintf("duplicate label names for metric %q", p.currentMF.GetName())) + p.currentLabelPairs = nil return nil } } @@ -345,6 +380,7 @@ func (p *TextParser) startLabelValue() stateFn { if p.currentQuantile, p.err = parseFloat(p.currentLabelPair.GetValue()); p.err != nil { // Create a more helpful error message. p.parseError(fmt.Sprintf("expected float as value for 'quantile' label, got %q", p.currentLabelPair.GetValue())) + p.currentLabelPairs = nil return nil } } else { @@ -371,12 +407,19 @@ func (p *TextParser) startLabelValue() stateFn { return p.startLabelName case '}': + if p.currentMF == nil { + p.parseError("invalid metric name") + return nil + } + p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPairs...) + p.currentLabelPairs = nil if p.skipBlankTab(); p.err != nil { return nil // Unexpected end of input. } return p.readingValue default: p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.GetValue())) + p.currentLabelPairs = nil return nil } } @@ -585,6 +628,8 @@ func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) { p.currentToken.WriteByte(p.currentByte) case 'n': p.currentToken.WriteByte('\n') + case '"': + p.currentToken.WriteByte('"') default: p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) return @@ -610,13 +655,45 @@ func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) { // but not into p.currentToken. func (p *TextParser) readTokenAsMetricName() { p.currentToken.Reset() + // A UTF-8 metric name must be quoted and may have escaped characters. 
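// For illustration (hypothetical input): with the quoting support described
// in the comment above, the parser now accepts UTF-8 metric and label names
// such as
//
//	{"my.utf8.metric","host.name"="a"} 1
//
// where both the metric name and the label name appear quoted inside braces.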
+ quoted := false + escaped := false if !isValidMetricNameStart(p.currentByte) { return } - for { - p.currentToken.WriteByte(p.currentByte) + for p.err == nil { + if escaped { + switch p.currentByte { + case '\\': + p.currentToken.WriteByte(p.currentByte) + case 'n': + p.currentToken.WriteByte('\n') + case '"': + p.currentToken.WriteByte('"') + default: + p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) + return + } + escaped = false + } else { + switch p.currentByte { + case '"': + quoted = !quoted + if !quoted { + p.currentByte, p.err = p.buf.ReadByte() + return + } + case '\n': + p.parseError(fmt.Sprintf("metric name %q contains unescaped new-line", p.currentToken.String())) + return + case '\\': + escaped = true + default: + p.currentToken.WriteByte(p.currentByte) + } + } p.currentByte, p.err = p.buf.ReadByte() - if p.err != nil || !isValidMetricNameContinuation(p.currentByte) { + if !isValidMetricNameContinuation(p.currentByte, quoted) || (!quoted && p.currentByte == ' ') { return } } @@ -628,13 +705,45 @@ func (p *TextParser) readTokenAsMetricName() { // but not into p.currentToken. func (p *TextParser) readTokenAsLabelName() { p.currentToken.Reset() + // A UTF-8 label name must be quoted and may have escaped characters. + quoted := false + escaped := false if !isValidLabelNameStart(p.currentByte) { return } - for { - p.currentToken.WriteByte(p.currentByte) + for p.err == nil { + if escaped { + switch p.currentByte { + case '\\': + p.currentToken.WriteByte(p.currentByte) + case 'n': + p.currentToken.WriteByte('\n') + case '"': + p.currentToken.WriteByte('"') + default: + p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) + return + } + escaped = false + } else { + switch p.currentByte { + case '"': + quoted = !quoted + if !quoted { + p.currentByte, p.err = p.buf.ReadByte() + return + } + case '\n': + p.parseError(fmt.Sprintf("label name %q contains unescaped new-line", p.currentToken.String())) + return + case '\\': + escaped = true + default: + p.currentToken.WriteByte(p.currentByte) + } + } p.currentByte, p.err = p.buf.ReadByte() - if p.err != nil || !isValidLabelNameContinuation(p.currentByte) { + if !isValidLabelNameContinuation(p.currentByte, quoted) || (!quoted && p.currentByte == '=') { return } } @@ -660,6 +769,7 @@ func (p *TextParser) readTokenAsLabelValue() { p.currentToken.WriteByte('\n') default: p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) + p.currentLabelPairs = nil return } escaped = false @@ -718,19 +828,19 @@ func (p *TextParser) setOrCreateCurrentMF() { } func isValidLabelNameStart(b byte) bool { - return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' + return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == '"' } -func isValidLabelNameContinuation(b byte) bool { - return isValidLabelNameStart(b) || (b >= '0' && b <= '9') +func isValidLabelNameContinuation(b byte, quoted bool) bool { + return isValidLabelNameStart(b) || (b >= '0' && b <= '9') || (quoted && utf8.ValidString(string(b))) } func isValidMetricNameStart(b byte) bool { return isValidLabelNameStart(b) || b == ':' } -func isValidMetricNameContinuation(b byte) bool { - return isValidLabelNameContinuation(b) || b == ':' +func isValidMetricNameContinuation(b byte, quoted bool) bool { + return isValidLabelNameContinuation(b, quoted) || b == ':' } func isBlankOrTab(b byte) bool { diff --git a/vendor/github.com/prometheus/common/model/labels.go b/vendor/github.com/prometheus/common/model/labels.go 
index 3317ce22ff..73b7aa3e60 100644 --- a/vendor/github.com/prometheus/common/model/labels.go +++ b/vendor/github.com/prometheus/common/model/labels.go @@ -97,26 +97,35 @@ var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$") // therewith. type LabelName string -// IsValid returns true iff name matches the pattern of LabelNameRE for legacy -// names, and iff it's valid UTF-8 if NameValidationScheme is set to -// UTF8Validation. For the legacy matching, it does not use LabelNameRE for the -// check but a much faster hardcoded implementation. +// IsValid returns true iff the name matches the pattern of LabelNameRE when +// NameValidationScheme is set to LegacyValidation, or valid UTF-8 if +// NameValidationScheme is set to UTF8Validation. func (ln LabelName) IsValid() bool { if len(ln) == 0 { return false } switch NameValidationScheme { case LegacyValidation: - for i, b := range ln { - if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) { - return false - } - } + return ln.IsValidLegacy() case UTF8Validation: return utf8.ValidString(string(ln)) default: panic(fmt.Sprintf("Invalid name validation scheme requested: %d", NameValidationScheme)) } +} + +// IsValidLegacy returns true iff name matches the pattern of LabelNameRE for +// legacy names. It does not use LabelNameRE for the check but a much faster +// hardcoded implementation. +func (ln LabelName) IsValidLegacy() bool { + if len(ln) == 0 { + return false + } + for i, b := range ln { + if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) { + return false + } + } return true } diff --git a/vendor/github.com/prometheus/common/model/metric.go b/vendor/github.com/prometheus/common/model/metric.go index eb865e5a59..c44f93f314 100644 --- a/vendor/github.com/prometheus/common/model/metric.go +++ b/vendor/github.com/prometheus/common/model/metric.go @@ -161,7 +161,7 @@ func (m Metric) FastFingerprint() Fingerprint { func IsValidMetricName(n LabelValue) bool { switch NameValidationScheme { case LegacyValidation: - return IsValidLegacyMetricName(n) + return IsValidLegacyMetricName(string(n)) case UTF8Validation: if len(n) == 0 { return false @@ -176,7 +176,7 @@ func IsValidMetricName(n LabelValue) bool { // legacy validation scheme regardless of the value of NameValidationScheme. // This function, however, does not use MetricNameRE for the check but a much // faster hardcoded implementation. -func IsValidLegacyMetricName(n LabelValue) bool { +func IsValidLegacyMetricName(n string) bool { if len(n) == 0 { return false } @@ -208,7 +208,7 @@ func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricF } // If the name is nil, copy as-is, don't try to escape. 
- if v.Name == nil || IsValidLegacyMetricName(LabelValue(v.GetName())) { + if v.Name == nil || IsValidLegacyMetricName(v.GetName()) { out.Name = v.Name } else { out.Name = proto.String(EscapeName(v.GetName(), scheme)) @@ -230,7 +230,7 @@ func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricF for _, l := range m.Label { if l.GetName() == MetricNameLabel { - if l.Value == nil || IsValidLegacyMetricName(LabelValue(l.GetValue())) { + if l.Value == nil || IsValidLegacyMetricName(l.GetValue()) { escaped.Label = append(escaped.Label, l) continue } @@ -240,7 +240,7 @@ func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricF }) continue } - if l.Name == nil || IsValidLegacyMetricName(LabelValue(l.GetName())) { + if l.Name == nil || IsValidLegacyMetricName(l.GetName()) { escaped.Label = append(escaped.Label, l) continue } @@ -256,10 +256,10 @@ func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricF func metricNeedsEscaping(m *dto.Metric) bool { for _, l := range m.Label { - if l.GetName() == MetricNameLabel && !IsValidLegacyMetricName(LabelValue(l.GetValue())) { + if l.GetName() == MetricNameLabel && !IsValidLegacyMetricName(l.GetValue()) { return true } - if !IsValidLegacyMetricName(LabelValue(l.GetName())) { + if !IsValidLegacyMetricName(l.GetName()) { return true } } @@ -283,7 +283,7 @@ func EscapeName(name string, scheme EscapingScheme) string { case NoEscaping: return name case UnderscoreEscaping: - if IsValidLegacyMetricName(LabelValue(name)) { + if IsValidLegacyMetricName(name) { return name } for i, b := range name { @@ -309,7 +309,7 @@ func EscapeName(name string, scheme EscapingScheme) string { } return escaped.String() case ValueEncodingEscaping: - if IsValidLegacyMetricName(LabelValue(name)) { + if IsValidLegacyMetricName(name) { return name } escaped.WriteString("U__") @@ -452,6 +452,6 @@ func ToEscapingScheme(s string) (EscapingScheme, error) { case EscapeValues: return ValueEncodingEscaping, nil default: - return NoEscaping, fmt.Errorf("unknown format scheme " + s) + return NoEscaping, fmt.Errorf("unknown format scheme %s", s) } } diff --git a/vendor/github.com/prometheus/prometheus/config/config.go b/vendor/github.com/prometheus/prometheus/config/config.go index c924e30989..173689d6af 100644 --- a/vendor/github.com/prometheus/prometheus/config/config.go +++ b/vendor/github.com/prometheus/prometheus/config/config.go @@ -1085,8 +1085,9 @@ func (m RemoteWriteProtoMsgs) String() string { } var ( - // RemoteWriteProtoMsgV1 represents the deprecated `prometheus.WriteRequest` protobuf - // message introduced in the https://prometheus.io/docs/specs/remote_write_spec/. + // RemoteWriteProtoMsgV1 represents the `prometheus.WriteRequest` protobuf + // message introduced in the https://prometheus.io/docs/specs/remote_write_spec/, + // which will eventually be deprecated. // // NOTE: This string is used for both HTTP header values and config value, so don't change // this reference. diff --git a/vendor/github.com/prometheus/prometheus/storage/merge.go b/vendor/github.com/prometheus/prometheus/storage/merge.go index 194494b6a9..2424b26ab7 100644 --- a/vendor/github.com/prometheus/prometheus/storage/merge.go +++ b/vendor/github.com/prometheus/prometheus/storage/merge.go @@ -45,25 +45,24 @@ type mergeGenericQuerier struct { // // In case of overlaps between the data given by primaries' and secondaries' Selects, merge function will be used. 
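// An illustrative call (a sketch; primary is hypothetical, NoopQuerier and
// ChainedSeriesMerge are existing helpers in this package): because nil and
// noop queriers are now filtered out up front, the following collapses to the
// single real primary querier:
//
//	q := NewMergeQuerier(
//		[]Querier{primary, nil, NoopQuerier()},
//		nil,
//		ChainedSeriesMerge,
//	)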
func NewMergeQuerier(primaries, secondaries []Querier, mergeFn VerticalSeriesMergeFunc) Querier { + primaries = filterQueriers(primaries) + secondaries = filterQueriers(secondaries) + switch { - case len(primaries)+len(secondaries) == 0: + case len(primaries) == 0 && len(secondaries) == 0: return noopQuerier{} case len(primaries) == 1 && len(secondaries) == 0: return primaries[0] case len(primaries) == 0 && len(secondaries) == 1: - return secondaries[0] + return &querierAdapter{newSecondaryQuerierFrom(secondaries[0])} } queriers := make([]genericQuerier, 0, len(primaries)+len(secondaries)) for _, q := range primaries { - if _, ok := q.(noopQuerier); !ok && q != nil { - queriers = append(queriers, newGenericQuerierFrom(q)) - } + queriers = append(queriers, newGenericQuerierFrom(q)) } for _, q := range secondaries { - if _, ok := q.(noopQuerier); !ok && q != nil { - queriers = append(queriers, newSecondaryQuerierFrom(q)) - } + queriers = append(queriers, newSecondaryQuerierFrom(q)) } concurrentSelect := false @@ -77,31 +76,40 @@ func NewMergeQuerier(primaries, secondaries []Querier, mergeFn VerticalSeriesMer }} } +func filterQueriers(qs []Querier) []Querier { + ret := make([]Querier, 0, len(qs)) + for _, q := range qs { + if _, ok := q.(noopQuerier); !ok && q != nil { + ret = append(ret, q) + } + } + return ret +} + // NewMergeChunkQuerier returns a new Chunk Querier that merges results of given primary and secondary chunk queriers. // See NewFanout commentary to learn more about primary vs secondary differences. // // In case of overlaps between the data given by primaries' and secondaries' Selects, merge function will be used. // TODO(bwplotka): Currently merge will compact overlapping chunks with bigger chunk, without limit. Split it: https://github.com/prometheus/tsdb/issues/670 func NewMergeChunkQuerier(primaries, secondaries []ChunkQuerier, mergeFn VerticalChunkSeriesMergeFunc) ChunkQuerier { + primaries = filterChunkQueriers(primaries) + secondaries = filterChunkQueriers(secondaries) + switch { case len(primaries) == 0 && len(secondaries) == 0: return noopChunkQuerier{} case len(primaries) == 1 && len(secondaries) == 0: return primaries[0] case len(primaries) == 0 && len(secondaries) == 1: - return secondaries[0] + return &chunkQuerierAdapter{newSecondaryQuerierFromChunk(secondaries[0])} } queriers := make([]genericQuerier, 0, len(primaries)+len(secondaries)) for _, q := range primaries { - if _, ok := q.(noopChunkQuerier); !ok && q != nil { - queriers = append(queriers, newGenericQuerierFromChunk(q)) - } + queriers = append(queriers, newGenericQuerierFromChunk(q)) } - for _, querier := range secondaries { - if _, ok := querier.(noopChunkQuerier); !ok && querier != nil { - queriers = append(queriers, newSecondaryQuerierFromChunk(querier)) - } + for _, q := range secondaries { + queriers = append(queriers, newSecondaryQuerierFromChunk(q)) } concurrentSelect := false @@ -115,6 +123,16 @@ func NewMergeChunkQuerier(primaries, secondaries []ChunkQuerier, mergeFn Vertica }} } +func filterChunkQueriers(qs []ChunkQuerier) []ChunkQuerier { + ret := make([]ChunkQuerier, 0, len(qs)) + for _, q := range qs { + if _, ok := q.(noopChunkQuerier); !ok && q != nil { + ret = append(ret, q) + } + } + return ret +} + // Select returns a set of series that matches the given label matchers. 
func (q *mergeGenericQuerier) Select(ctx context.Context, sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) genericSeriesSet { seriesSets := make([]genericSeriesSet, 0, len(q.queriers)) diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/client.go b/vendor/github.com/prometheus/prometheus/storage/remote/client.go index eff44c6060..17caf7be9b 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/client.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/client.go @@ -14,7 +14,6 @@ package remote import ( - "bufio" "bytes" "context" "fmt" @@ -235,12 +234,12 @@ type RecoverableError struct { // Store sends a batch of samples to the HTTP endpoint, the request is the proto marshalled // and encoded bytes from codec.go. -func (c *Client) Store(ctx context.Context, req []byte, attempt int) error { +func (c *Client) Store(ctx context.Context, req []byte, attempt int) (WriteResponseStats, error) { httpReq, err := http.NewRequest(http.MethodPost, c.urlString, bytes.NewReader(req)) if err != nil { // Errors from NewRequest are from unparsable URLs, so are not // recoverable. - return err + return WriteResponseStats{}, err } httpReq.Header.Add("Content-Encoding", string(c.writeCompression)) @@ -267,28 +266,34 @@ func (c *Client) Store(ctx context.Context, req []byte, attempt int) error { if err != nil { // Errors from Client.Do are from (for example) network errors, so are // recoverable. - return RecoverableError{err, defaultBackoff} + return WriteResponseStats{}, RecoverableError{err, defaultBackoff} } defer func() { io.Copy(io.Discard, httpResp.Body) httpResp.Body.Close() }() + // TODO(bwplotka): Pass logger and emit debug on error? + // Parsing error means there were some response header values we can't parse, + // we can continue handling. + rs, _ := ParseWriteResponseStats(httpResp) + //nolint:usestdlibvars - if httpResp.StatusCode/100 != 2 { - scanner := bufio.NewScanner(io.LimitReader(httpResp.Body, maxErrMsgLen)) - line := "" - if scanner.Scan() { - line = scanner.Text() - } - err = fmt.Errorf("server returned HTTP status %s: %s", httpResp.Status, line) + if httpResp.StatusCode/100 == 2 { + return rs, nil } + + // Handling errors e.g. read potential error in the body. + // TODO(bwplotka): Pass logger and emit debug on error? + body, _ := io.ReadAll(io.LimitReader(httpResp.Body, maxErrMsgLen)) + err = fmt.Errorf("server returned HTTP status %s: %s", httpResp.Status, body) + //nolint:usestdlibvars if httpResp.StatusCode/100 == 5 || (c.retryOnRateLimit && httpResp.StatusCode == http.StatusTooManyRequests) { - return RecoverableError{err, retryAfterDuration(httpResp.Header.Get("Retry-After"))} + return rs, RecoverableError{err, retryAfterDuration(httpResp.Header.Get("Retry-After"))} } - return err + return rs, err } // retryAfterDuration returns the duration for the Retry-After header. In case of any errors, it diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/queue_manager.go b/vendor/github.com/prometheus/prometheus/storage/remote/queue_manager.go index 5bafb9da20..5b59288e6c 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/queue_manager.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/queue_manager.go @@ -391,7 +391,7 @@ func (m *queueManagerMetrics) unregister() { // external timeseries database. type WriteClient interface { // Store stores the given samples in the remote storage. 
- Store(ctx context.Context, req []byte, retryAttempt int) error + Store(ctx context.Context, req []byte, retryAttempt int) (WriteResponseStats, error) // Name uniquely identifies the remote storage. Name() string // Endpoint is the remote read or write endpoint for the storage client. @@ -597,14 +597,15 @@ func (t *QueueManager) sendMetadataWithBackoff(ctx context.Context, metadata []p } begin := time.Now() - err := t.storeClient.Store(ctx, req, try) + // Ignoring WriteResponseStats, because there is nothing for metadata, since it's + // embedded in v2 calls now, and we do v1 here. + _, err := t.storeClient.Store(ctx, req, try) t.metrics.sentBatchDuration.Observe(time.Since(begin).Seconds()) if err != nil { span.RecordError(err) return err } - return nil } @@ -1661,8 +1662,8 @@ func populateTimeSeries(batch []timeSeries, pendingData []prompb.TimeSeries, sen func (s *shards) sendSamples(ctx context.Context, samples []prompb.TimeSeries, sampleCount, exemplarCount, histogramCount int, pBuf *proto.Buffer, buf *[]byte, enc Compression) error { begin := time.Now() - err := s.sendSamplesWithBackoff(ctx, samples, sampleCount, exemplarCount, histogramCount, 0, pBuf, buf, enc) - s.updateMetrics(ctx, err, sampleCount, exemplarCount, histogramCount, 0, time.Since(begin)) + rs, err := s.sendSamplesWithBackoff(ctx, samples, sampleCount, exemplarCount, histogramCount, 0, pBuf, buf, enc) + s.updateMetrics(ctx, err, sampleCount, exemplarCount, histogramCount, 0, rs, time.Since(begin)) return err } @@ -1670,17 +1671,29 @@ func (s *shards) sendSamples(ctx context.Context, samples []prompb.TimeSeries, s // See https://github.com/prometheus/prometheus/issues/14409 func (s *shards) sendV2Samples(ctx context.Context, samples []writev2.TimeSeries, labels []string, sampleCount, exemplarCount, histogramCount, metadataCount int, pBuf, buf *[]byte, enc Compression) error { begin := time.Now() - err := s.sendV2SamplesWithBackoff(ctx, samples, labels, sampleCount, exemplarCount, histogramCount, metadataCount, pBuf, buf, enc) - s.updateMetrics(ctx, err, sampleCount, exemplarCount, histogramCount, metadataCount, time.Since(begin)) + rs, err := s.sendV2SamplesWithBackoff(ctx, samples, labels, sampleCount, exemplarCount, histogramCount, metadataCount, pBuf, buf, enc) + s.updateMetrics(ctx, err, sampleCount, exemplarCount, histogramCount, metadataCount, rs, time.Since(begin)) return err } -func (s *shards) updateMetrics(_ context.Context, err error, sampleCount, exemplarCount, histogramCount, metadataCount int, duration time.Duration) { +func (s *shards) updateMetrics(_ context.Context, err error, sampleCount, exemplarCount, histogramCount, metadataCount int, rs WriteResponseStats, duration time.Duration) { + // Partial errors may happen -- account for that. 
+ sampleDiff := sampleCount - rs.Samples + if sampleDiff > 0 { + s.qm.metrics.failedSamplesTotal.Add(float64(sampleDiff)) + } + histogramDiff := histogramCount - rs.Histograms + if histogramDiff > 0 { + s.qm.metrics.failedHistogramsTotal.Add(float64(histogramDiff)) + } + exemplarDiff := exemplarCount - rs.Exemplars + if exemplarDiff > 0 { + s.qm.metrics.failedExemplarsTotal.Add(float64(exemplarDiff)) + } if err != nil { - level.Error(s.qm.logger).Log("msg", "non-recoverable error", "count", sampleCount, "exemplarCount", exemplarCount, "histogramCount", histogramCount, "err", err) - s.qm.metrics.failedSamplesTotal.Add(float64(sampleCount)) - s.qm.metrics.failedExemplarsTotal.Add(float64(exemplarCount)) - s.qm.metrics.failedHistogramsTotal.Add(float64(histogramCount)) + level.Error(s.qm.logger).Log("msg", "non-recoverable error", "failedSampleCount", sampleDiff, "failedHistogramCount", histogramDiff, "failedExemplarCount", exemplarDiff, "err", err) + } else if sampleDiff+exemplarDiff+histogramDiff > 0 { + level.Error(s.qm.logger).Log("msg", "we got 2xx status code from the Receiver yet statistics indicate some data was not written; investigation needed", "failedSampleCount", sampleDiff, "failedHistogramCount", histogramDiff, "failedExemplarCount", exemplarDiff) } // These counters are used to calculate the dynamic sharding, and as such @@ -1688,6 +1701,7 @@ func (s *shards) updateMetrics(_ context.Context, err error, sampleCount, exempl s.qm.dataOut.incr(int64(sampleCount + exemplarCount + histogramCount + metadataCount)) s.qm.dataOutDuration.incr(int64(duration)) s.qm.lastSendTimestamp.Store(time.Now().Unix()) + // Pending samples/exemplars/histograms also should be subtracted, as an error means // they will not be retried. s.qm.metrics.pendingSamples.Sub(float64(sampleCount)) @@ -1699,19 +1713,29 @@ } // sendSamples to the remote storage with backoff for recoverable errors. -func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.TimeSeries, sampleCount, exemplarCount, histogramCount, metadataCount int, pBuf *proto.Buffer, buf *[]byte, enc Compression) error { +func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.TimeSeries, sampleCount, exemplarCount, histogramCount, metadataCount int, pBuf *proto.Buffer, buf *[]byte, enc Compression) (WriteResponseStats, error) { // Build the WriteRequest with no metadata. req, highest, lowest, err := buildWriteRequest(s.qm.logger, samples, nil, pBuf, buf, nil, enc) s.qm.buildRequestLimitTimestamp.Store(lowest) if err != nil { // Failing to build the write request is non-recoverable, since it will // only error if marshaling the proto to bytes fails. - return err + return WriteResponseStats{}, err } reqSize := len(req) *buf = req + // Since we retry writes via attemptStore and sendWriteRequestWithBackoff we need + // to track the total amount of accepted data across the various attempts. + accumulatedStats := WriteResponseStats{} + var accumulatedStatsMu sync.Mutex + addStats := func(rs WriteResponseStats) { + accumulatedStatsMu.Lock() + accumulatedStats = accumulatedStats.Add(rs) + accumulatedStatsMu.Unlock() + } + // An anonymous function allows us to defer the completion of our per-try spans // without causing a memory leak, and it has the nice effect of not propagating any // parameters for sendSamplesWithBackoff/3.
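// A sketch of the accumulation pattern introduced above (client, payload and
// maxRetries are hypothetical names): the stats returned by every Store
// attempt are summed via Add, so a request that succeeds only after retries
// still reports totals across all attempts.
//
//	stats := WriteResponseStats{}
//	for attempt := 0; attempt < maxRetries; attempt++ {
//		rs, err := client.Store(ctx, payload, attempt)
//		stats = stats.Add(rs)
//		if err == nil {
//			break
//		}
//	}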
@@ -1759,15 +1783,19 @@ func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.Ti s.qm.metrics.exemplarsTotal.Add(float64(exemplarCount)) s.qm.metrics.histogramsTotal.Add(float64(histogramCount)) s.qm.metrics.metadataTotal.Add(float64(metadataCount)) - err := s.qm.client().Store(ctx, *buf, try) + // Technically for v1, we will likely have empty response stats, but for + // newer Receivers this might not be the case, so we use it on a best-effort basis. + rs, err := s.qm.client().Store(ctx, *buf, try) s.qm.metrics.sentBatchDuration.Observe(time.Since(begin).Seconds()) + // TODO(bwplotka): Revisit this once we have Receivers doing retriable partial errors; + // so far we don't have those, so it's ok to potentially skew statistics. + addStats(rs) - if err != nil { - span.RecordError(err) - return err + if err == nil { + return nil } - - return nil + span.RecordError(err) + return err } onRetry := func() { @@ -1780,29 +1808,48 @@ if errors.Is(err, context.Canceled) { // When there is resharding, we cancel the context for this queue, which means the data is not sent. // So we exit early to not update the metrics. - return err + return accumulatedStats, err } s.qm.metrics.sentBytesTotal.Add(float64(reqSize)) s.qm.metrics.highestSentTimestamp.Set(float64(highest / 1000)) - return err + if err == nil && !accumulatedStats.Confirmed { + // No 2.0 response headers, and we sent v1 message, so likely it's 1.0 Receiver. + // Assume success, don't rely on headers. + return WriteResponseStats{ + Samples: sampleCount, + Histograms: histogramCount, + Exemplars: exemplarCount, + }, nil + } + return accumulatedStats, err } // sendV2Samples to the remote storage with backoff for recoverable errors. -func (s *shards) sendV2SamplesWithBackoff(ctx context.Context, samples []writev2.TimeSeries, labels []string, sampleCount, exemplarCount, histogramCount, metadataCount int, pBuf, buf *[]byte, enc Compression) error { +func (s *shards) sendV2SamplesWithBackoff(ctx context.Context, samples []writev2.TimeSeries, labels []string, sampleCount, exemplarCount, histogramCount, metadataCount int, pBuf, buf *[]byte, enc Compression) (WriteResponseStats, error) { // Build the WriteRequest with no metadata. req, highest, lowest, err := buildV2WriteRequest(s.qm.logger, samples, labels, pBuf, buf, nil, enc) s.qm.buildRequestLimitTimestamp.Store(lowest) if err != nil { // Failing to build the write request is non-recoverable, since it will // only error if marshaling the proto to bytes fails. - return err + return WriteResponseStats{}, err } reqSize := len(req) *buf = req + // Since we retry writes via attemptStore and sendWriteRequestWithBackoff we need + // to track the total amount of accepted data across the various attempts. + accumulatedStats := WriteResponseStats{} + var accumulatedStatsMu sync.Mutex + addStats := func(rs WriteResponseStats) { + accumulatedStatsMu.Lock() + accumulatedStats = accumulatedStats.Add(rs) + accumulatedStatsMu.Unlock() + } + // An anonymous function allows us to defer the completion of our per-try spans // without causing a memory leak, and it has the nice effect of not propagating any // parameters for sendSamplesWithBackoff/3.
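// Because WriteClient.Store now returns WriteResponseStats, a minimal test
// double could look like the following sketch (the type and returned values
// are hypothetical, not part of the vendored change):
//
//	type nopWriteClient struct{}
//
//	func (nopWriteClient) Store(context.Context, []byte, int) (WriteResponseStats, error) {
//		return WriteResponseStats{Confirmed: true}, nil
//	}
//	func (nopWriteClient) Name() string     { return "nop" }
//	func (nopWriteClient) Endpoint() string { return "http://example.invalid" }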
@@ -1850,15 +1897,28 @@ func (s *shards) sendV2SamplesWithBackoff(ctx context.Context, samples []writev2 s.qm.metrics.exemplarsTotal.Add(float64(exemplarCount)) s.qm.metrics.histogramsTotal.Add(float64(histogramCount)) s.qm.metrics.metadataTotal.Add(float64(metadataCount)) - err := s.qm.client().Store(ctx, *buf, try) + rs, err := s.qm.client().Store(ctx, *buf, try) s.qm.metrics.sentBatchDuration.Observe(time.Since(begin).Seconds()) + // TODO(bwplotka): Revisit this once we have Receivers doing retriable partial errors; + // so far we don't have those, so it's ok to potentially skew statistics. + addStats(rs) - if err != nil { - span.RecordError(err) - return err + if err == nil { + // Check the case mentioned in PRW 2.0 + // https://prometheus.io/docs/specs/remote_write_spec_2_0/#required-written-response-headers. + if sampleCount+histogramCount+exemplarCount > 0 && rs.NoDataWritten() { + err = fmt.Errorf("sent v2 request with %v samples, %v histograms and %v exemplars; got 2xx, but PRW 2.0 response header statistics indicate %v samples, %v histograms and %v exemplars were accepted;"+ + " assuming failure, e.g. the target only supports PRW 1.0 prometheus.WriteRequest, but does not check the Content-Type header correctly", + sampleCount, histogramCount, exemplarCount, + rs.Samples, rs.Histograms, rs.Exemplars, + ) + span.RecordError(err) + return err + } + return nil } - - return nil + span.RecordError(err) + return err } onRetry := func() { @@ -1871,13 +1931,12 @@ if errors.Is(err, context.Canceled) { // When there is resharding, we cancel the context for this queue, which means the data is not sent. // So we exit early to not update the metrics. - return err + return accumulatedStats, err } s.qm.metrics.sentBytesTotal.Add(float64(reqSize)) s.qm.metrics.highestSentTimestamp.Set(float64(highest / 1000)) - - return err + return accumulatedStats, err } func populateV2TimeSeries(symbolTable *writev2.SymbolsTable, batch []timeSeries, pendingData []writev2.TimeSeries, sendExemplars, sendNativeHistograms bool) (int, int, int, int) { diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/read_handler.go b/vendor/github.com/prometheus/prometheus/storage/remote/read_handler.go index 2a00ce897f..ffc64c9c3f 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/read_handler.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/read_handler.go @@ -202,16 +202,34 @@ func (h *readHandler) remoteReadStreamedXORChunks(ctx context.Context, w http.Re return err } - chunks := h.getChunkSeriesSet(ctx, query, filteredMatchers) - if err := chunks.Err(); err != nil { + querier, err := h.queryable.ChunkQuerier(query.StartTimestampMs, query.EndTimestampMs) + if err != nil { return err } + defer func() { + if err := querier.Close(); err != nil { + level.Warn(h.logger).Log("msg", "Error on chunk querier close", "err", err.Error()) + } + }() + + var hints *storage.SelectHints + if query.Hints != nil { + hints = &storage.SelectHints{ + Start: query.Hints.StartMs, + End: query.Hints.EndMs, + Step: query.Hints.StepMs, + Func: query.Hints.Func, + Grouping: query.Hints.Grouping, + Range: query.Hints.RangeMs, + By: query.Hints.By, + } + } ws, err := StreamChunkedReadResponses( NewChunkedWriter(w, f), int64(i), // The streaming API has to provide the series sorted.
- chunks, + querier.Select(ctx, true, hints, filteredMatchers...), sortedExternalLabels, h.remoteReadMaxBytesInFrame, h.marshalPool, @@ -236,35 +254,6 @@ func (h *readHandler) remoteReadStreamedXORChunks(ctx context.Context, w http.Re } } -// getChunkSeriesSet executes a query to retrieve a ChunkSeriesSet, -// encapsulating the operation in its own function to ensure timely release of -// the querier resources. -func (h *readHandler) getChunkSeriesSet(ctx context.Context, query *prompb.Query, filteredMatchers []*labels.Matcher) storage.ChunkSeriesSet { - querier, err := h.queryable.ChunkQuerier(query.StartTimestampMs, query.EndTimestampMs) - if err != nil { - return storage.ErrChunkSeriesSet(err) - } - defer func() { - if err := querier.Close(); err != nil { - level.Warn(h.logger).Log("msg", "Error on chunk querier close", "err", err.Error()) - } - }() - - var hints *storage.SelectHints - if query.Hints != nil { - hints = &storage.SelectHints{ - Start: query.Hints.StartMs, - End: query.Hints.EndMs, - Step: query.Hints.StepMs, - Func: query.Hints.Func, - Grouping: query.Hints.Grouping, - Range: query.Hints.RangeMs, - By: query.Hints.By, - } - } - return querier.Select(ctx, true, hints, filteredMatchers...) -} - // filterExtLabelsFromMatchers change equality matchers which match external labels // to a matcher that looks for an empty label, // as that label should not be present in the storage. diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/stats.go b/vendor/github.com/prometheus/prometheus/storage/remote/stats.go new file mode 100644 index 0000000000..89d00ffc31 --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/storage/remote/stats.go @@ -0,0 +1,107 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package remote + +import ( + "errors" + "net/http" + "strconv" +) + +const ( + rw20WrittenSamplesHeader = "X-Prometheus-Remote-Write-Samples-Written" + rw20WrittenHistogramsHeader = "X-Prometheus-Remote-Write-Histograms-Written" + rw20WrittenExemplarsHeader = "X-Prometheus-Remote-Write-Exemplars-Written" +) + +// WriteResponseStats represents the response write statistics specified in https://github.com/prometheus/docs/pull/2486 +type WriteResponseStats struct { + // Samples represents X-Prometheus-Remote-Write-Samples-Written + Samples int + // Histograms represents X-Prometheus-Remote-Write-Histograms-Written + Histograms int + // Exemplars represents X-Prometheus-Remote-Write-Exemplars-Written + Exemplars int + + // Confirmed means we can trust those statistics from the point of view + // of the PRW 2.0 spec. When parsed from headers, it means we got at least one + // response header from the Receiver to confirm those numbers, meaning it must + // be at least a 2.0 Receiver. See ParseWriteResponseStats for details. + Confirmed bool +} + +// NoDataWritten returns true if statistics indicate no data was written.
+func (s WriteResponseStats) NoDataWritten() bool { + return (s.Samples + s.Histograms + s.Exemplars) == 0 +} + +// AllSamples returns both float and histogram sample numbers. +func (s WriteResponseStats) AllSamples() int { + return s.Samples + s.Histograms +} + +// Add returns the sum of this WriteResponseStats plus the given WriteResponseStats. +func (s WriteResponseStats) Add(rs WriteResponseStats) WriteResponseStats { + s.Confirmed = rs.Confirmed + s.Samples += rs.Samples + s.Histograms += rs.Histograms + s.Exemplars += rs.Exemplars + return s +} + +// SetHeaders sets response headers in a given response writer. +// Make sure to use it before http.ResponseWriter.WriteHeader and .Write. +func (s WriteResponseStats) SetHeaders(w http.ResponseWriter) { + h := w.Header() + h.Set(rw20WrittenSamplesHeader, strconv.Itoa(s.Samples)) + h.Set(rw20WrittenHistogramsHeader, strconv.Itoa(s.Histograms)) + h.Set(rw20WrittenExemplarsHeader, strconv.Itoa(s.Exemplars)) +} + +// ParseWriteResponseStats returns WriteResponseStats parsed from the response headers. +// +// As per 2.0 spec, missing header means 0. However, abrupt HTTP errors, 1.0 Receivers +// or buggy 2.0 Receivers might result in no response headers specified and that +// might NOT necessarily mean nothing was written. To represent that we set +// s.Confirmed = true only when we see at least one response header. +// +// Error is returned when any of the headers fails to parse as an integer. +func ParseWriteResponseStats(r *http.Response) (s WriteResponseStats, err error) { + var ( + errs []error + h = r.Header + ) + if v := h.Get(rw20WrittenSamplesHeader); v != "" { // Empty means zero. + s.Confirmed = true + if s.Samples, err = strconv.Atoi(v); err != nil { + s.Samples = 0 + errs = append(errs, err) + } + } + if v := h.Get(rw20WrittenHistogramsHeader); v != "" { // Empty means zero. + s.Confirmed = true + if s.Histograms, err = strconv.Atoi(v); err != nil { + s.Histograms = 0 + errs = append(errs, err) + } + } + if v := h.Get(rw20WrittenExemplarsHeader); v != "" { // Empty means zero. + s.Confirmed = true + if s.Exemplars, err = strconv.Atoi(v); err != nil { + s.Exemplars = 0 + errs = append(errs, err) + } + } + return s, errors.Join(errs...) +} diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/write_handler.go b/vendor/github.com/prometheus/prometheus/storage/remote/write_handler.go index d822373717..6756bf0abe 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/write_handler.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/write_handler.go @@ -19,7 +19,6 @@ import ( "fmt" "io" "net/http" - "strconv" "strings" "time" @@ -201,7 +200,7 @@ func (h *writeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { respStats, errHTTPCode, err := h.writeV2(r.Context(), &req) // Set required X-Prometheus-Remote-Write-Written-* response headers, in all cases.
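// A hedged round-trip sketch of the stats.go helpers above, using
// net/http/httptest purely for illustration:
//
//	rec := httptest.NewRecorder()
//	WriteResponseStats{Samples: 10, Histograms: 2}.SetHeaders(rec)
//
//	stats, err := ParseWriteResponseStats(rec.Result())
//	// err == nil; stats.Confirmed == true; stats.Samples == 10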
- respStats.SetResponseHeaders(w.Header()) + respStats.SetHeaders(w) if err != nil { if errHTTPCode/5 == 100 { // 5xx @@ -318,24 +317,6 @@ func (h *writeHandler) appendV1Histograms(app storage.Appender, hh []prompb.Hist return nil } -const ( - prw20WrittenSamplesHeader = "X-Prometheus-Remote-Write-Written-Samples" - rw20WrittenHistogramsHeader = "X-Prometheus-Remote-Write-Written-Histograms" - rw20WrittenExemplarsHeader = "X-Prometheus-Remote-Write-Written-Exemplars" -) - -type responseStats struct { - samples int - histograms int - exemplars int -} - -func (s responseStats) SetResponseHeaders(h http.Header) { - h.Set(prw20WrittenSamplesHeader, strconv.Itoa(s.samples)) - h.Set(rw20WrittenHistogramsHeader, strconv.Itoa(s.histograms)) - h.Set(rw20WrittenExemplarsHeader, strconv.Itoa(s.exemplars)) -} - // writeV2 is similar to write, but it works with v2 proto message, // allows partial 4xx writes and gathers statistics. // @@ -345,14 +326,14 @@ func (s responseStats) SetResponseHeaders(h http.Header) { // // NOTE(bwplotka): TSDB storage is NOT idempotent, so we don't allow "partial retry-able" errors. // Once we have 5xx type of error, we immediately stop and rollback all appends. -func (h *writeHandler) writeV2(ctx context.Context, req *writev2.Request) (_ responseStats, errHTTPCode int, _ error) { +func (h *writeHandler) writeV2(ctx context.Context, req *writev2.Request) (_ WriteResponseStats, errHTTPCode int, _ error) { app := &timeLimitAppender{ Appender: h.appendable.Appender(ctx), maxTime: timestamp.FromTime(time.Now().Add(maxAheadTime)), } - rs := responseStats{} - samplesWithoutMetadata, errHTTPCode, err := h.appendV2(app, req, &rs) + s := WriteResponseStats{} + samplesWithoutMetadata, errHTTPCode, err := h.appendV2(app, req, &s) if err != nil { if errHTTPCode/5 == 100 { // On 5xx, we always rollback, because we expect // sender to retry and TSDB is not idempotent. if rerr := app.Rollback(); rerr != nil { level.Error(h.logger).Log("msg", "writev2 rollback failed on retry-able error", "err", rerr) } - return responseStats{}, errHTTPCode, err + return WriteResponseStats{}, errHTTPCode, err } // Non-retriable (e.g. bad request error case). Can be partially written. commitErr := app.Commit() if commitErr != nil { // Bad requests do not matter as we have an internal (retryable) error. - return responseStats{}, http.StatusInternalServerError, commitErr + return WriteResponseStats{}, http.StatusInternalServerError, commitErr } // Bad request error happened, but rest of data (if any) was written. h.samplesAppendedWithoutMetadata.Add(float64(samplesWithoutMetadata)) - return rs, errHTTPCode, err + return s, errHTTPCode, err } // All good, just commit.
if err := app.Commit(); err != nil { - return responseStats{}, http.StatusInternalServerError, err + return WriteResponseStats{}, http.StatusInternalServerError, err } h.samplesAppendedWithoutMetadata.Add(float64(samplesWithoutMetadata)) - return rs, 0, nil + return s, 0, nil } -func (h *writeHandler) appendV2(app storage.Appender, req *writev2.Request, rs *responseStats) (samplesWithoutMetadata, errHTTPCode int, err error) { +func (h *writeHandler) appendV2(app storage.Appender, req *writev2.Request, rs *WriteResponseStats) (samplesWithoutMetadata, errHTTPCode int, err error) { var ( badRequestErrs []error outOfOrderExemplarErrs, samplesWithInvalidLabels int @@ -400,14 +381,14 @@ func (h *writeHandler) appendV2(app storage.Appender, req *writev2.Request, rs * continue } - allSamplesSoFar := rs.samples + rs.histograms + allSamplesSoFar := rs.AllSamples() var ref storage.SeriesRef // Samples. for _, s := range ts.Samples { ref, err = app.Append(ref, ls, s.GetTimestamp(), s.GetValue()) if err == nil { - rs.samples++ + rs.Samples++ continue } // Handle append error. @@ -431,7 +412,7 @@ ref, err = app.AppendHistogram(ref, ls, hp.Timestamp, hp.ToIntHistogram(), nil) } if err == nil { - rs.histograms++ + rs.Histograms++ continue } // Handle append error. @@ -453,18 +434,19 @@ e := ep.ToExemplar(&b, req.Symbols) ref, err = app.AppendExemplar(ref, ls, e) if err == nil { - rs.exemplars++ + rs.Exemplars++ continue } // Handle append error. - // TODO(bwplotka): I left the logic as in v1, but we might want to make it consistent with samples and histograms. - // Since exemplar storage is still experimental, we don't fail in anyway, the request on ingestion errors. if errors.Is(err, storage.ErrOutOfOrderExemplar) { - outOfOrderExemplarErrs++ - level.Debug(h.logger).Log("msg", "Out of order exemplar", "err", err.Error(), "series", ls.String(), "exemplar", fmt.Sprintf("%+v", e)) + outOfOrderExemplarErrs++ // Maintain old metrics, but technically not needed, given we fail here. + level.Error(h.logger).Log("msg", "Out of order exemplar", "err", err.Error(), "series", ls.String(), "exemplar", fmt.Sprintf("%+v", e)) + badRequestErrs = append(badRequestErrs, fmt.Errorf("%w for series %v", err, ls.String())) continue } - level.Debug(h.logger).Log("msg", "Error while adding exemplar in AppendExemplar", "series", ls.String(), "exemplar", fmt.Sprintf("%+v", e), "err", err) + // TODO(bwplotka): Add strict mode which would trigger rollback of everything if needed. + // For now we keep the previously released flow (just error, not debug level) of dropping them without rollback and 5xx. + level.Error(h.logger).Log("msg", "failed to ingest exemplar, emitting error log, but no error for PRW caller", "err", err.Error(), "series", ls.String(), "exemplar", fmt.Sprintf("%+v", e)) } m := ts.ToMetadata(req.Symbols) @@ -472,7 +454,7 @@ level.Debug(h.logger).Log("msg", "error while updating metadata from remote write", "err", err) // Metadata is attached to each series, so since Prometheus does not reject sample without metadata information, // we don't report remote write error either. We increment metric instead.
- samplesWithoutMetadata += (rs.samples + rs.histograms) - allSamplesSoFar + samplesWithoutMetadata += rs.AllSamples() - allSamplesSoFar } } diff --git a/vendor/github.com/thanos-io/thanos/pkg/block/fetcher.go b/vendor/github.com/thanos-io/thanos/pkg/block/fetcher.go index 883b8e0608..772d37a48b 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/block/fetcher.go +++ b/vendor/github.com/thanos-io/thanos/pkg/block/fetcher.go @@ -257,7 +257,11 @@ func (f *ConcurrentLister) GetActiveAndPartialBlockIDs(ctx context.Context, ch c mu.Unlock() continue } - ch <- uid + select { + case <-ctx.Done(): + return ctx.Err() + case ch <- uid: + } } return nil }) diff --git a/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/reader_pool.go b/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/reader_pool.go index e9fe5eb7dc..ff3975663c 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/reader_pool.go +++ b/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/reader_pool.go @@ -32,7 +32,7 @@ func NewReaderPoolMetrics(reg prometheus.Registerer) *ReaderPoolMetrics { } } -// ReaderPool is used to istantiate new index-header readers and keep track of them. +// ReaderPool is used to instantiate new index-header readers and keep track of them. // When the lazy reader is enabled, the pool keeps track of all instantiated readers // and automatically close them once the idle timeout is reached. A closed lazy reader // will be automatically re-opened upon next usage. @@ -73,7 +73,7 @@ func (s IndexHeaderLazyDownloadStrategy) StrategyToDownloadFunc() LazyDownloadIn } } -// LazyDownloadIndexHeaderFunc is used to determinte whether to download the index header lazily +// LazyDownloadIndexHeaderFunc is used to determine whether to download the index header lazily // or not by checking its block metadata. Usecase can be by time or by index file size. type LazyDownloadIndexHeaderFunc func(meta *metadata.Meta) bool diff --git a/vendor/github.com/thanos-io/thanos/pkg/cacheutil/memcached_client.go b/vendor/github.com/thanos-io/thanos/pkg/cacheutil/memcached_client.go index 8c10a9a874..a5b0c5b2a4 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/cacheutil/memcached_client.go +++ b/vendor/github.com/thanos-io/thanos/pkg/cacheutil/memcached_client.go @@ -134,7 +134,7 @@ type MemcachedClientConfig struct { MaxItemSize model.Bytes `yaml:"max_item_size"` // MaxGetMultiBatchSize specifies the maximum number of keys a single underlying - // GetMulti() should run. If more keys are specified, internally keys are splitted + // GetMulti() should run. If more keys are specified, internally keys are split // into multiple batches and fetched concurrently, honoring MaxGetMultiConcurrency parallelism. // If set to 0, the max batch size is unlimited. 
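// A worked example of the batching described above (illustrative numbers):
// with MaxGetMultiBatchSize set to 100, a GetMulti for 250 keys is split into
// three underlying batches of 100, 100 and 50 keys, fetched concurrently up
// to MaxGetMultiConcurrency.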
MaxGetMultiBatchSize int `yaml:"max_get_multi_batch_size"` diff --git a/vendor/github.com/thanos-io/thanos/pkg/clientconfig/http.go b/vendor/github.com/thanos-io/thanos/pkg/clientconfig/http.go index dc94938d75..69f2baf165 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/clientconfig/http.go +++ b/vendor/github.com/thanos-io/thanos/pkg/clientconfig/http.go @@ -199,11 +199,18 @@ func NewRoundTripperFromConfig(cfg config_util.HTTPClientConfig, transportConfig return newRT(tlsConfig) } - return config_util.NewTLSRoundTripper(tlsConfig, config_util.TLSRoundTripperSettings{ - CA: config_util.NewFileSecret(cfg.TLSConfig.CAFile), - Cert: config_util.NewFileSecret(cfg.TLSConfig.CertFile), - Key: config_util.NewFileSecret(cfg.TLSConfig.KeyFile), - }, newRT) + rtConfig := config_util.TLSRoundTripperSettings{ + CA: config_util.NewFileSecret(cfg.TLSConfig.CAFile), + } + if len(cfg.TLSConfig.CertFile) > 0 { + rtConfig.Cert = config_util.NewFileSecret(cfg.TLSConfig.CertFile) + } + + if len(cfg.TLSConfig.KeyFile) > 0 { + rtConfig.Key = config_util.NewFileSecret(cfg.TLSConfig.KeyFile) + } + + return config_util.NewTLSRoundTripper(tlsConfig, rtConfig, newRT) } // NewHTTPClient returns a new HTTP client. diff --git a/vendor/github.com/thanos-io/thanos/pkg/compact/compact.go b/vendor/github.com/thanos-io/thanos/pkg/compact/compact.go index 522e4c9d4c..7f08297671 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/compact/compact.go +++ b/vendor/github.com/thanos-io/thanos/pkg/compact/compact.go @@ -894,7 +894,7 @@ func (cg *Group) Compact(ctx context.Context, dir string, planner Planner, comp _, _ = sb.WriteString(",") } } - rerr = fmt.Errorf("paniced while compacting %s: %v", sb.String(), p) + rerr = fmt.Errorf("panicked while compacting %s: %v", sb.String(), p) } }() diff --git a/vendor/github.com/thanos-io/thanos/pkg/compact/downsample/downsample.go b/vendor/github.com/thanos-io/thanos/pkg/compact/downsample/downsample.go index 6aa2b23dfe..46d590186e 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/compact/downsample/downsample.go +++ b/vendor/github.com/thanos-io/thanos/pkg/compact/downsample/downsample.go @@ -149,7 +149,7 @@ func Downsample( // Raw and already downsampled data need different processing. if origMeta.Thanos.Downsample.Resolution == 0 { for _, c := range chks { - // TODO(bwplotka): We can optimze this further by using in WriteSeries iterators of each chunk instead of + // TODO(bwplotka): We can optimize this further by using in WriteSeries iterators of each chunk instead of // samples. Also ensure 120 sample limit, otherwise we have gigantic chunks. // https://github.com/thanos-io/thanos/issues/2542. if err := expandChunkIterator(c.Chunk.Iterator(reuseIt), &all); err != nil { diff --git a/vendor/github.com/thanos-io/thanos/pkg/compact/planner.go b/vendor/github.com/thanos-io/thanos/pkg/compact/planner.go index 6d7d03eea2..394e33185b 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/compact/planner.go +++ b/vendor/github.com/thanos-io/thanos/pkg/compact/planner.go @@ -68,7 +68,7 @@ func (p *tsdbBasedPlanner) plan(noCompactMarked map[ulid.ULID]*metadata.NoCompac } // No overlapping blocks, do compaction the usual way. - // We do not include a recently producted block with max(minTime), so the block which was just uploaded to bucket. + // We do not include a recently produced block with max(minTime), so the block which was just uploaded to bucket. // This gives users a window of a full block size maintenance if needed.
if _, excluded := noCompactMarked[metasByMinTime[len(metasByMinTime)-1].ULID]; !excluded { notExcludedMetasByMinTime = notExcludedMetasByMinTime[:len(notExcludedMetasByMinTime)-1] @@ -200,7 +200,7 @@ func splitByRange(metasByMinTime []*metadata.Meta, tr int64) [][]*metadata.Meta t0 = tr * ((m.MinTime - tr + 1) / tr) } - // Skip blocks that don't fall into the range. This can happen via mis-alignment or + // Skip blocks that don't fall into the range. This can happen via misalignment or // by being the multiple of the intended range. if m.MaxTime > t0+tr { i++ diff --git a/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/grpc.go b/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/grpc.go new file mode 100644 index 0000000000..4e315596df --- /dev/null +++ b/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/grpc.go @@ -0,0 +1,97 @@ +// Copyright (c) The Thanos Authors. +// Licensed under the Apache License 2.0. + +package dns + +import ( + "context" + "sync" + "time" + + grpcresolver "google.golang.org/grpc/resolver" +) + +var ( + _ grpcresolver.Builder = &builder{} + _ grpcresolver.Resolver = &resolver{} +) + +type builder struct { + resolveInterval time.Duration + provider *Provider +} + +func RegisterGRPCResolver(provider *Provider, interval time.Duration) { + grpcresolver.Register(&builder{ + resolveInterval: interval, + provider: provider, + }) +} + +func (b *builder) Scheme() string { return "thanos" } + +func (b *builder) Build(t grpcresolver.Target, cc grpcresolver.ClientConn, _ grpcresolver.BuildOptions) (grpcresolver.Resolver, error) { + ctx, cancel := context.WithCancel(context.Background()) + r := &resolver{ + provider: b.provider, + target: t.Endpoint(), + ctx: ctx, + cancel: cancel, + cc: cc, + interval: b.resolveInterval, + } + r.wg.Add(1) + go r.run() + + return r, nil +} + +type resolver struct { + provider *Provider + + target string + ctx context.Context + cancel context.CancelFunc + cc grpcresolver.ClientConn + interval time.Duration + + wg sync.WaitGroup +} + +func (r *resolver) Close() { + r.cancel() + r.wg.Wait() +} + +func (r *resolver) ResolveNow(_ grpcresolver.ResolveNowOptions) {} + +func (r *resolver) resolve() error { + ctx, cancel := context.WithTimeout(r.ctx, r.interval) + defer cancel() + return r.provider.Resolve(ctx, []string{r.target}) +} + +func (r *resolver) addresses() []string { + return r.provider.AddressesForHost(r.target) +} + +func (r *resolver) run() { + defer r.wg.Done() + for { + if err := r.resolve(); err != nil { + r.cc.ReportError(err) + } else { + state := grpcresolver.State{} + for _, addr := range r.addresses() { + raddr := grpcresolver.Address{Addr: addr} + state.Addresses = append(state.Addresses, raddr) + } + _ = r.cc.UpdateState(state) + } + select { + case <-r.ctx.Done(): + return + case <-time.After(r.interval): + } + } +} diff --git a/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/provider.go b/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/provider.go index 3ec032a654..8f42bf4d26 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/provider.go +++ b/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/provider.go @@ -164,3 +164,16 @@ func (p *Provider) Addresses() []string { } return result } + +// AddressesForHost returns the latest addresses present for the host in the Provider. 
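// A usage sketch for the resolver registered above (the endpoint address and
// the interval are hypothetical): once RegisterGRPCResolver has been called,
// gRPC targets using the "thanos" scheme resolve through this Provider.
//
//	dns.RegisterGRPCResolver(provider, 30*time.Second)
//	conn, err := grpc.Dial(
//		"thanos:///query.example.svc:10901",
//		grpc.WithTransportCredentials(insecure.NewCredentials()),
//	)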
+func (p *Provider) AddressesForHost(host string) []string { + p.RLock() + defer p.RUnlock() + + addrs := p.resolved[host] + + res := make([]string, len(addrs)) + copy(res, addrs) + + return res +} diff --git a/vendor/github.com/thanos-io/thanos/pkg/promclient/promclient.go b/vendor/github.com/thanos-io/thanos/pkg/promclient/promclient.go index 5dde62c5ee..1f96f4c666 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/promclient/promclient.go +++ b/vendor/github.com/thanos-io/thanos/pkg/promclient/promclient.go @@ -734,7 +734,7 @@ func (c *Client) get2xxResultWithGRPCErrors(ctx context.Context, spanName string // SeriesInGRPC returns the labels from Prometheus series API. It uses gRPC errors. // NOTE: This method is tested in pkg/store/prometheus_test.go against Prometheus. -func (c *Client) SeriesInGRPC(ctx context.Context, base *url.URL, matchers []*labels.Matcher, startTime, endTime int64) ([]map[string]string, error) { +func (c *Client) SeriesInGRPC(ctx context.Context, base *url.URL, matchers []*labels.Matcher, startTime, endTime int64, limit int) ([]map[string]string, error) { u := *base u.Path = path.Join(u.Path, "/api/v1/series") q := u.Query() @@ -742,6 +742,7 @@ func (c *Client) SeriesInGRPC(ctx context.Context, base *url.URL, matchers []*la q.Add("match[]", storepb.PromMatchersToString(matchers...)) q.Add("start", formatTime(timestamp.Time(startTime))) q.Add("end", formatTime(timestamp.Time(endTime))) + q.Add("limit", strconv.Itoa(limit)) u.RawQuery = q.Encode() var m struct { @@ -753,7 +754,7 @@ func (c *Client) SeriesInGRPC(ctx context.Context, base *url.URL, matchers []*la // LabelNamesInGRPC returns all known label names constrained by the given matchers. It uses gRPC errors. // NOTE: This method is tested in pkg/store/prometheus_test.go against Prometheus. -func (c *Client) LabelNamesInGRPC(ctx context.Context, base *url.URL, matchers []*labels.Matcher, startTime, endTime int64) ([]string, error) { +func (c *Client) LabelNamesInGRPC(ctx context.Context, base *url.URL, matchers []*labels.Matcher, startTime, endTime int64, limit int) ([]string, error) { u := *base u.Path = path.Join(u.Path, "/api/v1/labels") q := u.Query() @@ -763,6 +764,7 @@ func (c *Client) LabelNamesInGRPC(ctx context.Context, base *url.URL, matchers [ } q.Add("start", formatTime(timestamp.Time(startTime))) q.Add("end", formatTime(timestamp.Time(endTime))) + q.Add("limit", strconv.Itoa(limit)) u.RawQuery = q.Encode() var m struct { @@ -773,7 +775,7 @@ func (c *Client) LabelNamesInGRPC(ctx context.Context, base *url.URL, matchers [ // LabelValuesInGRPC returns all known label values for a given label name. It uses gRPC errors. // NOTE: This method is tested in pkg/store/prometheus_test.go against Prometheus. 
-func (c *Client) LabelValuesInGRPC(ctx context.Context, base *url.URL, label string, matchers []*labels.Matcher, startTime, endTime int64) ([]string, error) { +func (c *Client) LabelValuesInGRPC(ctx context.Context, base *url.URL, label string, matchers []*labels.Matcher, startTime, endTime int64, limit int) ([]string, error) { u := *base u.Path = path.Join(u.Path, "/api/v1/label/", label, "/values") q := u.Query() @@ -783,6 +785,7 @@ func (c *Client) LabelValuesInGRPC(ctx context.Context, base *url.URL, label str } q.Add("start", formatTime(timestamp.Time(startTime))) q.Add("end", formatTime(timestamp.Time(endTime))) + q.Add("limit", strconv.Itoa(limit)) u.RawQuery = q.Encode() var m struct { diff --git a/vendor/github.com/thanos-io/thanos/pkg/query/endpointset.go b/vendor/github.com/thanos-io/thanos/pkg/query/endpointset.go index b1faff425b..951db8ecc2 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/query/endpointset.go +++ b/vendor/github.com/thanos-io/thanos/pkg/query/endpointset.go @@ -717,7 +717,7 @@ func (er *endpointRef) updateMetadata(metadata *endpointMetadata, err error) { } // isQueryable returns true if an endpointRef should be used for querying. -// A strict endpointRef is always queriable. A non-strict endpointRef +// A strict endpointRef is always queryable. A non-strict endpointRef // is queryable if the last health check (info call) succeeded. func (er *endpointRef) isQueryable() bool { er.mtx.RLock() @@ -797,11 +797,7 @@ func (er *endpointRef) labelSets() []labels.Labels { labelSet := make([]labels.Labels, 0, len(er.metadata.LabelSets)) for _, ls := range labelpb.ZLabelSetsToPromLabelSets(er.metadata.LabelSets...) { - if len(ls) == 0 { - continue - } - // Compatibility label for Queriers pre 0.8.1. Filter it out now. - if ls[0].Name == store.CompatibilityTypeLabelName { + if ls.Len() == 0 { continue } labelSet = append(labelSet, ls.Copy()) diff --git a/vendor/github.com/thanos-io/thanos/pkg/query/querier.go b/vendor/github.com/thanos-io/thanos/pkg/query/querier.go index 9fddae11a5..e084344ed9 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/query/querier.go +++ b/vendor/github.com/thanos-io/thanos/pkg/query/querier.go @@ -331,6 +331,7 @@ func (q *querier) selectFn(ctx context.Context, hints *storage.SelectHints, ms . req := storepb.SeriesRequest{ MinTime: hints.Start, MaxTime: hints.End, + Limit: int64(hints.Limit), Matchers: sms, MaxResolutionWindow: q.maxResolutionMillis, Aggregates: aggrs, @@ -373,7 +374,7 @@ func (q *querier) selectFn(ctx context.Context, hints *storage.SelectHints, ms . } // LabelValues returns all potential values for a label name. 
-func (q *querier) LabelValues(ctx context.Context, name string, _ *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { +func (q *querier) LabelValues(ctx context.Context, name string, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { span, ctx := tracing.StartSpan(ctx, "querier_label_values") defer span.Finish() @@ -384,12 +385,18 @@ func (q *querier) LabelValues(ctx context.Context, name string, _ *storage.Label if err != nil { return nil, nil, errors.Wrap(err, "converting prom matchers to storepb matchers") } + + if hints == nil { + hints = &storage.LabelHints{} + } + req := &storepb.LabelValuesRequest{ Label: name, PartialResponseStrategy: q.partialResponseStrategy, Start: q.mint, End: q.maxt, Matchers: pbMatchers, + Limit: int64(hints.Limit), } if q.isDedupEnabled() { @@ -411,7 +418,7 @@ func (q *querier) LabelValues(ctx context.Context, name string, _ *storage.Label // LabelNames returns all the unique label names present in the block in sorted order constrained // by the given matchers. -func (q *querier) LabelNames(ctx context.Context, _ *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { +func (q *querier) LabelNames(ctx context.Context, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { span, ctx := tracing.StartSpan(ctx, "querier_label_names") defer span.Finish() @@ -423,11 +430,16 @@ func (q *querier) LabelNames(ctx context.Context, _ *storage.LabelHints, matcher return nil, nil, errors.Wrap(err, "converting prom matchers to storepb matchers") } + if hints == nil { + hints = &storage.LabelHints{} + } + req := &storepb.LabelNamesRequest{ PartialResponseStrategy: q.partialResponseStrategy, Start: q.mint, End: q.maxt, Matchers: pbMatchers, + Limit: int64(hints.Limit), } if q.isDedupEnabled() { diff --git a/vendor/github.com/thanos-io/thanos/pkg/rules/rulespb/rpc.pb.go b/vendor/github.com/thanos-io/thanos/pkg/rules/rulespb/rpc.pb.go index 941014a06a..298ce03063 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/rules/rulespb/rpc.pb.go +++ b/vendor/github.com/thanos-io/thanos/pkg/rules/rulespb/rpc.pb.go @@ -109,6 +109,9 @@ type RulesRequest struct { Type RulesRequest_Type `protobuf:"varint,1,opt,name=type,proto3,enum=thanos.RulesRequest_Type" json:"type,omitempty"` PartialResponseStrategy storepb.PartialResponseStrategy `protobuf:"varint,2,opt,name=partial_response_strategy,json=partialResponseStrategy,proto3,enum=thanos.PartialResponseStrategy" json:"partial_response_strategy,omitempty"` MatcherString []string `protobuf:"bytes,3,rep,name=matcher_string,json=matcherString,proto3" json:"matcher_string,omitempty"` + RuleName []string `protobuf:"bytes,4,rep,name=rule_name,json=ruleName,proto3" json:"rule_name,omitempty"` + RuleGroup []string `protobuf:"bytes,5,rep,name=rule_group,json=ruleGroup,proto3" json:"rule_group,omitempty"` + File []string `protobuf:"bytes,6,rep,name=file,proto3" json:"file,omitempty"` } func (m *RulesRequest) Reset() { *m = RulesRequest{} } @@ -554,74 +557,76 @@ func init() { func init() { proto.RegisterFile("rules/rulespb/rpc.proto", fileDescriptor_91b1d28f30eb5efb) } var fileDescriptor_91b1d28f30eb5efb = []byte{ - // 1058 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0xcd, 0x4e, 0x23, 0xc7, - 0x13, 0xf7, 0x60, 0xcf, 0xd8, 0x53, 0xc6, 0x2c, 0xdb, 0x0b, 0x62, 0x60, 0xff, 0xf2, 0x20, 0x4b, - 0xfc, 0x45, 0xa2, 
0xac, 0x1d, 0x81, 0x76, 0xa3, 0x3d, 0x45, 0x98, 0x8f, 0x05, 0x09, 0x91, 0x55, - 0x1b, 0xe5, 0xb0, 0x39, 0x38, 0x8d, 0x69, 0xcc, 0x28, 0xe3, 0x99, 0xd9, 0xee, 0x36, 0x11, 0x6f, - 0xb1, 0xe7, 0xbc, 0x48, 0x5e, 0x81, 0x5b, 0xf6, 0x98, 0x93, 0x93, 0xc0, 0x29, 0x3e, 0xe4, 0x19, - 0xa2, 0xae, 0x9e, 0xb1, 0x0d, 0x81, 0xb0, 0x9b, 0x90, 0xcb, 0x54, 0x77, 0xd5, 0xaf, 0x7a, 0xea, - 0xe3, 0xd7, 0x35, 0x03, 0x0b, 0xa2, 0x1f, 0x72, 0xd9, 0xc0, 0x67, 0x72, 0xd4, 0x10, 0x49, 0xa7, - 0x9e, 0x88, 0x58, 0xc5, 0xc4, 0x51, 0xa7, 0x2c, 0x8a, 0xe5, 0xd2, 0xa2, 0x54, 0xb1, 0xe0, 0x0d, - 0x7c, 0x26, 0x47, 0x0d, 0x75, 0x9e, 0x70, 0x69, 0x20, 0x99, 0x29, 0x64, 0x47, 0x3c, 0xbc, 0x61, - 0x9a, 0xeb, 0xc6, 0xdd, 0x18, 0x97, 0x0d, 0xbd, 0x4a, 0xb5, 0x7e, 0x37, 0x8e, 0xbb, 0x21, 0x6f, - 0xe0, 0xee, 0xa8, 0x7f, 0xd2, 0x50, 0x41, 0x8f, 0x4b, 0xc5, 0x7a, 0x89, 0x01, 0xd4, 0x7e, 0xb7, - 0x60, 0x9a, 0xea, 0x50, 0x28, 0x7f, 0xdb, 0xe7, 0x52, 0x91, 0x67, 0x50, 0xd0, 0xc7, 0x7a, 0xd6, - 0xb2, 0xb5, 0x3a, 0xb3, 0xb6, 0x58, 0x37, 0x41, 0xd5, 0x27, 0x31, 0xf5, 0xc3, 0xf3, 0x84, 0x53, - 0x84, 0x91, 0x6f, 0x60, 0x31, 0x61, 0x42, 0x05, 0x2c, 0x6c, 0x0b, 0x2e, 0x93, 0x38, 0x92, 0xbc, - 0x2d, 0x95, 0x60, 0x8a, 0x77, 0xcf, 0xbd, 0x29, 0x3c, 0xc3, 0xcf, 0xce, 0x78, 0x6d, 0x80, 0x34, - 0xc5, 0xb5, 0x52, 0x18, 0x5d, 0x48, 0x6e, 0x37, 0x90, 0x15, 0x98, 0xe9, 0x31, 0xd5, 0x39, 0xe5, - 0x42, 0x9f, 0x19, 0x44, 0x5d, 0x2f, 0xbf, 0x9c, 0x5f, 0x75, 0x69, 0x25, 0xd5, 0xb6, 0x50, 0x59, - 0xfb, 0x3f, 0x14, 0x74, 0x44, 0xa4, 0x08, 0xf9, 0x8d, 0xfd, 0xfd, 0xd9, 0x1c, 0x71, 0xc1, 0xde, - 0xd8, 0xdf, 0xa6, 0x87, 0xb3, 0x16, 0x01, 0x70, 0xe8, 0xf6, 0xe6, 0x57, 0x74, 0x6b, 0x76, 0xaa, - 0xf6, 0x2d, 0x54, 0xd2, 0x34, 0xcc, 0x7b, 0xc8, 0x27, 0x60, 0x77, 0x45, 0xdc, 0x4f, 0x30, 0xd9, - 0xf2, 0xda, 0xe3, 0xc9, 0x64, 0x5f, 0x69, 0xc3, 0x6e, 0x8e, 0x1a, 0x04, 0x59, 0x82, 0xe2, 0xf7, - 0x4c, 0x44, 0x3a, 0x06, 0x9d, 0x95, 0xbb, 0x9b, 0xa3, 0x99, 0xa2, 0x59, 0x02, 0x47, 0x70, 0xd9, - 0x0f, 0x55, 0x6d, 0x13, 0x60, 0xe4, 0x2b, 0xc9, 0x73, 0x70, 0xd0, 0x59, 0x7a, 0xd6, 0x72, 0xfe, - 0xd6, 0xf3, 0x9b, 0x30, 0x1c, 0xf8, 0x29, 0x88, 0xa6, 0xb2, 0xf6, 0x47, 0x1e, 0xdc, 0x11, 0x82, - 0xfc, 0x0f, 0x0a, 0x11, 0xeb, 0x99, 0x7e, 0xb8, 0xcd, 0xd2, 0x70, 0xe0, 0xe3, 0x9e, 0xe2, 0x53, - 0x5b, 0x4f, 0x82, 0x90, 0x9b, 0x98, 0x8c, 0x55, 0xef, 0x29, 0x3e, 0xc9, 0x33, 0xb0, 0x91, 0x66, - 0x58, 0xb6, 0xf2, 0xda, 0xf4, 0xe4, 0xfb, 0x9b, 0xee, 0x70, 0xe0, 0x1b, 0x33, 0x35, 0x82, 0xac, - 0x42, 0x29, 0x88, 0x14, 0x17, 0x67, 0x2c, 0xf4, 0x0a, 0xcb, 0xd6, 0xaa, 0xd5, 0x9c, 0x1e, 0x0e, - 0xfc, 0x91, 0x8e, 0x8e, 0x56, 0x84, 0xc2, 0x53, 0x7e, 0xc6, 0xc2, 0x3e, 0x53, 0x41, 0x1c, 0xb5, - 0x8f, 0xfb, 0xc2, 0x2c, 0x24, 0xef, 0xc4, 0xd1, 0xb1, 0xf4, 0x6c, 0x74, 0x26, 0xc3, 0x81, 0x3f, - 0x33, 0x86, 0x1d, 0x06, 0x3d, 0x4e, 0x17, 0xc7, 0xfb, 0xad, 0xd4, 0xab, 0x65, 0x9c, 0x48, 0x1b, - 0x1e, 0x85, 0x4c, 0xaa, 0xf6, 0x18, 0xe1, 0x39, 0xd8, 0x96, 0xa5, 0xba, 0x21, 0x71, 0x3d, 0x23, - 0x71, 0xfd, 0x30, 0x23, 0x71, 0x73, 0xe9, 0x62, 0xe0, 0xe7, 0xf4, 0x7b, 0xb4, 0xeb, 0xf6, 0xc8, - 0xf3, 0xdd, 0x2f, 0xbe, 0x45, 0x6f, 0xe8, 0x88, 0x0f, 0x76, 0x18, 0xf4, 0x02, 0xe5, 0xb9, 0xcb, - 0xd6, 0x6a, 0xde, 0xe4, 0x8f, 0x0a, 0x6a, 0x04, 0x39, 0x83, 0x85, 0x3b, 0x28, 0xea, 0x95, 0x3e, - 0x88, 0xc9, 0xcd, 0xa7, 0xc3, 0x81, 0x7f, 0x17, 0x9b, 0xe9, 0x5d, 0x87, 0xd7, 0x22, 0x28, 0xe8, - 0x8e, 0x90, 0xe7, 0xe0, 0x0a, 0xde, 0x89, 0xc5, 0xb1, 0x66, 0x99, 0xa1, 0xe4, 0xfc, 0xa8, 0x65, - 0x99, 0x41, 0x23, 0x77, 0x73, 0x74, 0x8c, 0x24, 0x2b, 0x60, 0xb3, 0x90, 0x0b, 0x85, 0x24, 0x28, - 0xaf, 0x55, 0x32, 0x97, 0x0d, 0xad, 0xd4, 
0x0c, 0x46, 0xeb, 0x04, 0x4b, 0x7f, 0xcc, 0x43, 0x05, - 0x8d, 0x7b, 0x91, 0x54, 0x2c, 0xea, 0x70, 0xf2, 0x12, 0x1c, 0x9c, 0x29, 0xf2, 0xe6, 0x4d, 0x78, - 0xb3, 0xaf, 0xd5, 0x2d, 0xae, 0x9a, 0x33, 0x69, 0xa5, 0x53, 0x20, 0x4d, 0x25, 0xd9, 0x85, 0x32, - 0x8b, 0xa2, 0x58, 0x61, 0x8d, 0x65, 0x1a, 0xc3, 0x2d, 0xfe, 0x4f, 0x52, 0xff, 0x49, 0x34, 0x9d, - 0xdc, 0x90, 0x75, 0xb0, 0xa5, 0x62, 0x8a, 0x7b, 0x79, 0x2c, 0x36, 0xb9, 0x96, 0x47, 0x4b, 0x5b, - 0x4c, 0xcf, 0x10, 0x44, 0x8d, 0x20, 0x2d, 0x70, 0x59, 0x47, 0x05, 0x67, 0xbc, 0xcd, 0x14, 0x92, - 0xf6, 0x1e, 0xbe, 0x0c, 0x07, 0x3e, 0x31, 0x0e, 0x1b, 0xea, 0xb3, 0xb8, 0x17, 0x28, 0xde, 0x4b, - 0xd4, 0x39, 0xf2, 0xa5, 0x94, 0xe9, 0x35, 0x53, 0x34, 0x6d, 0x38, 0x12, 0xd9, 0x35, 0x6f, 0x45, - 0x05, 0x35, 0xe2, 0xef, 0x98, 0xe2, 0xfc, 0x97, 0x4c, 0xf9, 0xc9, 0x06, 0x1b, 0xcb, 0x31, 0x2e, - 0x96, 0xf5, 0x11, 0xc5, 0xca, 0x66, 0xc9, 0xd4, 0xad, 0xb3, 0xc4, 0x07, 0xfb, 0x6d, 0x9f, 0x8b, - 0x73, 0xac, 0x7f, 0x9a, 0x35, 0x2a, 0xa8, 0x11, 0xe4, 0x0b, 0x98, 0xfd, 0xcb, 0x55, 0x9f, 0x98, - 0x13, 0x99, 0x8d, 0x3e, 0x3a, 0xbe, 0x71, 0xb5, 0xc7, 0xf4, 0xb2, 0xff, 0x25, 0xbd, 0x9c, 0x7f, - 0x4e, 0xaf, 0x97, 0xe0, 0xe0, 0x45, 0x90, 0x5e, 0x11, 0xa7, 0xe1, 0xfc, 0xb5, 0x92, 0x65, 0x57, - 0xc1, 0x4c, 0x64, 0x03, 0xa4, 0xa9, 0x24, 0x35, 0x70, 0x4e, 0x39, 0x0b, 0xd5, 0x29, 0xce, 0x01, - 0xd7, 0x60, 0x8c, 0x86, 0xa6, 0x92, 0xbc, 0x00, 0x30, 0xe3, 0x4b, 0x88, 0x58, 0xe0, 0x88, 0x71, - 0x9b, 0x0b, 0xc3, 0x81, 0xff, 0x04, 0xa7, 0x90, 0x56, 0x8e, 0xe9, 0x46, 0xdd, 0x91, 0xf2, 0xbe, - 0x51, 0x0a, 0x0f, 0x34, 0x4a, 0xcb, 0x0f, 0x3a, 0x4a, 0x77, 0x61, 0xe1, 0x3b, 0xce, 0x93, 0xf6, - 0x49, 0xa0, 0x3f, 0xc0, 0xed, 0x93, 0x58, 0x8c, 0x02, 0x9e, 0xc6, 0x80, 0x1f, 0x0f, 0x07, 0x7e, - 0x45, 0x43, 0x76, 0x10, 0xb1, 0x13, 0x0b, 0x3a, 0x77, 0x6d, 0x9b, 0x86, 0x5a, 0xfb, 0x21, 0x0f, - 0x95, 0x6b, 0xb3, 0xed, 0x9e, 0x0f, 0xde, 0x88, 0xa4, 0x53, 0x77, 0x90, 0x74, 0xcc, 0xb5, 0xfc, - 0xc7, 0x72, 0x6d, 0xdc, 0xe6, 0xc2, 0x07, 0xb6, 0xd9, 0x7e, 0xa8, 0x36, 0x3b, 0x0f, 0xd4, 0xe6, - 0xe2, 0x43, 0xb6, 0xf9, 0xd3, 0x75, 0x80, 0xf1, 0x3c, 0x21, 0xd3, 0x50, 0xda, 0x3b, 0xd8, 0xd8, - 0x3c, 0xdc, 0xfb, 0x7a, 0x7b, 0x36, 0x47, 0xca, 0x50, 0x7c, 0xbd, 0x7d, 0xb0, 0xb5, 0x77, 0xf0, - 0xca, 0xfc, 0x65, 0xed, 0xec, 0x51, 0xbd, 0x9e, 0x5a, 0xfb, 0x12, 0x6c, 0xfc, 0xcb, 0x22, 0x2f, - 0xb2, 0xc5, 0xdc, 0x6d, 0x3f, 0x91, 0x4b, 0xf3, 0x37, 0xb4, 0x66, 0xd4, 0x7d, 0x6e, 0x35, 0x57, - 0x2e, 0x7e, 0xab, 0xe6, 0x2e, 0x2e, 0xab, 0xd6, 0xfb, 0xcb, 0xaa, 0xf5, 0xeb, 0x65, 0xd5, 0x7a, - 0x77, 0x55, 0xcd, 0xbd, 0xbf, 0xaa, 0xe6, 0x7e, 0xbe, 0xaa, 0xe6, 0xde, 0x14, 0xd3, 0x1f, 0xe7, - 0x23, 0x07, 0x93, 0x5b, 0xff, 0x33, 0x00, 0x00, 0xff, 0xff, 0xeb, 0xed, 0x6d, 0x27, 0x50, 0x0b, - 0x00, 0x00, + // 1096 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0xcd, 0x4e, 0x23, 0x47, + 0x10, 0xf6, 0xd8, 0x9e, 0xb1, 0xa7, 0x8c, 0x59, 0xb6, 0x17, 0xc4, 0x00, 0x89, 0x07, 0x59, 0x22, + 0x22, 0x51, 0xd6, 0x8e, 0x40, 0xbb, 0xd1, 0x9e, 0x22, 0xcc, 0xcf, 0x82, 0x84, 0xc8, 0xaa, 0x8d, + 0x72, 0xd8, 0x1c, 0x9c, 0xc6, 0x34, 0x66, 0x94, 0xf1, 0xcc, 0x6c, 0x4f, 0x9b, 0x88, 0xb7, 0xd8, + 0x73, 0x5e, 0x24, 0xca, 0x1b, 0x70, 0xcb, 0x1e, 0x73, 0x72, 0x12, 0xb8, 0xf9, 0x90, 0x67, 0x88, + 0xba, 0x7a, 0xc6, 0x63, 0x08, 0x84, 0xdd, 0x84, 0x5c, 0xdc, 0xdd, 0x5f, 0x7d, 0xd5, 0x3f, 0x55, + 0x5f, 0x95, 0x07, 0xe6, 0xc5, 0xc0, 0xe7, 0x71, 0x13, 0x7f, 0xa3, 0xa3, 0xa6, 0x88, 0xba, 0x8d, + 0x48, 0x84, 0x32, 0x24, 0x96, 0x3c, 0x65, 0x41, 0x18, 0x2f, 0x2e, 0xc4, 0x32, 0x14, 0xbc, 0x89, + 0xbf, 
0xd1, 0x51, 0x53, 0x9e, 0x47, 0x3c, 0xd6, 0x94, 0xd4, 0xe4, 0xb3, 0x23, 0xee, 0xdf, 0x30, + 0xcd, 0xf6, 0xc2, 0x5e, 0x88, 0xd3, 0xa6, 0x9a, 0x25, 0xa8, 0xdb, 0x0b, 0xc3, 0x9e, 0xcf, 0x9b, + 0xb8, 0x3a, 0x1a, 0x9c, 0x34, 0xa5, 0xd7, 0xe7, 0xb1, 0x64, 0xfd, 0x48, 0x13, 0xea, 0x3f, 0xe7, + 0x61, 0x8a, 0xaa, 0xab, 0x50, 0xfe, 0x66, 0xc0, 0x63, 0x49, 0x9e, 0x42, 0x51, 0x6d, 0xeb, 0x18, + 0xcb, 0xc6, 0xea, 0xf4, 0xda, 0x42, 0x43, 0x5f, 0xaa, 0x31, 0xc9, 0x69, 0x1c, 0x9e, 0x47, 0x9c, + 0x22, 0x8d, 0x7c, 0x0b, 0x0b, 0x11, 0x13, 0xd2, 0x63, 0x7e, 0x47, 0xf0, 0x38, 0x0a, 0x83, 0x98, + 0x77, 0x62, 0x29, 0x98, 0xe4, 0xbd, 0x73, 0x27, 0x8f, 0x7b, 0xb8, 0xe9, 0x1e, 0xaf, 0x34, 0x91, + 0x26, 0xbc, 0x76, 0x42, 0xa3, 0xf3, 0xd1, 0xed, 0x06, 0xb2, 0x02, 0xd3, 0x7d, 0x26, 0xbb, 0xa7, + 0x5c, 0xa8, 0x3d, 0xbd, 0xa0, 0xe7, 0x14, 0x96, 0x0b, 0xab, 0x36, 0xad, 0x26, 0x68, 0x1b, 0x41, + 0xb2, 0x04, 0xb6, 0x8a, 0x66, 0x27, 0x60, 0x7d, 0xee, 0x14, 0x91, 0x51, 0x56, 0xc0, 0x01, 0xeb, + 0x73, 0xf2, 0x31, 0x00, 0x1a, 0x7b, 0x22, 0x1c, 0x44, 0x8e, 0x89, 0x56, 0xa4, 0xbf, 0x54, 0x00, + 0x21, 0x50, 0x3c, 0xf1, 0x7c, 0xee, 0x58, 0x68, 0xc0, 0x79, 0xfd, 0x13, 0x28, 0xaa, 0x17, 0x92, + 0x12, 0x14, 0x36, 0xf6, 0xf7, 0x67, 0x72, 0xc4, 0x06, 0x73, 0x63, 0x7f, 0x9b, 0x1e, 0xce, 0x18, + 0x04, 0xc0, 0xa2, 0xdb, 0x9b, 0x5f, 0xd3, 0xad, 0x99, 0x7c, 0xfd, 0x3b, 0xa8, 0x26, 0x61, 0xd1, + 0xf7, 0x26, 0x9f, 0x82, 0xa9, 0x8f, 0x51, 0xc1, 0xab, 0xac, 0x3d, 0x9e, 0x0c, 0x1e, 0x1e, 0xb7, + 0x9b, 0xa3, 0x9a, 0x41, 0x16, 0xa1, 0xf4, 0x03, 0x13, 0x81, 0x7a, 0x93, 0x8a, 0x92, 0xbd, 0x9b, + 0xa3, 0x29, 0xd0, 0x2a, 0x83, 0x25, 0x78, 0x3c, 0xf0, 0x65, 0x7d, 0x13, 0x60, 0xec, 0x1b, 0x93, + 0x67, 0x60, 0xa1, 0x73, 0xec, 0x18, 0xcb, 0x85, 0x5b, 0xf7, 0x6f, 0xc1, 0x68, 0xe8, 0x26, 0x24, + 0x9a, 0x8c, 0xf5, 0x3f, 0x0b, 0x60, 0x8f, 0x19, 0xe4, 0x23, 0x28, 0x62, 0x9c, 0xd4, 0x15, 0xed, + 0x56, 0x79, 0x34, 0x74, 0x71, 0x4d, 0xf1, 0x57, 0x59, 0x31, 0x1c, 0xf9, 0xcc, 0xaa, 0xd6, 0x3a, + 0x30, 0xe4, 0x29, 0x98, 0x28, 0x5b, 0x4c, 0x43, 0x65, 0x6d, 0x6a, 0xf2, 0xfc, 0x96, 0x3d, 0x1a, + 0xba, 0xda, 0x4c, 0xf5, 0x40, 0x56, 0xa1, 0xec, 0x05, 0x92, 0x8b, 0x33, 0xe6, 0x3b, 0xc5, 0x65, + 0x63, 0xd5, 0x68, 0x4d, 0x8d, 0x86, 0xee, 0x18, 0xa3, 0xe3, 0x19, 0xa1, 0xb0, 0xc4, 0xcf, 0x98, + 0x3f, 0x60, 0xd2, 0x0b, 0x83, 0xce, 0xf1, 0x40, 0xe8, 0x49, 0xcc, 0xbb, 0x61, 0x70, 0x1c, 0x3b, + 0x26, 0x3a, 0x93, 0xd1, 0xd0, 0x9d, 0xce, 0x68, 0x87, 0x5e, 0x9f, 0xd3, 0x85, 0x6c, 0xbd, 0x95, + 0x78, 0xb5, 0xb5, 0x13, 0xe9, 0xc0, 0x23, 0x9f, 0xc5, 0xb2, 0x93, 0x31, 0x1c, 0x0b, 0xd3, 0xb2, + 0xd8, 0xd0, 0x45, 0xd1, 0x48, 0x8b, 0xa2, 0x71, 0x98, 0x16, 0x45, 0x6b, 0xf1, 0x62, 0xe8, 0xe6, + 0xd4, 0x39, 0xca, 0x75, 0x7b, 0xec, 0xf9, 0xf6, 0x37, 0xd7, 0xa0, 0x37, 0x30, 0xe2, 0x82, 0xe9, + 0x7b, 0x7d, 0x4f, 0x3a, 0xf6, 0xb2, 0xb1, 0x5a, 0xd0, 0xef, 0x47, 0x80, 0xea, 0x81, 0x9c, 0xc1, + 0xfc, 0x1d, 0x92, 0x77, 0xca, 0xef, 0x55, 0x19, 0xad, 0xa5, 0xd1, 0xd0, 0xbd, 0xab, 0x3a, 0xe8, + 0x5d, 0x9b, 0xd7, 0x03, 0x28, 0xaa, 0x8c, 0x90, 0x67, 0x60, 0x0b, 0xde, 0x0d, 0xc5, 0xb1, 0x52, + 0x99, 0x96, 0xe4, 0xdc, 0x38, 0x65, 0xa9, 0x41, 0x31, 0x77, 0x73, 0x34, 0x63, 0x92, 0x15, 0x30, + 0x99, 0xcf, 0x85, 0x44, 0x11, 0x54, 0xd6, 0xaa, 0xa9, 0xcb, 0x86, 0x02, 0x95, 0x82, 0xd1, 0x3a, + 0xa1, 0xd2, 0x9f, 0x0a, 0x50, 0x45, 0xe3, 0x5e, 0x10, 0x4b, 0x16, 0x74, 0x39, 0x79, 0x01, 0x16, + 0xf6, 0xa8, 0xf8, 0x66, 0x25, 0xbc, 0xde, 0x57, 0x70, 0x9b, 0xcb, 0xd6, 0x74, 0x12, 0xe9, 0x84, + 0x48, 0x93, 0x91, 0xec, 0x42, 0x85, 0x05, 0x41, 0x28, 0x31, 0xc6, 0x71, 0x72, 0x87, 0x5b, 0xfc, + 0x9f, 0x24, 0xfe, 0x93, 0x6c, 
0x3a, 0xb9, 0x20, 0xeb, 0x60, 0xc6, 0x92, 0x49, 0xee, 0x14, 0x30, + 0xd8, 0xe4, 0xda, 0x3b, 0xda, 0xca, 0xa2, 0x73, 0x86, 0x24, 0xaa, 0x07, 0xd2, 0x06, 0x9b, 0x75, + 0xa5, 0x77, 0xc6, 0x3b, 0x4c, 0xa2, 0x68, 0xef, 0xd1, 0xcb, 0x68, 0xe8, 0x12, 0xed, 0xb0, 0x21, + 0x3f, 0x0f, 0xfb, 0x9e, 0xe4, 0xfd, 0x48, 0x9e, 0xa3, 0x5e, 0xca, 0x29, 0xae, 0x94, 0xa2, 0x64, + 0xc3, 0x51, 0xc8, 0xb6, 0x3e, 0x15, 0x01, 0xaa, 0x87, 0x7f, 0x52, 0x8a, 0xf5, 0x7f, 0x2a, 0xe5, + 0x17, 0x13, 0x4c, 0x0c, 0x47, 0x16, 0x2c, 0xe3, 0x03, 0x82, 0x95, 0xf6, 0x92, 0xfc, 0xad, 0xbd, + 0xc4, 0x05, 0xf3, 0xcd, 0x80, 0x8b, 0x73, 0x8c, 0x7f, 0xf2, 0x6a, 0x04, 0xa8, 0x1e, 0xc8, 0x97, + 0x30, 0xf3, 0xb7, 0x52, 0x9f, 0xe8, 0x13, 0xa9, 0x8d, 0x3e, 0x3a, 0xbe, 0x51, 0xda, 0x99, 0xbc, + 0xcc, 0xff, 0x28, 0x2f, 0xeb, 0xdf, 0xcb, 0xeb, 0x05, 0x58, 0x58, 0x08, 0xb1, 0x53, 0xc2, 0x6e, + 0x38, 0x77, 0x2d, 0x64, 0x69, 0x29, 0xe8, 0x8e, 0xac, 0x89, 0x34, 0x19, 0x49, 0x1d, 0xac, 0x53, + 0xce, 0x7c, 0x79, 0x8a, 0x7d, 0xc0, 0xd6, 0x1c, 0x8d, 0xd0, 0x64, 0x24, 0xcf, 0x01, 0x74, 0xfb, + 0x12, 0x22, 0x14, 0xd8, 0x62, 0xec, 0xd6, 0xfc, 0x68, 0xe8, 0x3e, 0xc1, 0x2e, 0xa4, 0xc0, 0x4c, + 0x6e, 0xd4, 0x1e, 0x83, 0xf7, 0xb5, 0x52, 0x78, 0xa0, 0x56, 0x5a, 0x79, 0xd0, 0x56, 0xba, 0x0b, + 0xf3, 0xdf, 0x73, 0x1e, 0x75, 0x4e, 0x3c, 0xf5, 0x87, 0xde, 0x39, 0x09, 0xc5, 0xf8, 0xc2, 0x53, + 0x78, 0xe1, 0xc7, 0xa3, 0xa1, 0x5b, 0x55, 0x94, 0x1d, 0x64, 0xec, 0x84, 0x82, 0xce, 0x5e, 0x5b, + 0x26, 0x57, 0xad, 0xff, 0x58, 0x80, 0xea, 0xb5, 0xde, 0x76, 0xcf, 0x1f, 0xde, 0x58, 0xa4, 0xf9, + 0x3b, 0x44, 0x9a, 0x69, 0xad, 0xf0, 0xa1, 0x5a, 0xcb, 0xd2, 0x5c, 0x7c, 0xcf, 0x34, 0x9b, 0x0f, + 0x95, 0x66, 0xeb, 0x81, 0xd2, 0x5c, 0x7a, 0xc8, 0x34, 0x7f, 0xb6, 0x0e, 0x90, 0xf5, 0x13, 0x32, + 0x05, 0xe5, 0xbd, 0x83, 0x8d, 0xcd, 0xc3, 0xbd, 0x6f, 0xb6, 0x67, 0x72, 0xa4, 0x02, 0xa5, 0x57, + 0xdb, 0x07, 0x5b, 0x7b, 0x07, 0x2f, 0xf5, 0x57, 0xd6, 0xce, 0x1e, 0x55, 0xf3, 0xfc, 0xda, 0x57, + 0x60, 0xe2, 0x57, 0x16, 0x79, 0x9e, 0x4e, 0x66, 0x6f, 0xfb, 0x28, 0x5d, 0x9c, 0xbb, 0x81, 0xea, + 0x56, 0xf7, 0x85, 0xd1, 0x5a, 0xb9, 0xf8, 0xa3, 0x96, 0xbb, 0xb8, 0xac, 0x19, 0xef, 0x2e, 0x6b, + 0xc6, 0xef, 0x97, 0x35, 0xe3, 0xed, 0x55, 0x2d, 0xf7, 0xee, 0xaa, 0x96, 0xfb, 0xf5, 0xaa, 0x96, + 0x7b, 0x5d, 0x4a, 0x3e, 0xc4, 0x8f, 0x2c, 0x7c, 0xdc, 0xfa, 0x5f, 0x01, 0x00, 0x00, 0xff, 0xff, + 0xa0, 0xc1, 0xf7, 0x10, 0xa0, 0x0b, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. 
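The marshalling hunk that follows writes the three new RulesRequest fields in reverse field order, emitting a key byte before each value. Those key bytes are not arbitrary: in protobuf encoding the key is (field_number << 3) | wire_type, and strings are wire type 2 (length-delimited). A minimal standalone sketch (not part of the patch) that reproduces the 0x22, 0x2a, and 0x32 constants seen in the generated code:

package main

import "fmt"

// tag computes the protobuf key for a field the same way the generated
// MarshalToSizedBuffer emits it: (field_number << 3) | wire_type.
func tag(fieldNumber, wireType uint64) uint64 {
	return fieldNumber<<3 | wireType
}

func main() {
	const lengthDelimited = 2 // wire type for strings, bytes, and embedded messages
	fmt.Printf("rule_name  (field 4): %#x\n", tag(4, lengthDelimited)) // 0x22
	fmt.Printf("rule_group (field 5): %#x\n", tag(5, lengthDelimited)) // 0x2a
	fmt.Printf("file       (field 6): %#x\n", tag(6, lengthDelimited)) // 0x32
}

The same rule explains the Unmarshal cases further down, which dispatch on field numbers 4, 5, and 6 after reading each key.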
@@ -755,6 +760,33 @@ func (m *RulesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.File) > 0 { + for iNdEx := len(m.File) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.File[iNdEx]) + copy(dAtA[i:], m.File[iNdEx]) + i = encodeVarintRpc(dAtA, i, uint64(len(m.File[iNdEx]))) + i-- + dAtA[i] = 0x32 + } + } + if len(m.RuleGroup) > 0 { + for iNdEx := len(m.RuleGroup) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.RuleGroup[iNdEx]) + copy(dAtA[i:], m.RuleGroup[iNdEx]) + i = encodeVarintRpc(dAtA, i, uint64(len(m.RuleGroup[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } + if len(m.RuleName) > 0 { + for iNdEx := len(m.RuleName) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.RuleName[iNdEx]) + copy(dAtA[i:], m.RuleName[iNdEx]) + i = encodeVarintRpc(dAtA, i, uint64(len(m.RuleName[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } if len(m.MatcherString) > 0 { for iNdEx := len(m.MatcherString) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.MatcherString[iNdEx]) @@ -1326,6 +1358,24 @@ func (m *RulesRequest) Size() (n int) { n += 1 + l + sovRpc(uint64(l)) } } + if len(m.RuleName) > 0 { + for _, s := range m.RuleName { + l = len(s) + n += 1 + l + sovRpc(uint64(l)) + } + } + if len(m.RuleGroup) > 0 { + for _, s := range m.RuleGroup { + l = len(s) + n += 1 + l + sovRpc(uint64(l)) + } + } + if len(m.File) > 0 { + for _, s := range m.File { + l = len(s) + n += 1 + l + sovRpc(uint64(l)) + } + } return n } @@ -1664,6 +1714,102 @@ func (m *RulesRequest) Unmarshal(dAtA []byte) error { } m.MatcherString = append(m.MatcherString, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RuleName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RuleName = append(m.RuleName, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RuleGroup", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RuleGroup = append(m.RuleGroup, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field File", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + m.File = append(m.File, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipRpc(dAtA[iNdEx:]) diff --git a/vendor/github.com/thanos-io/thanos/pkg/rules/rulespb/rpc.proto b/vendor/github.com/thanos-io/thanos/pkg/rules/rulespb/rpc.proto index 25d809ede9..f5fc8a038b 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/rules/rulespb/rpc.proto +++ b/vendor/github.com/thanos-io/thanos/pkg/rules/rulespb/rpc.proto @@ -41,6 +41,9 @@ message RulesRequest { Type type = 1; PartialResponseStrategy partial_response_strategy = 2; repeated string matcher_string = 3; + repeated string rule_name = 4; + repeated string rule_group = 5; + repeated string file = 6; } message RulesResponse { diff --git a/vendor/github.com/thanos-io/thanos/pkg/runutil/runutil.go b/vendor/github.com/thanos-io/thanos/pkg/runutil/runutil.go index 809dfce36b..9aaeeca615 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/runutil/runutil.go +++ b/vendor/github.com/thanos-io/thanos/pkg/runutil/runutil.go @@ -3,7 +3,7 @@ // Package runutil provides helpers to advanced function scheduling control like repeat or retry. // -// It's very often the case when you need to excutes some code every fixed intervals or have it retried automatically. +// It's very often the case that you need to execute some code at fixed intervals or have it retried automatically. // To make it reliably with proper timeout, you need to carefully arrange some boilerplate for this. // Below function does it for you. // diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/bucket.go b/vendor/github.com/thanos-io/thanos/pkg/store/bucket.go index 75a85dd9fb..32a4323b62 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/bucket.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/bucket.go @@ -117,6 +117,9 @@ const ( // SeriesBatchSize is the default batch size when fetching series from object storage. SeriesBatchSize = 10000 + + // checkContextEveryNIterations is used in some tight loops to check if the context is done. + checkContextEveryNIterations = 128 ) var ( @@ -1005,6 +1008,7 @@ type blockSeriesClient struct { mint int64 maxt int64 + seriesLimit int indexr *bucketIndexReader chunkr *bucketChunkReader loadAggregates []storepb.Aggr @@ -1080,6 +1084,7 @@ func newBlockSeriesClient( mint: req.MinTime, maxt: req.MaxTime, + seriesLimit: int(req.Limit), indexr: b.indexReader(logger), chunkr: chunkr, seriesLimiter: seriesLimiter, @@ -1159,14 +1164,20 @@ func (b *blockSeriesClient) ExpandPostings( b.expandedPostings = make([]storage.SeriesRef, 0, len(b.lazyPostings.postings)/2) b.lazyExpandedPostingsCount.Inc() } else { + // If seriesLimit is set, it can be applied here to limit the number of series. + // Note: This can only be done when postings are not expanded lazily. + if b.seriesLimit > 0 && len(b.lazyPostings.postings) > b.seriesLimit { + b.lazyPostings.postings = b.lazyPostings.postings[:b.seriesLimit] + } + // Apply series limiter eagerly if lazy postings are not enabled.
- if err := seriesLimiter.Reserve(uint64(len(ps.postings))); err != nil { + if err := seriesLimiter.Reserve(uint64(len(b.lazyPostings.postings))); err != nil { return httpgrpc.Errorf(int(codes.ResourceExhausted), "exceeded series limit: %s", err) } } - if b.batchSize > len(ps.postings) { - b.batchSize = len(ps.postings) + if b.batchSize > len(b.lazyPostings.postings) { + b.batchSize = len(b.lazyPostings.postings) } b.entries = make([]seriesEntry, 0, b.batchSize) @@ -1288,6 +1299,11 @@ OUTER: } seriesMatched++ + if b.seriesLimit > 0 && seriesMatched > b.seriesLimit { + // Exit early if seriesLimit is set. + b.hasMorePostings = false + break + } s := seriesEntry{lset: completeLabelset} if b.skipChunks { b.entries = append(b.entries, s) @@ -1691,7 +1707,12 @@ func (s *BucketStore) Series(req *storepb.SeriesRequest, seriesSrv storepb.Store tracing.DoInSpan(ctx, "bucket_store_merge_all", func(ctx context.Context) { begin := time.Now() set := NewResponseDeduplicator(NewProxyResponseLoserTree(respSets...)) + i := 0 for set.Next() { + i++ + if req.Limit > 0 && i > int(req.Limit) { + break + } at := set.At() warn := at.GetWarning() if warn != "" { @@ -1942,8 +1963,13 @@ func (s *BucketStore) LabelNames(ctx context.Context, req *storepb.LabelNamesReq return nil, status.Error(codes.Unknown, errors.Wrap(err, "marshal label names response hints").Error()) } + names := strutil.MergeSlices(sets...) + if req.Limit > 0 && len(names) > int(req.Limit) { + names = names[:req.Limit] + } + return &storepb.LabelNamesResponse{ - Names: strutil.MergeSlices(sets...), + Names: names, Hints: anyHints, }, nil } @@ -1957,7 +1983,7 @@ func (b *bucketBlock) FilterExtLabelsMatchers(matchers []*labels.Matcher) ([]*la // If value is empty string the matcher is a valid one since it's not part of external labels. if v == "" { result = append(result, m) - } else if v != "" && v != m.Value { + } else if v != "" && !m.Matches(v) { // If matcher is external label but value is different we don't want to look in block anyway. return []*labels.Matcher{}, false } @@ -2157,8 +2183,13 @@ func (s *BucketStore) LabelValues(ctx context.Context, req *storepb.LabelValuesR return nil, status.Error(codes.Unknown, errors.Wrap(err, "marshal label values response hints").Error()) } + vals := strutil.MergeSlices(sets...) + if req.Limit > 0 && len(vals) > int(req.Limit) { + vals = vals[:req.Limit] + } + return &storepb.LabelValuesResponse{ - Values: strutil.MergeSlices(sets...), + Values: vals, Hints: anyHints, }, nil } @@ -2605,10 +2636,15 @@ func (r *bucketIndexReader) ExpandedPostings(ctx context.Context, ms sortedMatch } // ExpandPostingsWithContext returns the postings expanded as a slice and considers context. 
-func ExpandPostingsWithContext(ctx context.Context, p index.Postings) (res []storage.SeriesRef, err error) { +func ExpandPostingsWithContext(ctx context.Context, p index.Postings) ([]storage.SeriesRef, error) { + res := make([]storage.SeriesRef, 0, 1024) // Pre-allocate slice with initial capacity + i := 0 for p.Next() { - if ctx.Err() != nil { - return nil, ctx.Err() + i++ + if i%checkContextEveryNIterations == 0 { + if err := ctx.Err(); err != nil { + return nil, err + } } res = append(res, p.At()) } @@ -2831,8 +2867,8 @@ func toPostingGroup(ctx context.Context, lvalsFn func(name string) ([]string, er return nil, nil, err } - for _, val := range vals { - if ctx.Err() != nil { + for i, val := range vals { + if (i+1)%checkContextEveryNIterations == 0 && ctx.Err() != nil { return nil, nil, ctx.Err() } if !m.Matches(val) { @@ -2860,8 +2896,8 @@ func toPostingGroup(ctx context.Context, lvalsFn func(name string) ([]string, er } var toAdd []string - for _, val := range vals { - if ctx.Err() != nil { + for i, val := range vals { + if (i+1)%checkContextEveryNIterations == 0 && ctx.Err() != nil { return nil, nil, ctx.Err() } if m.Matches(val) { @@ -2964,8 +3000,10 @@ func (r *bucketIndexReader) fetchPostings(ctx context.Context, keys []labels.Lab // If we have a miss, mark key to be fetched in `ptrs` slice. // Overlaps are well handled by partitioner, so we don't need to deduplicate keys. for ix, key := range keys { - if err := ctx.Err(); err != nil { - return nil, closeFns, err + if (ix+1)%checkContextEveryNIterations == 0 { + if err := ctx.Err(); err != nil { + return nil, closeFns, err + } } // Get postings for the given key from cache first. if b, ok := fromCache[key]; ok { @@ -3567,10 +3605,10 @@ func (r *bucketChunkReader) loadChunks(ctx context.Context, res []seriesEntry, a bufPooled, err := r.block.chunkPool.Get(r.block.estimatedMaxChunkSize) if err == nil { buf = *bufPooled + defer r.block.chunkPool.Put(&buf) } else { buf = make([]byte, r.block.estimatedMaxChunkSize) } - defer r.block.chunkPool.Put(&buf) for i, pIdx := range pIdxs { // Fast forward range reader to the next chunk start in case of sparse (for our purposes) byte range. diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/cache/inmemory.go b/vendor/github.com/thanos-io/thanos/pkg/store/cache/inmemory.go index 42e6de55a7..3a8ddbb86d 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/cache/inmemory.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/cache/inmemory.go @@ -30,7 +30,12 @@ var ( } ) -const maxInt = int(^uint(0) >> 1) +const ( + maxInt = int(^uint(0) >> 1) + + // checkContextEveryNIterations is used in some tight loops to check if the context is done. 
+ checkContextEveryNIterations = 128 +) type InMemoryIndexCache struct { mtx sync.Mutex @@ -302,11 +307,13 @@ func (c *InMemoryIndexCache) FetchMultiPostings(ctx context.Context, blockID uli blockIDKey := blockID.String() requests := 0 hit := 0 - for _, key := range keys { - if ctx.Err() != nil { - c.commonMetrics.RequestTotal.WithLabelValues(CacheTypePostings, tenant).Add(float64(requests)) - c.commonMetrics.HitsTotal.WithLabelValues(CacheTypePostings, tenant).Add(float64(hit)) - return hits, misses + for i, key := range keys { + if (i+1)%checkContextEveryNIterations == 0 { + if ctx.Err() != nil { + c.commonMetrics.RequestTotal.WithLabelValues(CacheTypePostings, tenant).Add(float64(requests)) + c.commonMetrics.HitsTotal.WithLabelValues(CacheTypePostings, tenant).Add(float64(hit)) + return hits, misses + } } requests++ if b, ok := c.get(CacheKey{blockIDKey, CacheKeyPostings(key), ""}); ok { @@ -363,11 +370,13 @@ func (c *InMemoryIndexCache) FetchMultiSeries(ctx context.Context, blockID ulid. blockIDKey := blockID.String() requests := 0 hit := 0 - for _, id := range ids { - if ctx.Err() != nil { - c.commonMetrics.RequestTotal.WithLabelValues(CacheTypeSeries, tenant).Add(float64(requests)) - c.commonMetrics.HitsTotal.WithLabelValues(CacheTypeSeries, tenant).Add(float64(hit)) - return hits, misses + for i, id := range ids { + if (i+1)%checkContextEveryNIterations == 0 { + if ctx.Err() != nil { + c.commonMetrics.RequestTotal.WithLabelValues(CacheTypeSeries, tenant).Add(float64(requests)) + c.commonMetrics.HitsTotal.WithLabelValues(CacheTypeSeries, tenant).Add(float64(hit)) + return hits, misses + } } requests++ if b, ok := c.get(CacheKey{blockIDKey, CacheKeySeries(id), ""}); ok { diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/cache/tracing_index_cache.go b/vendor/github.com/thanos-io/thanos/pkg/store/cache/tracing_index_cache.go index a72ce0d664..38a0f61822 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/cache/tracing_index_cache.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/cache/tracing_index_cache.go @@ -66,8 +66,8 @@ func (c *TracingIndexCache) FetchExpandedPostings(ctx context.Context, blockID u return data, exists } -// StoreSeries stores a single series. Skip intrumenting this method -// excessive spans as a single request can store millions of serieses. +// StoreSeries stores a single series. Skip instrumenting this method with +// excessive spans as a single request can store millions of series. func (c *TracingIndexCache) StoreSeries(blockID ulid.ULID, id storage.SeriesRef, v []byte, tenant string) { c.cache.StoreSeries(blockID, id, v, tenant) } diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/io.go b/vendor/github.com/thanos-io/thanos/pkg/store/io.go index 657f3134d2..f2356e6759 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/io.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/io.go @@ -109,7 +109,7 @@ func readByteRanges(src io.Reader, dst []byte, byteRanges byteRanges) ([]byte, e if err != nil { // We get an ErrUnexpectedEOF if EOF is reached before we fill the slice. // Due to how the reading logic works in the bucket store, we may try to overread - // the last byte range so, if the error occurrs on the last one, we consider it legit. + // the last byte range so, if the error occurs on the last one, we consider it legit.
if err == io.ErrUnexpectedEOF && idx == len(byteRanges)-1 { return dst, nil } diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/lazy_postings.go b/vendor/github.com/thanos-io/thanos/pkg/store/lazy_postings.go index 1858b7dee4..f8363ab477 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/lazy_postings.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/lazy_postings.go @@ -308,8 +308,8 @@ func fetchAndExpandPostingGroups(ctx context.Context, r *bucketIndexReader, post result := index.Without(index.Intersect(groupAdds...), index.Merge(ctx, groupRemovals...)) - if ctx.Err() != nil { - return nil, nil, ctx.Err() + if err := ctx.Err(); err != nil { + return nil, nil, err } ps, err := ExpandPostingsWithContext(ctx, result) if err != nil { diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/prometheus.go b/vendor/github.com/thanos-io/thanos/pkg/store/prometheus.go index 721e9ed51e..2946278978 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/prometheus.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/prometheus.go @@ -173,7 +173,7 @@ func (p *PrometheusStore) Series(r *storepb.SeriesRequest, seriesSrv storepb.Sto if r.SkipChunks { finalExtLset := rmLabels(extLset.Copy(), extLsetToRemove) - labelMaps, err := p.client.SeriesInGRPC(s.Context(), p.base, matchers, r.MinTime, r.MaxTime) + labelMaps, err := p.client.SeriesInGRPC(s.Context(), p.base, matchers, r.MinTime, r.MaxTime, int(r.Limit)) if err != nil { return err } @@ -571,12 +571,12 @@ func (p *PrometheusStore) LabelNames(ctx context.Context, r *storepb.LabelNamesR var lbls []string if len(matchers) == 0 || p.labelCallsSupportMatchers() { - lbls, err = p.client.LabelNamesInGRPC(ctx, p.base, matchers, r.Start, r.End) + lbls, err = p.client.LabelNamesInGRPC(ctx, p.base, matchers, r.Start, r.End, int(r.Limit)) if err != nil { return nil, err } } else { - sers, err := p.client.SeriesInGRPC(ctx, p.base, matchers, r.Start, r.End) + sers, err := p.client.SeriesInGRPC(ctx, p.base, matchers, r.Start, r.End, int(r.Limit)) if err != nil { return nil, err } @@ -642,7 +642,7 @@ func (p *PrometheusStore) LabelValues(ctx context.Context, r *storepb.LabelValue if len(matchers) == 0 { return &storepb.LabelValuesResponse{Values: []string{val}}, nil } - sers, err = p.client.SeriesInGRPC(ctx, p.base, matchers, r.Start, r.End) + sers, err = p.client.SeriesInGRPC(ctx, p.base, matchers, r.Start, r.End, int(r.Limit)) if err != nil { return nil, err } @@ -653,12 +653,12 @@ func (p *PrometheusStore) LabelValues(ctx context.Context, r *storepb.LabelValue } if len(matchers) == 0 || p.labelCallsSupportMatchers() { - vals, err = p.client.LabelValuesInGRPC(ctx, p.base, r.Label, matchers, r.Start, r.End) + vals, err = p.client.LabelValuesInGRPC(ctx, p.base, r.Label, matchers, r.Start, r.End, int(r.Limit)) if err != nil { return nil, err } } else { - sers, err = p.client.SeriesInGRPC(ctx, p.base, matchers, r.Start, r.End) + sers, err = p.client.SeriesInGRPC(ctx, p.base, matchers, r.Start, r.End, int(r.Limit)) if err != nil { return nil, err } diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/proxy.go b/vendor/github.com/thanos-io/thanos/pkg/store/proxy.go index 0ac1fc659c..c0c1bacc68 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/proxy.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/proxy.go @@ -286,7 +286,7 @@ func (s *ProxyStore) TSDBInfos() []infopb.TSDBInfo { func (s *ProxyStore) Series(originalRequest *storepb.SeriesRequest, srv storepb.Store_SeriesServer) error { // TODO(bwplotka): This should be part of 
request logger, otherwise it does not make much sense. Also, could be - // tiggered by tracing span to reduce cognitive load. + // triggered by tracing span to reduce cognitive load. reqLogger := log.With(s.logger, "component", "proxy") if s.debugLogging { reqLogger = log.With(reqLogger, "request", originalRequest.String()) @@ -327,6 +327,7 @@ func (s *ProxyStore) Series(originalRequest *storepb.SeriesRequest, srv storepb. r := &storepb.SeriesRequest{ MinTime: originalRequest.MinTime, MaxTime: originalRequest.MaxTime, + Limit: originalRequest.Limit, Matchers: append(storeMatchers, MatchersForLabelSets(storeLabelSets)...), Aggregates: originalRequest.Aggregates, MaxResolutionWindow: originalRequest.MaxResolutionWindow, @@ -363,7 +364,13 @@ func (s *ProxyStore) Series(originalRequest *storepb.SeriesRequest, srv storepb. level.Debug(reqLogger).Log("msg", "Series: started fanout streams", "status", strings.Join(storeDebugMsgs, ";")) respHeap := NewResponseDeduplicator(NewProxyResponseLoserTree(storeResponses...)) + + i := 0 for respHeap.Next() { + i++ + if r.Limit > 0 && i > int(r.Limit) { + break + } resp := respHeap.At() if resp.GetWarning() != "" && (r.PartialResponseDisabled || r.PartialResponseStrategy == storepb.PartialResponseStrategy_ABORT) { @@ -381,7 +388,7 @@ func (s *ProxyStore) Series(originalRequest *storepb.SeriesRequest, srv storepb. // LabelNames returns all known label names. func (s *ProxyStore) LabelNames(ctx context.Context, originalRequest *storepb.LabelNamesRequest) (*storepb.LabelNamesResponse, error) { // TODO(bwplotka): This should be part of request logger, otherwise it does not make much sense. Also, could be - // tiggered by tracing span to reduce cognitive load. + // triggered by tracing span to reduce cognitive load. reqLogger := log.With(s.logger, "component", "proxy") if s.debugLogging { reqLogger = log.With(reqLogger, "request", originalRequest.String()) @@ -419,6 +426,7 @@ func (s *ProxyStore) LabelNames(ctx context.Context, originalRequest *storepb.La End: originalRequest.End, Matchers: append(storeMatchers, MatchersForLabelSets(storeLabelSets)...), WithoutReplicaLabels: originalRequest.WithoutReplicaLabels, + Hints: originalRequest.Hints, } var ( @@ -465,8 +473,13 @@ func (s *ProxyStore) LabelNames(ctx context.Context, originalRequest *storepb.La return nil, err } + result := strutil.MergeUnsortedSlices(names...) + if originalRequest.Limit > 0 && len(result) > int(originalRequest.Limit) { + result = result[:originalRequest.Limit] + } + return &storepb.LabelNamesResponse{ - Names: strutil.MergeUnsortedSlices(names...), + Names: result, Warnings: warnings, }, nil } @@ -476,7 +489,7 @@ func (s *ProxyStore) LabelValues(ctx context.Context, originalRequest *storepb.L *storepb.LabelValuesResponse, error, ) { // TODO(bwplotka): This should be part of request logger, otherwise it does not make much sense. Also, could be - // tiggered by tracing span to reduce cognitive load. + // triggered by tracing span to reduce cognitive load. 
reqLogger := log.With(s.logger, "component", "proxy") if s.debugLogging { reqLogger = log.With(reqLogger, "request", originalRequest.String()) @@ -520,6 +533,7 @@ func (s *ProxyStore) LabelValues(ctx context.Context, originalRequest *storepb.L End: originalRequest.End, Matchers: append(storeMatchers, MatchersForLabelSets(storeLabelSets)...), WithoutReplicaLabels: originalRequest.WithoutReplicaLabels, + Limit: originalRequest.Limit, } var ( @@ -567,8 +581,13 @@ func (s *ProxyStore) LabelValues(ctx context.Context, originalRequest *storepb.L return nil, err } + vals := strutil.MergeUnsortedSlices(all...) + if originalRequest.Limit > 0 && len(vals) > int(originalRequest.Limit) { + vals = vals[:originalRequest.Limit] + } + return &storepb.LabelValuesResponse{ - Values: strutil.MergeUnsortedSlices(all...), + Values: vals, Warnings: warnings, }, nil } diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/proxy_merge.go b/vendor/github.com/thanos-io/thanos/pkg/store/proxy_merge.go index fa002cc9b1..e2764d574a 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/proxy_merge.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/proxy_merge.go @@ -361,7 +361,9 @@ func newLazyRespSet( var rerr error // If timer is already stopped if t != nil && !t.Stop() { - <-t.C // Drain the channel if it was already stopped. + if t.C != nil { + <-t.C // Drain the channel if it was already stopped. + } rerr = errors.Wrapf(err, "failed to receive any data in %s from %s", l.frameTimeout, st) } else { rerr = errors.Wrapf(err, "receive series from %s", st) @@ -614,7 +616,9 @@ func newEagerRespSet( var rerr error // If timer is already stopped if t != nil && !t.Stop() { - <-t.C // Drain the channel if it was already stopped. + if t.C != nil { + <-t.C // Drain the channel if it was already stopped. + } rerr = errors.Wrapf(err, "failed to receive any data in %s from %s", l.frameTimeout, storeName) } else { rerr = errors.Wrapf(err, "receive series from %s", storeName) diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/storepb/prompb/samples.go b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/prompb/samples.go index 050b8e912f..0da00daf4d 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/storepb/prompb/samples.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/prompb/samples.go @@ -61,7 +61,7 @@ func SamplesFromPromqlSeries(series promql.Series) ([]Sample, []Histogram) { // HistogramProtoToHistogram extracts a (normal integer) Histogram from the // provided proto message. The caller has to make sure that the proto message -// represents an interger histogram and not a float histogram. +// represents an integer histogram and not a float histogram. // Copied from https://github.com/prometheus/prometheus/blob/0ab95536115adfe50af249d36d73674be694ca3f/storage/remote/codec.go#L626-L645 func HistogramProtoToHistogram(hp Histogram) *histogram.Histogram { if hp.IsFloatHistogram() { diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/storepb/rpc.pb.go b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/rpc.pb.go index b5e85d69d8..3ddb507327 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/storepb/rpc.pb.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/rpc.pb.go @@ -305,6 +305,8 @@ type SeriesRequest struct { // NOTE(bwplotka): thanos.info.store.supports_without_replica_labels field has to return true to let client knows // server supports it. 
WithoutReplicaLabels []string `protobuf:"bytes,14,rep,name=without_replica_labels,json=withoutReplicaLabels,proto3" json:"without_replica_labels,omitempty"` + // limit is used to limit the number of results returned + Limit int64 `protobuf:"varint,15,opt,name=limit,proto3" json:"limit,omitempty"` } func (m *SeriesRequest) Reset() { *m = SeriesRequest{} } @@ -659,6 +661,8 @@ type LabelNamesRequest struct { Matchers []LabelMatcher `protobuf:"bytes,6,rep,name=matchers,proto3" json:"matchers"` // same as in series request. WithoutReplicaLabels []string `protobuf:"bytes,7,rep,name=without_replica_labels,json=withoutReplicaLabels,proto3" json:"without_replica_labels,omitempty"` + // limit is used to limit the number of results returned + Limit int64 `protobuf:"varint,8,opt,name=limit,proto3" json:"limit,omitempty"` } func (m *LabelNamesRequest) Reset() { *m = LabelNamesRequest{} } @@ -750,6 +754,8 @@ type LabelValuesRequest struct { Matchers []LabelMatcher `protobuf:"bytes,7,rep,name=matchers,proto3" json:"matchers"` // same as in series request. WithoutReplicaLabels []string `protobuf:"bytes,8,rep,name=without_replica_labels,json=withoutReplicaLabels,proto3" json:"without_replica_labels,omitempty"` + // limit is used to limit the number of results returned + Limit int64 `protobuf:"varint,9,opt,name=limit,proto3" json:"limit,omitempty"` } func (m *LabelValuesRequest) Reset() { *m = LabelValuesRequest{} } @@ -850,91 +856,92 @@ func init() { func init() { proto.RegisterFile("store/storepb/rpc.proto", fileDescriptor_a938d55a388af629) } var fileDescriptor_a938d55a388af629 = []byte{ - // 1331 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x57, 0x4f, 0x6f, 0x13, 0x47, - 0x14, 0xf7, 0x7a, 0xbd, 0xfe, 0xf3, 0x9c, 0xb8, 0x66, 0x30, 0x61, 0x63, 0x24, 0xc7, 0x75, 0x55, - 0xc9, 0x42, 0xd4, 0xa6, 0x06, 0x21, 0xb5, 0xe2, 0x92, 0x04, 0x43, 0xa2, 0x12, 0x53, 0xc6, 0x09, - 0x69, 0xa9, 0x2a, 0x6b, 0x6d, 0x4f, 0xd6, 0x2b, 0xec, 0xdd, 0x65, 0x67, 0xb6, 0x89, 0xaf, 0xad, - 0x7a, 0xab, 0xaa, 0xaa, 0x1f, 0xa1, 0x9f, 0x86, 0x23, 0xc7, 0xaa, 0x07, 0xd4, 0xc2, 0xbd, 0x9f, - 0xa1, 0x9a, 0x3f, 0xbb, 0xf6, 0xa6, 0x21, 0x08, 0x91, 0x4b, 0x34, 0xef, 0xf7, 0x7b, 0xf3, 0xe6, - 0xfd, 0xcf, 0x1a, 0xae, 0x52, 0xe6, 0x05, 0xa4, 0x2d, 0xfe, 0xfa, 0xc3, 0x76, 0xe0, 0x8f, 0x5a, - 0x7e, 0xe0, 0x31, 0x0f, 0x65, 0xd9, 0xc4, 0x72, 0x3d, 0x5a, 0x5d, 0x4f, 0x2a, 0xb0, 0xb9, 0x4f, - 0xa8, 0x54, 0xa9, 0x56, 0x6c, 0xcf, 0xf6, 0xc4, 0xb1, 0xcd, 0x4f, 0x0a, 0xad, 0x27, 0x2f, 0xf8, - 0x81, 0x37, 0x3b, 0x75, 0x4f, 0x99, 0x9c, 0x5a, 0x43, 0x32, 0x3d, 0x4d, 0xd9, 0x9e, 0x67, 0x4f, - 0x49, 0x5b, 0x48, 0xc3, 0xf0, 0xa8, 0x6d, 0xb9, 0x73, 0x49, 0x35, 0x3e, 0x82, 0xd5, 0xc3, 0xc0, - 0x61, 0x04, 0x13, 0xea, 0x7b, 0x2e, 0x25, 0x8d, 0x9f, 0x34, 0x58, 0x51, 0xc8, 0xf3, 0x90, 0x50, - 0x86, 0x36, 0x01, 0x98, 0x33, 0x23, 0x94, 0x04, 0x0e, 0xa1, 0xa6, 0x56, 0xd7, 0x9b, 0xc5, 0xce, - 0x35, 0x7e, 0x7b, 0x46, 0xd8, 0x84, 0x84, 0x74, 0x30, 0xf2, 0xfc, 0x79, 0x6b, 0xdf, 0x99, 0x91, - 0xbe, 0x50, 0xd9, 0xca, 0xbc, 0x78, 0xb5, 0x91, 0xc2, 0x4b, 0x97, 0xd0, 0x1a, 0x64, 0x19, 0x71, - 0x2d, 0x97, 0x99, 0xe9, 0xba, 0xd6, 0x2c, 0x60, 0x25, 0x21, 0x13, 0x72, 0x01, 0xf1, 0xa7, 0xce, - 0xc8, 0x32, 0xf5, 0xba, 0xd6, 0xd4, 0x71, 0x24, 0x36, 0x56, 0xa1, 0xb8, 0xeb, 0x1e, 0x79, 0xca, - 0x87, 0xc6, 0xef, 0x69, 0x58, 0x91, 0xb2, 0xf4, 0x12, 0x8d, 0x20, 0x2b, 0x02, 0x8d, 0x1c, 0x5a, - 0x6d, 0xc9, 0xc4, 0xb6, 0x1e, 0x72, 0x74, 0xeb, 0x2e, 0x77, 0xe1, 0xaf, 0x57, 0x1b, 0xb7, 0x6d, - 0x87, 0x4d, 0xc2, 0x61, 0x6b, 0xe4, 0xcd, 
0xda, 0x52, 0xe1, 0x33, 0xc7, 0x53, 0xa7, 0xb6, 0xff, - 0xcc, 0x6e, 0x27, 0x72, 0xd6, 0x7a, 0x2a, 0x6e, 0x63, 0x65, 0x1a, 0xad, 0x43, 0x7e, 0xe6, 0xb8, - 0x03, 0x1e, 0x88, 0x70, 0x5c, 0xc7, 0xb9, 0x99, 0xe3, 0xf2, 0x48, 0x05, 0x65, 0x9d, 0x48, 0x4a, - 0xb9, 0x3e, 0xb3, 0x4e, 0x04, 0xd5, 0x86, 0x82, 0xb0, 0xba, 0x3f, 0xf7, 0x89, 0x99, 0xa9, 0x6b, - 0xcd, 0x52, 0xe7, 0x52, 0xe4, 0x5d, 0x3f, 0x22, 0xf0, 0x42, 0x07, 0xdd, 0x01, 0x10, 0x0f, 0x0e, - 0x28, 0x61, 0xd4, 0x34, 0x44, 0x3c, 0xf1, 0x0d, 0xe9, 0x52, 0x9f, 0x30, 0x95, 0xd6, 0xc2, 0x54, - 0xc9, 0xb4, 0xf1, 0x8b, 0x01, 0xab, 0x32, 0xe5, 0x51, 0xa9, 0x96, 0x1d, 0xd6, 0xde, 0xee, 0x70, - 0x3a, 0xe9, 0xf0, 0x1d, 0x4e, 0xb1, 0xd1, 0x84, 0x04, 0xd4, 0xd4, 0xc5, 0xeb, 0x95, 0x44, 0x36, - 0xf7, 0x24, 0xa9, 0x1c, 0x88, 0x75, 0x51, 0x07, 0xae, 0x70, 0x93, 0x01, 0xa1, 0xde, 0x34, 0x64, - 0x8e, 0xe7, 0x0e, 0x8e, 0x1d, 0x77, 0xec, 0x1d, 0x8b, 0xa0, 0x75, 0x7c, 0x79, 0x66, 0x9d, 0xe0, - 0x98, 0x3b, 0x14, 0x14, 0xba, 0x01, 0x60, 0xd9, 0x76, 0x40, 0x6c, 0x8b, 0x11, 0x19, 0x6b, 0xa9, - 0xb3, 0x12, 0xbd, 0xb6, 0x69, 0xdb, 0x01, 0x5e, 0xe2, 0xd1, 0x97, 0xb0, 0xee, 0x5b, 0x01, 0x73, - 0xac, 0x29, 0x7f, 0x45, 0x54, 0x7e, 0x30, 0x76, 0xa8, 0x35, 0x9c, 0x92, 0xb1, 0x99, 0xad, 0x6b, - 0xcd, 0x3c, 0xbe, 0xaa, 0x14, 0xa2, 0xce, 0xb8, 0xa7, 0x68, 0xf4, 0xdd, 0x19, 0x77, 0x29, 0x0b, - 0x2c, 0x46, 0xec, 0xb9, 0x99, 0x13, 0x65, 0xd9, 0x88, 0x1e, 0xfe, 0x3a, 0x69, 0xa3, 0xaf, 0xd4, - 0xfe, 0x67, 0x3c, 0x22, 0xd0, 0x06, 0x14, 0xe9, 0x33, 0xc7, 0x1f, 0x8c, 0x26, 0xa1, 0xfb, 0x8c, - 0x9a, 0x79, 0xe1, 0x0a, 0x70, 0x68, 0x5b, 0x20, 0xe8, 0x3a, 0x18, 0x13, 0xc7, 0x65, 0xd4, 0x2c, - 0xd4, 0x35, 0x91, 0x50, 0x39, 0x81, 0xad, 0x68, 0x02, 0x5b, 0x9b, 0xee, 0x1c, 0x4b, 0x15, 0x84, - 0x20, 0x43, 0x19, 0xf1, 0x4d, 0x10, 0x69, 0x13, 0x67, 0x54, 0x01, 0x23, 0xb0, 0x5c, 0x9b, 0x98, - 0x45, 0x01, 0x4a, 0x01, 0xdd, 0x82, 0xe2, 0xf3, 0x90, 0x04, 0xf3, 0x81, 0xb4, 0xbd, 0x22, 0x6c, - 0xa3, 0x28, 0x8a, 0xc7, 0x9c, 0xda, 0xe1, 0x0c, 0x86, 0xe7, 0xf1, 0x19, 0xdd, 0x04, 0xa0, 0x13, - 0x2b, 0x18, 0x0f, 0x1c, 0xf7, 0xc8, 0x33, 0x57, 0xc5, 0x9d, 0x45, 0x43, 0x72, 0x46, 0x4c, 0x56, - 0x81, 0x46, 0x47, 0x74, 0x1b, 0xd6, 0x8e, 0x1d, 0x36, 0xf1, 0x42, 0x36, 0x50, 0xf3, 0x38, 0x50, - 0xc3, 0x56, 0xaa, 0xeb, 0xcd, 0x02, 0xae, 0x28, 0x16, 0x4b, 0x52, 0x34, 0x09, 0x6d, 0xfc, 0xa1, - 0x01, 0x2c, 0x5c, 0x10, 0x29, 0x62, 0xc4, 0x1f, 0xcc, 0x9c, 0xe9, 0xd4, 0xa1, 0xaa, 0x1d, 0x81, - 0x43, 0x7b, 0x02, 0x41, 0x75, 0xc8, 0x1c, 0x85, 0xee, 0x48, 0x74, 0x63, 0x71, 0xd1, 0x04, 0xf7, - 0x43, 0x77, 0x84, 0x05, 0x83, 0x6e, 0x40, 0xde, 0x0e, 0xbc, 0xd0, 0x77, 0x5c, 0x5b, 0xf4, 0x54, - 0xb1, 0x53, 0x8e, 0xb4, 0x1e, 0x28, 0x1c, 0xc7, 0x1a, 0xe8, 0x93, 0x28, 0x65, 0x86, 0x50, 0x8d, - 0x37, 0x02, 0xe6, 0xa0, 0xca, 0x60, 0xe3, 0x18, 0x0a, 0x71, 0xc8, 0xc2, 0x45, 0x95, 0x99, 0x31, - 0x39, 0x89, 0x5d, 0x94, 0xfc, 0x98, 0x9c, 0xa0, 0x8f, 0x61, 0x85, 0x79, 0xcc, 0x9a, 0x0e, 0x04, - 0x46, 0xd5, 0xe0, 0x14, 0x05, 0x26, 0xcc, 0x50, 0x54, 0x82, 0xf4, 0x70, 0x2e, 0x56, 0x40, 0x1e, - 0xa7, 0x87, 0x73, 0xbe, 0xea, 0x54, 0xae, 0x32, 0x22, 0x57, 0x4a, 0x6a, 0x54, 0x21, 0xc3, 0x23, - 0xe3, 0xc5, 0x76, 0x2d, 0x35, 0x9e, 0x05, 0x2c, 0xce, 0x8d, 0x0e, 0xe4, 0xa3, 0x78, 0x94, 0x3d, - 0xed, 0x0c, 0x7b, 0x7a, 0xc2, 0xde, 0x06, 0x18, 0x22, 0x30, 0xae, 0x90, 0x48, 0xb1, 0x92, 0x1a, - 0xbf, 0x6a, 0x50, 0x8a, 0xb6, 0x83, 0x5a, 0x9a, 0x4d, 0xc8, 0xc6, 0x5b, 0x9c, 0xa7, 0xa8, 0x14, - 0x77, 0x81, 0x40, 0x77, 0x52, 0x58, 0xf1, 0xa8, 0x0a, 0xb9, 0x63, 0x2b, 0x70, 0x79, 0xe2, 0xc5, - 0xc6, 0xde, 0x49, 0xe1, 0x08, 0x40, 0x37, 0xa2, 0xd6, 0xd6, 0xdf, 
0xde, 0xda, 0x3b, 0x29, 0xd5, - 0xdc, 0x5b, 0x79, 0xc8, 0x06, 0x84, 0x86, 0x53, 0xd6, 0xf8, 0x37, 0x0d, 0x97, 0x44, 0xab, 0xf4, - 0xac, 0xd9, 0x62, 0x65, 0x9d, 0x3b, 0xe2, 0xda, 0x07, 0x8c, 0x78, 0xfa, 0x03, 0x47, 0xbc, 0x02, - 0x06, 0x65, 0x56, 0xc0, 0xd4, 0x7a, 0x97, 0x02, 0x2a, 0x83, 0x4e, 0xdc, 0xb1, 0xda, 0x70, 0xfc, - 0xb8, 0x98, 0x74, 0xe3, 0xdd, 0x93, 0xbe, 0xbc, 0x69, 0xb3, 0xef, 0xb1, 0x69, 0xdf, 0x3e, 0x90, - 0xb9, 0x73, 0x06, 0x32, 0x00, 0xb4, 0x9c, 0x6f, 0xd5, 0x04, 0x15, 0x30, 0x78, 0xd3, 0xc9, 0x7f, - 0x9c, 0x05, 0x2c, 0x05, 0x54, 0x85, 0xbc, 0xaa, 0x2f, 0xef, 0x72, 0x4e, 0xc4, 0xf2, 0x22, 0x42, - 0xfd, 0x9d, 0x11, 0x36, 0x7e, 0xd6, 0xd5, 0xa3, 0x4f, 0xac, 0x69, 0xb8, 0xa8, 0x72, 0x05, 0x0c, - 0xe1, 0xb0, 0x6a, 0x7b, 0x29, 0x9c, 0x5f, 0xfb, 0xf4, 0x07, 0xd4, 0x5e, 0xbf, 0xa8, 0xda, 0x67, - 0xce, 0xa8, 0xbd, 0x71, 0x46, 0xed, 0xb3, 0xef, 0x57, 0xfb, 0xdc, 0x85, 0xd4, 0x3e, 0x7f, 0x4e, - 0xed, 0x43, 0xb8, 0x9c, 0x28, 0x83, 0x2a, 0xfe, 0x1a, 0x64, 0x7f, 0x10, 0x88, 0xaa, 0xbe, 0x92, - 0x2e, 0xaa, 0xfc, 0xd7, 0xbf, 0x87, 0x42, 0xfc, 0x89, 0x83, 0x8a, 0x90, 0x3b, 0xe8, 0x7d, 0xd5, - 0x7b, 0x74, 0xd8, 0x2b, 0xa7, 0x50, 0x01, 0x8c, 0xc7, 0x07, 0x5d, 0xfc, 0x6d, 0x59, 0x43, 0x79, - 0xc8, 0xe0, 0x83, 0x87, 0xdd, 0x72, 0x9a, 0x6b, 0xf4, 0x77, 0xef, 0x75, 0xb7, 0x37, 0x71, 0x59, - 0xe7, 0x1a, 0xfd, 0xfd, 0x47, 0xb8, 0x5b, 0xce, 0x70, 0x1c, 0x77, 0xb7, 0xbb, 0xbb, 0x4f, 0xba, - 0x65, 0x83, 0xe3, 0xf7, 0xba, 0x5b, 0x07, 0x0f, 0xca, 0xd9, 0xeb, 0x5b, 0x90, 0xe1, 0xdf, 0x08, - 0x28, 0x07, 0x3a, 0xde, 0x3c, 0x94, 0x56, 0xb7, 0x1f, 0x1d, 0xf4, 0xf6, 0xcb, 0x1a, 0xc7, 0xfa, - 0x07, 0x7b, 0xe5, 0x34, 0x3f, 0xec, 0xed, 0xf6, 0xca, 0xba, 0x38, 0x6c, 0x7e, 0x23, 0xcd, 0x09, - 0xad, 0x2e, 0x2e, 0x1b, 0x9d, 0x1f, 0xd3, 0x60, 0x08, 0x1f, 0xd1, 0xe7, 0x90, 0x11, 0xff, 0x06, - 0x2e, 0x47, 0x75, 0x58, 0xfa, 0xe2, 0xac, 0x56, 0x92, 0xa0, 0xca, 0xdf, 0x17, 0x90, 0x95, 0xbb, - 0x12, 0x5d, 0x49, 0xee, 0xce, 0xe8, 0xda, 0xda, 0x69, 0x58, 0x5e, 0xbc, 0xa9, 0xa1, 0x6d, 0x80, - 0xc5, 0x34, 0xa2, 0xf5, 0x44, 0xed, 0x97, 0x37, 0x62, 0xb5, 0x7a, 0x16, 0xa5, 0xde, 0xbf, 0x0f, - 0xc5, 0xa5, 0xb2, 0xa2, 0xa4, 0x6a, 0x62, 0xe4, 0xaa, 0xd7, 0xce, 0xe4, 0xa4, 0x9d, 0x4e, 0x0f, - 0x4a, 0xe2, 0x1b, 0x9f, 0xcf, 0x92, 0x4c, 0xc6, 0x5d, 0x28, 0x62, 0x32, 0xf3, 0x18, 0x11, 0x38, - 0x8a, 0xc3, 0x5f, 0xfe, 0x29, 0x50, 0xbd, 0x72, 0x0a, 0x55, 0x3f, 0x19, 0x52, 0x5b, 0x9f, 0xbe, - 0xf8, 0xa7, 0x96, 0x7a, 0xf1, 0xba, 0xa6, 0xbd, 0x7c, 0x5d, 0xd3, 0xfe, 0x7e, 0x5d, 0xd3, 0x7e, - 0x7b, 0x53, 0x4b, 0xbd, 0x7c, 0x53, 0x4b, 0xfd, 0xf9, 0xa6, 0x96, 0x7a, 0x9a, 0x53, 0xbf, 0x5a, - 0x86, 0x59, 0xd1, 0x33, 0xb7, 0xfe, 0x0b, 0x00, 0x00, 0xff, 0xff, 0xa0, 0x14, 0xa2, 0x0f, 0x1f, - 0x0d, 0x00, 0x00, + // 1351 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x57, 0x4d, 0x6f, 0xdb, 0x46, + 0x13, 0x16, 0x45, 0x51, 0x1f, 0x23, 0x5b, 0x51, 0x36, 0x8a, 0x43, 0x2b, 0x80, 0xac, 0x57, 0x2f, + 0x0a, 0x08, 0x41, 0x2a, 0xa5, 0x4a, 0x10, 0xa0, 0x45, 0x2e, 0xb6, 0xa3, 0xc4, 0x46, 0x63, 0xa5, + 0x59, 0xd9, 0x71, 0x9b, 0xa2, 0x10, 0x28, 0x69, 0x4d, 0x11, 0xa1, 0x48, 0x86, 0xbb, 0xac, 0xad, + 0x6b, 0xdb, 0x73, 0x51, 0xf4, 0xda, 0x5b, 0x7e, 0x4d, 0x8e, 0x39, 0x16, 0x3d, 0x04, 0x6d, 0xf2, + 0x47, 0x8a, 0xfd, 0xa0, 0x24, 0xba, 0xce, 0x17, 0xe2, 0x8b, 0xb1, 0xf3, 0x3c, 0xb3, 0xb3, 0xb3, + 0xb3, 0xcf, 0x8c, 0x45, 0xb8, 0x42, 0x99, 0x1f, 0x92, 0xb6, 0xf8, 0x1b, 0x0c, 0xdb, 0x61, 0x30, + 0x6a, 0x05, 0xa1, 0xcf, 0x7c, 0x94, 0x65, 0x13, 0xcb, 0xf3, 0x69, 0x75, 0x3d, 0xe9, 0xc0, 0x66, + 0x01, 0xa1, 0xd2, 0xa5, 
0x5a, 0xb1, 0x7d, 0xdb, 0x17, 0xcb, 0x36, 0x5f, 0x29, 0xb4, 0x9e, 0xdc, + 0x10, 0x84, 0xfe, 0xf4, 0xd4, 0x3e, 0x15, 0xd2, 0xb5, 0x86, 0xc4, 0x3d, 0x4d, 0xd9, 0xbe, 0x6f, + 0xbb, 0xa4, 0x2d, 0xac, 0x61, 0x74, 0xd4, 0xb6, 0xbc, 0x99, 0xa4, 0x1a, 0x17, 0x60, 0xf5, 0x30, + 0x74, 0x18, 0xc1, 0x84, 0x06, 0xbe, 0x47, 0x49, 0xe3, 0x67, 0x0d, 0x56, 0x14, 0xf2, 0x2c, 0x22, + 0x94, 0xa1, 0x4d, 0x00, 0xe6, 0x4c, 0x09, 0x25, 0xa1, 0x43, 0xa8, 0xa9, 0xd5, 0xf5, 0x66, 0xb1, + 0x73, 0x95, 0xef, 0x9e, 0x12, 0x36, 0x21, 0x11, 0x1d, 0x8c, 0xfc, 0x60, 0xd6, 0xda, 0x77, 0xa6, + 0xa4, 0x2f, 0x5c, 0xb6, 0x32, 0x2f, 0x5e, 0x6d, 0xa4, 0xf0, 0xd2, 0x26, 0xb4, 0x06, 0x59, 0x46, + 0x3c, 0xcb, 0x63, 0x66, 0xba, 0xae, 0x35, 0x0b, 0x58, 0x59, 0xc8, 0x84, 0x5c, 0x48, 0x02, 0xd7, + 0x19, 0x59, 0xa6, 0x5e, 0xd7, 0x9a, 0x3a, 0x8e, 0xcd, 0xc6, 0x2a, 0x14, 0x77, 0xbd, 0x23, 0x5f, + 0xe5, 0xd0, 0xf8, 0x3d, 0x0d, 0x2b, 0xd2, 0x96, 0x59, 0xa2, 0x11, 0x64, 0xc5, 0x45, 0xe3, 0x84, + 0x56, 0x5b, 0xb2, 0xb0, 0xad, 0x07, 0x1c, 0xdd, 0xba, 0xc3, 0x53, 0xf8, 0xeb, 0xd5, 0xc6, 0x2d, + 0xdb, 0x61, 0x93, 0x68, 0xd8, 0x1a, 0xf9, 0xd3, 0xb6, 0x74, 0xf8, 0xdc, 0xf1, 0xd5, 0xaa, 0x1d, + 0x3c, 0xb5, 0xdb, 0x89, 0x9a, 0xb5, 0x9e, 0x88, 0xdd, 0x58, 0x85, 0x46, 0xeb, 0x90, 0x9f, 0x3a, + 0xde, 0x80, 0x5f, 0x44, 0x24, 0xae, 0xe3, 0xdc, 0xd4, 0xf1, 0xf8, 0x4d, 0x05, 0x65, 0x9d, 0x48, + 0x4a, 0xa5, 0x3e, 0xb5, 0x4e, 0x04, 0xd5, 0x86, 0x82, 0x88, 0xba, 0x3f, 0x0b, 0x88, 0x99, 0xa9, + 0x6b, 0xcd, 0x52, 0xe7, 0x62, 0x9c, 0x5d, 0x3f, 0x26, 0xf0, 0xc2, 0x07, 0xdd, 0x06, 0x10, 0x07, + 0x0e, 0x28, 0x61, 0xd4, 0x34, 0xc4, 0x7d, 0xe6, 0x3b, 0x64, 0x4a, 0x7d, 0xc2, 0x54, 0x59, 0x0b, + 0xae, 0xb2, 0x69, 0xe3, 0xb9, 0x01, 0xab, 0xb2, 0xe4, 0xf1, 0x53, 0x2d, 0x27, 0xac, 0xbd, 0x3d, + 0xe1, 0x74, 0x32, 0xe1, 0xdb, 0x9c, 0x62, 0xa3, 0x09, 0x09, 0xa9, 0xa9, 0x8b, 0xd3, 0x2b, 0x89, + 0x6a, 0xee, 0x49, 0x52, 0x25, 0x30, 0xf7, 0x45, 0x1d, 0xb8, 0xcc, 0x43, 0x86, 0x84, 0xfa, 0x6e, + 0xc4, 0x1c, 0xdf, 0x1b, 0x1c, 0x3b, 0xde, 0xd8, 0x3f, 0x16, 0x97, 0xd6, 0xf1, 0xa5, 0xa9, 0x75, + 0x82, 0xe7, 0xdc, 0xa1, 0xa0, 0xd0, 0x75, 0x00, 0xcb, 0xb6, 0x43, 0x62, 0x5b, 0x8c, 0xc8, 0xbb, + 0x96, 0x3a, 0x2b, 0xf1, 0x69, 0x9b, 0xb6, 0x1d, 0xe2, 0x25, 0x1e, 0x7d, 0x05, 0xeb, 0x81, 0x15, + 0x32, 0xc7, 0x72, 0xf9, 0x29, 0xe2, 0xe5, 0x07, 0x63, 0x87, 0x5a, 0x43, 0x97, 0x8c, 0xcd, 0x6c, + 0x5d, 0x6b, 0xe6, 0xf1, 0x15, 0xe5, 0x10, 0x2b, 0xe3, 0xae, 0xa2, 0xd1, 0xf7, 0x67, 0xec, 0xa5, + 0x2c, 0xb4, 0x18, 0xb1, 0x67, 0x66, 0x4e, 0x3c, 0xcb, 0x46, 0x7c, 0xf0, 0x37, 0xc9, 0x18, 0x7d, + 0xe5, 0xf6, 0x9f, 0xe0, 0x31, 0x81, 0x36, 0xa0, 0x48, 0x9f, 0x3a, 0xc1, 0x60, 0x34, 0x89, 0xbc, + 0xa7, 0xd4, 0xcc, 0x8b, 0x54, 0x80, 0x43, 0xdb, 0x02, 0x41, 0xd7, 0xc0, 0x98, 0x38, 0x1e, 0xa3, + 0x66, 0xa1, 0xae, 0x89, 0x82, 0xca, 0x0e, 0x6c, 0xc5, 0x1d, 0xd8, 0xda, 0xf4, 0x66, 0x58, 0xba, + 0x20, 0x04, 0x19, 0xca, 0x48, 0x60, 0x82, 0x28, 0x9b, 0x58, 0xa3, 0x0a, 0x18, 0xa1, 0xe5, 0xd9, + 0xc4, 0x2c, 0x0a, 0x50, 0x1a, 0xe8, 0x26, 0x14, 0x9f, 0x45, 0x24, 0x9c, 0x0d, 0x64, 0xec, 0x15, + 0x11, 0x1b, 0xc5, 0xb7, 0x78, 0xc4, 0xa9, 0x1d, 0xce, 0x60, 0x78, 0x36, 0x5f, 0xa3, 0x1b, 0x00, + 0x74, 0x62, 0x85, 0xe3, 0x81, 0xe3, 0x1d, 0xf9, 0xe6, 0xaa, 0xd8, 0xb3, 0x10, 0x24, 0x67, 0x44, + 0x67, 0x15, 0x68, 0xbc, 0x44, 0xb7, 0x60, 0xed, 0xd8, 0x61, 0x13, 0x3f, 0x62, 0x03, 0xd5, 0x8f, + 0x03, 0xd5, 0x6c, 0xa5, 0xba, 0xde, 0x2c, 0xe0, 0x8a, 0x62, 0xb1, 0x24, 0x1f, 0xc8, 0x6e, 0xa9, + 0x80, 0xe1, 0x3a, 0x53, 0x87, 0x99, 0x17, 0x64, 0xca, 0xc2, 0x68, 0x3c, 0xd7, 0x00, 0x16, 0x89, + 0x89, 0xc2, 0x31, 0x12, 0x0c, 0xa6, 0x8e, 0xeb, 
0x3a, 0x54, 0x89, 0x14, 0x38, 0xb4, 0x27, 0x10, + 0x54, 0x87, 0xcc, 0x51, 0xe4, 0x8d, 0x84, 0x46, 0x8b, 0x0b, 0x69, 0xdc, 0x8b, 0xbc, 0x11, 0x16, + 0x0c, 0xba, 0x0e, 0x79, 0x3b, 0xf4, 0xa3, 0xc0, 0xf1, 0x6c, 0xa1, 0xb4, 0x62, 0xa7, 0x1c, 0x7b, + 0xdd, 0x57, 0x38, 0x9e, 0x7b, 0xa0, 0xff, 0xc7, 0x85, 0x34, 0x84, 0xeb, 0x7c, 0x4e, 0x60, 0x0e, + 0xaa, 0xba, 0x36, 0x8e, 0xa1, 0x30, 0x2f, 0x84, 0x48, 0x51, 0xd5, 0x6b, 0x4c, 0x4e, 0xe6, 0x29, + 0x4a, 0x7e, 0x4c, 0x4e, 0xd0, 0xff, 0x60, 0x85, 0xf9, 0xcc, 0x72, 0x07, 0x02, 0xa3, 0xaa, 0x9d, + 0x8a, 0x02, 0x13, 0x61, 0x28, 0x2a, 0x41, 0x7a, 0x38, 0x13, 0x83, 0x21, 0x8f, 0xd3, 0xc3, 0x19, + 0x1f, 0x80, 0xaa, 0x82, 0x19, 0x51, 0x41, 0x65, 0x35, 0xaa, 0x90, 0xe1, 0x37, 0xe3, 0x12, 0xf0, + 0x2c, 0xd5, 0xb4, 0x05, 0x2c, 0xd6, 0x8d, 0x0e, 0xe4, 0xe3, 0xfb, 0xa8, 0x78, 0xda, 0x19, 0xf1, + 0xf4, 0x44, 0xbc, 0x0d, 0x30, 0xc4, 0xc5, 0xb8, 0x43, 0xa2, 0xc4, 0xca, 0x6a, 0xfc, 0xaa, 0x41, + 0x29, 0x9e, 0x19, 0x6a, 0x94, 0x36, 0x21, 0x3b, 0x9f, 0xed, 0xbc, 0x44, 0xa5, 0xb9, 0x36, 0x04, + 0xba, 0x93, 0xc2, 0x8a, 0x47, 0x55, 0xc8, 0x1d, 0x5b, 0xa1, 0xc7, 0x0b, 0x2f, 0xe6, 0xf8, 0x4e, + 0x0a, 0xc7, 0x00, 0xba, 0x1e, 0x0b, 0x5e, 0x7f, 0xbb, 0xe0, 0x77, 0x52, 0x4a, 0xf2, 0x5b, 0x79, + 0xc8, 0x86, 0x84, 0x46, 0x2e, 0x6b, 0xfc, 0xa2, 0xc3, 0x45, 0x21, 0xa0, 0x9e, 0x35, 0x5d, 0x0c, + 0xb2, 0x77, 0x36, 0xbe, 0xf6, 0x09, 0x8d, 0x9f, 0xfe, 0xc4, 0xc6, 0xaf, 0x80, 0x41, 0x99, 0x15, + 0x32, 0x35, 0xf4, 0xa5, 0x81, 0xca, 0xa0, 0x13, 0x6f, 0xac, 0xe6, 0x1e, 0x5f, 0x2e, 0xfa, 0xdf, + 0x78, 0x7f, 0xff, 0x2f, 0xcf, 0xdf, 0xec, 0x47, 0xcc, 0xdf, 0xb7, 0xb7, 0x69, 0xee, 0x43, 0xda, + 0x34, 0xbf, 0xdc, 0xa6, 0x21, 0xa0, 0xe5, 0x57, 0x50, 0xd2, 0xa8, 0x80, 0xc1, 0xa5, 0x28, 0xff, + 0xc9, 0x16, 0xb0, 0x34, 0x50, 0x15, 0xf2, 0xea, 0xd5, 0xb9, 0xf6, 0x39, 0x31, 0xb7, 0x17, 0xf7, + 0xd6, 0xdf, 0x7b, 0xef, 0xc6, 0x1f, 0xba, 0x3a, 0xf4, 0xb1, 0xe5, 0x46, 0x8b, 0xb7, 0xe7, 0x09, + 0x72, 0x54, 0x35, 0x83, 0x34, 0xde, 0xad, 0x88, 0xf4, 0x27, 0x28, 0x42, 0x3f, 0x2f, 0x45, 0x64, + 0xce, 0x50, 0x84, 0x71, 0x86, 0x22, 0xb2, 0x1f, 0xa7, 0x88, 0xdc, 0xb9, 0x28, 0x22, 0xff, 0x21, + 0x8a, 0x28, 0x2c, 0x2b, 0x22, 0x82, 0x4b, 0x89, 0xc7, 0x51, 0x92, 0x58, 0x83, 0xec, 0x8f, 0x02, + 0x51, 0x9a, 0x50, 0xd6, 0x79, 0x89, 0xe2, 0xda, 0x0f, 0x50, 0x98, 0xff, 0x48, 0x42, 0x45, 0xc8, + 0x1d, 0xf4, 0xbe, 0xee, 0x3d, 0x3c, 0xec, 0x95, 0x53, 0xa8, 0x00, 0xc6, 0xa3, 0x83, 0x2e, 0xfe, + 0xae, 0xac, 0xa1, 0x3c, 0x64, 0xf0, 0xc1, 0x83, 0x6e, 0x39, 0xcd, 0x3d, 0xfa, 0xbb, 0x77, 0xbb, + 0xdb, 0x9b, 0xb8, 0xac, 0x73, 0x8f, 0xfe, 0xfe, 0x43, 0xdc, 0x2d, 0x67, 0x38, 0x8e, 0xbb, 0xdb, + 0xdd, 0xdd, 0xc7, 0xdd, 0xb2, 0xc1, 0xf1, 0xbb, 0xdd, 0xad, 0x83, 0xfb, 0xe5, 0xec, 0xb5, 0x2d, + 0xc8, 0xf0, 0x5f, 0x19, 0x28, 0x07, 0x3a, 0xde, 0x3c, 0x94, 0x51, 0xb7, 0x1f, 0x1e, 0xf4, 0xf6, + 0xcb, 0x1a, 0xc7, 0xfa, 0x07, 0x7b, 0xe5, 0x34, 0x5f, 0xec, 0xed, 0xf6, 0xca, 0xba, 0x58, 0x6c, + 0x7e, 0x2b, 0xc3, 0x09, 0xaf, 0x2e, 0x2e, 0x1b, 0x9d, 0x9f, 0xd2, 0x60, 0x88, 0x1c, 0xd1, 0x17, + 0x90, 0x11, 0xff, 0x32, 0x2e, 0xc5, 0xaf, 0xb3, 0xf4, 0x9b, 0xb5, 0x5a, 0x49, 0x82, 0xaa, 0x7e, + 0x5f, 0x42, 0x56, 0xce, 0x55, 0x74, 0x39, 0x39, 0x67, 0xe3, 0x6d, 0x6b, 0xa7, 0x61, 0xb9, 0xf1, + 0x86, 0x86, 0xb6, 0x01, 0x16, 0x3d, 0x8a, 0xd6, 0x13, 0x8a, 0x58, 0x9e, 0x9e, 0xd5, 0xea, 0x59, + 0x94, 0x3a, 0xff, 0x1e, 0x14, 0x97, 0x9e, 0x15, 0x25, 0x5d, 0x13, 0x8d, 0x58, 0xbd, 0x7a, 0x26, + 0x27, 0xe3, 0x74, 0x7a, 0x50, 0x12, 0x5f, 0x09, 0xbc, 0xc3, 0x64, 0x31, 0xee, 0x40, 0x11, 0x93, + 0xa9, 0xcf, 0x88, 0xc0, 0xd1, 0xfc, 0xfa, 0xcb, 0x1f, 0x13, 0xd5, 0xcb, 
0xa7, 0x50, 0xf5, 0xd1, + 0x91, 0xda, 0xfa, 0xec, 0xc5, 0x3f, 0xb5, 0xd4, 0x8b, 0xd7, 0x35, 0xed, 0xe5, 0xeb, 0x9a, 0xf6, + 0xf7, 0xeb, 0x9a, 0xf6, 0xdb, 0x9b, 0x5a, 0xea, 0xe5, 0x9b, 0x5a, 0xea, 0xcf, 0x37, 0xb5, 0xd4, + 0x93, 0x9c, 0xfa, 0xee, 0x19, 0x66, 0x85, 0x66, 0x6e, 0xfe, 0x1b, 0x00, 0x00, 0xff, 0xff, 0x22, + 0xcb, 0x99, 0x7e, 0x61, 0x0d, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -1436,6 +1443,11 @@ func (m *SeriesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.Limit != 0 { + i = encodeVarintRpc(dAtA, i, uint64(m.Limit)) + i-- + dAtA[i] = 0x78 + } if len(m.WithoutReplicaLabels) > 0 { for iNdEx := len(m.WithoutReplicaLabels) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.WithoutReplicaLabels[iNdEx]) @@ -1890,6 +1902,11 @@ func (m *LabelNamesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.Limit != 0 { + i = encodeVarintRpc(dAtA, i, uint64(m.Limit)) + i-- + dAtA[i] = 0x40 + } if len(m.WithoutReplicaLabels) > 0 { for iNdEx := len(m.WithoutReplicaLabels) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.WithoutReplicaLabels[iNdEx]) @@ -2026,6 +2043,11 @@ func (m *LabelValuesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.Limit != 0 { + i = encodeVarintRpc(dAtA, i, uint64(m.Limit)) + i-- + dAtA[i] = 0x48 + } if len(m.WithoutReplicaLabels) > 0 { for iNdEx := len(m.WithoutReplicaLabels) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.WithoutReplicaLabels[iNdEx]) @@ -2291,6 +2313,9 @@ func (m *SeriesRequest) Size() (n int) { n += 1 + l + sovRpc(uint64(l)) } } + if m.Limit != 0 { + n += 1 + sovRpc(uint64(m.Limit)) + } return n } @@ -2465,6 +2490,9 @@ func (m *LabelNamesRequest) Size() (n int) { n += 1 + l + sovRpc(uint64(l)) } } + if m.Limit != 0 { + n += 1 + sovRpc(uint64(m.Limit)) + } return n } @@ -2531,6 +2559,9 @@ func (m *LabelValuesRequest) Size() (n int) { n += 1 + l + sovRpc(uint64(l)) } } + if m.Limit != 0 { + n += 1 + sovRpc(uint64(m.Limit)) + } return n } @@ -3401,6 +3432,25 @@ func (m *SeriesRequest) Unmarshal(dAtA []byte) error { } m.WithoutReplicaLabels = append(m.WithoutReplicaLabels, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex + case 15: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) + } + m.Limit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Limit |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipRpc(dAtA[iNdEx:]) @@ -4352,6 +4402,25 @@ func (m *LabelNamesRequest) Unmarshal(dAtA []byte) error { } m.WithoutReplicaLabels = append(m.WithoutReplicaLabels, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) + } + m.Limit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Limit |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipRpc(dAtA[iNdEx:]) @@ -4763,6 +4832,25 @@ func (m *LabelValuesRequest) Unmarshal(dAtA []byte) error { } m.WithoutReplicaLabels = append(m.WithoutReplicaLabels, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) 
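// Editor's note: the hand-rolled varint handling above follows the protobuf
// wire format. The tag bytes written by the Marshal hunks are
// field_number<<3 | wire_type with wire type 0 (varint): field 15 -> 0x78,
// field 8 -> 0x40, field 9 -> 0x48. A self-contained sketch of the same
// base-128 round trip (illustrative helpers, not part of the generated file):
//
//	func encodeVarint(buf []byte, v uint64) []byte {
//		for v >= 0x80 {
//			buf = append(buf, byte(v)|0x80) // low 7 bits + continuation bit
//			v >>= 7
//		}
//		return append(buf, byte(v)) // last byte: continuation bit clear
//	}
//
//	func decodeVarint(buf []byte) (v uint64, n int) {
//		for shift := uint(0); n < len(buf); shift += 7 {
//			b := buf[n]
//			n++
//			v |= uint64(b&0x7F) << shift
//			if b < 0x80 {
//				return v, n
//			}
//		}
//		return 0, 0 // truncated input
//	}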
+ } + m.Limit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Limit |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipRpc(dAtA[iNdEx:]) diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/storepb/rpc.proto b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/rpc.proto index 2a9e9e3eaf..a15e5b6f8e 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/storepb/rpc.proto +++ b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/rpc.proto @@ -137,6 +137,9 @@ message SeriesRequest { // NOTE(bwplotka): thanos.info.store.supports_without_replica_labels field has to return true to let client knows // server supports it. repeated string without_replica_labels = 14; + + // limit is used to limit the number of results returned + int64 limit = 15; } // QueryHints represents hints from PromQL that might help to @@ -235,6 +238,9 @@ message LabelNamesRequest { // same as in series request. repeated string without_replica_labels = 7; + + // limit is used to limit the number of results returned + int64 limit = 8; } message LabelNamesResponse { @@ -268,6 +274,9 @@ message LabelValuesRequest { // same as in series request. repeated string without_replica_labels = 8; + + // limit is used to limit the number of results returned + int64 limit = 9; } message LabelValuesResponse { diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/tsdb.go b/vendor/github.com/thanos-io/thanos/pkg/store/tsdb.go index b19c131b54..6dd18af0a8 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/tsdb.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/tsdb.go @@ -220,7 +220,12 @@ func (s *TSDBStore) Series(r *storepb.SeriesRequest, seriesSrv storepb.Store_Ser defer runutil.CloseWithLogOnErr(s.logger, q, "close tsdb chunk querier series") } - set := q.Select(srv.Context(), true, nil, matchers...) + hints := &storage.SelectHints{ + Start: r.MinTime, + End: r.MaxTime, + Limit: int(r.Limit), + } + set := q.Select(srv.Context(), true, hints, matchers...) shardMatcher := r.ShardInfo.Matcher(&s.buffers) defer shardMatcher.Close() @@ -328,7 +333,10 @@ func (s *TSDBStore) LabelNames(ctx context.Context, r *storepb.LabelNamesRequest } defer runutil.CloseWithLogOnErr(s.logger, q, "close tsdb querier label names") - res, _, err := q.LabelNames(ctx, nil, matchers...) + hints := &storage.LabelHints{ + Limit: int(r.Limit), + } + res, _, err := q.LabelNames(ctx, hints, matchers...) if err != nil { return nil, status.Error(codes.Internal, err.Error()) } @@ -396,6 +404,7 @@ func (s *TSDBStore) LabelValues(ctx context.Context, r *storepb.LabelValuesReque Start: r.Start, End: r.End, Func: "series", + Limit: int(r.Limit), } set := q.Select(ctx, false, hints, matchers...) @@ -405,7 +414,10 @@ func (s *TSDBStore) LabelValues(ctx context.Context, r *storepb.LabelValuesReque return &storepb.LabelValuesResponse{}, nil } - res, _, err := q.LabelValues(ctx, r.Label, nil, matchers...) + hints := &storage.LabelHints{ + Limit: int(r.Limit), + } + res, _, err := q.LabelValues(ctx, r.Label, hints, matchers...) 
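// Editor's note: the limit is pushed down into the Prometheus storage layer
// through hints rather than applied by truncating results afterwards; a zero
// r.Limit leaves the hint at its "no limit" default, and hints are advisory,
// so implementations cap results best-effort. A caller-side sketch of the
// same pattern (q, ctx, and matchers assumed from the enclosing scope;
// illustrative only):
//
//	hints := &storage.LabelHints{Limit: 100} // ask for at most ~100 values
//	vals, _, err := q.LabelValues(ctx, "job", hints, matchers...)
//	if err != nil {
//		return nil, status.Error(codes.Internal, err.Error())
//	}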
if err != nil { return nil, status.Error(codes.Internal, err.Error()) } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go index deea149645..6aae83bfd2 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go @@ -12,7 +12,7 @@ import ( ) // DefaultClient is the default Client and is used by Get, Head, Post and PostForm. -// Please be careful of intitialization order - for example, if you change +// Please be careful of initialization order - for example, if you change // the global propagator, the DefaultClient might still be using the old one. var DefaultClient = &http.Client{Transport: NewTransport(http.DefaultTransport)} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go index c1015a9ecc..f0a9bb9efe 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go @@ -100,7 +100,7 @@ func WithPublicEndpoint() Option { }) } -// WithPublicEndpointFn runs with every request, and allows conditionnally +// WithPublicEndpointFn runs with every request, and allows conditionally // configuring the Handler to link the span with an incoming span context. If // this option is not provided or returns false, then the association is a // child association instead of a link. diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go index c64f8beca7..d01bdccf40 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go @@ -12,6 +12,7 @@ import ( "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/propagation" "go.opentelemetry.io/otel/trace" @@ -204,10 +205,15 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http WriteHeader: func(httpsnoop.WriteHeaderFunc) httpsnoop.WriteHeaderFunc { return rww.WriteHeader }, + Flush: func(httpsnoop.FlushFunc) httpsnoop.FlushFunc { + return rww.Flush + }, }) - labeler := &Labeler{} - ctx = injectLabeler(ctx, labeler) + labeler, found := LabelerFromContext(ctx) + if !found { + ctx = ContextWithLabeler(ctx, labeler) + } next.ServeHTTP(w, r.WithContext(ctx)) @@ -225,9 +231,10 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http if rww.statusCode > 0 { attributes = append(attributes, semconv.HTTPStatusCode(rww.statusCode)) } - o := metric.WithAttributes(attributes...) - h.requestBytesCounter.Add(ctx, bw.read.Load(), o) - h.responseBytesCounter.Add(ctx, rww.written, o) + o := metric.WithAttributeSet(attribute.NewSet(attributes...)) + addOpts := []metric.AddOption{o} // Allocate vararg slice once. + h.requestBytesCounter.Add(ctx, bw.read.Load(), addOpts...) + h.responseBytesCounter.Add(ctx, rww.written, addOpts...) 
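// Editor's note: metric.WithAttributes re-sorts and deduplicates its
// arguments on every call; pre-building the attribute.Set does that work
// once, and reusing one []metric.AddOption slice avoids a fresh vararg
// allocation per Add. The pattern in isolation (illustrative counter names):
//
//	set := attribute.NewSet(attribute.String("http.method", "GET"))
//	opts := []metric.AddOption{metric.WithAttributeSet(set)}
//	requestBytes.Add(ctx, read, opts...)     // same opts slice...
//	responseBytes.Add(ctx, written, opts...) // ...reused for both counters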
// Use floating point division here for higher precision (instead of Millisecond method). elapsedTime := float64(time.Since(requestStartTime)) / float64(time.Millisecond) diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go index 9be3feef29..3ec0ad00c8 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go @@ -6,6 +6,8 @@ package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/ import ( "fmt" "net/http" + "os" + "strings" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" @@ -19,40 +21,51 @@ type ResponseTelemetry struct { WriteError error } -type HTTPServer interface { - // RequestTraceAttrs returns trace attributes for an HTTP request received by a - // server. - // - // The server must be the primary server name if it is known. For example this - // would be the ServerName directive - // (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache - // server, and the server_name directive - // (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an - // nginx server. More generically, the primary server name would be the host - // header value that matches the default virtual host of an HTTP server. It - // should include the host identifier and if a port is used to route to the - // server that port identifier should be included as an appropriate port - // suffix. - // - // If the primary server name is not known, server should be an empty string. - // The req Host will be used to determine the server instead. - RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue +type HTTPServer struct { + duplicate bool +} - // ResponseTraceAttrs returns trace attributes for telemetry from an HTTP response. - // - // If any of the fields in the ResponseTelemetry are not set the attribute will be omitted. - ResponseTraceAttrs(ResponseTelemetry) []attribute.KeyValue +// RequestTraceAttrs returns trace attributes for an HTTP request received by a +// server. +// +// The server must be the primary server name if it is known. For example this +// would be the ServerName directive +// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache +// server, and the server_name directive +// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an +// nginx server. More generically, the primary server name would be the host +// header value that matches the default virtual host of an HTTP server. It +// should include the host identifier and if a port is used to route to the +// server that port identifier should be included as an appropriate port +// suffix. +// +// If the primary server name is not known, server should be an empty string. +// The req Host will be used to determine the server instead. +func (s HTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue { + if s.duplicate { + return append(oldHTTPServer{}.RequestTraceAttrs(server, req), newHTTPServer{}.RequestTraceAttrs(server, req)...) + } + return oldHTTPServer{}.RequestTraceAttrs(server, req) +} - // Route returns the attribute for the route. - Route(string) attribute.KeyValue +// ResponseTraceAttrs returns trace attributes for telemetry from an HTTP response. 
+// +// If any of the fields in the ResponseTelemetry are not set the attribute will be omitted. +func (s HTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { + if s.duplicate { + return append(oldHTTPServer{}.ResponseTraceAttrs(resp), newHTTPServer{}.ResponseTraceAttrs(resp)...) + } + return oldHTTPServer{}.ResponseTraceAttrs(resp) } -// var warnOnce = sync.Once{} +// Route returns the attribute for the route. +func (s HTTPServer) Route(route string) attribute.KeyValue { + return oldHTTPServer{}.Route(route) +} func NewHTTPServer() HTTPServer { - // TODO (#5331): Detect version based on environment variable OTEL_HTTP_CLIENT_COMPATIBILITY_MODE. - // TODO (#5331): Add warning of use of a deprecated version of Semantic Versions. - return oldHTTPServer{} + env := strings.ToLower(os.Getenv("OTEL_HTTP_CLIENT_COMPATIBILITY_MODE")) + return HTTPServer{duplicate: env == "http/dup"} } // ServerStatus returns a span status code and message for an HTTP status code diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go index c92076bc3d..e7f293761b 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go @@ -5,8 +5,12 @@ package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/ import ( "net" + "net/http" "strconv" "strings" + + "go.opentelemetry.io/otel/attribute" + semconvNew "go.opentelemetry.io/otel/semconv/v1.24.0" ) // splitHostPort splits a network address hostport of the form "host", @@ -47,3 +51,41 @@ func splitHostPort(hostport string) (host string, port int) { } return host, int(p) } + +func requiredHTTPPort(https bool, port int) int { // nolint:revive + if https { + if port > 0 && port != 443 { + return port + } + } else { + if port > 0 && port != 80 { + return port + } + } + return -1 +} + +func serverClientIP(xForwardedFor string) string { + if idx := strings.Index(xForwardedFor, ","); idx >= 0 { + xForwardedFor = xForwardedFor[:idx] + } + return xForwardedFor +} + +func netProtocol(proto string) (name string, version string) { + name, version, _ = strings.Cut(proto, "/") + name = strings.ToLower(name) + return name, version +} + +var methodLookup = map[string]attribute.KeyValue{ + http.MethodConnect: semconvNew.HTTPRequestMethodConnect, + http.MethodDelete: semconvNew.HTTPRequestMethodDelete, + http.MethodGet: semconvNew.HTTPRequestMethodGet, + http.MethodHead: semconvNew.HTTPRequestMethodHead, + http.MethodOptions: semconvNew.HTTPRequestMethodOptions, + http.MethodPatch: semconvNew.HTTPRequestMethodPatch, + http.MethodPost: semconvNew.HTTPRequestMethodPost, + http.MethodPut: semconvNew.HTTPRequestMethodPut, + http.MethodTrace: semconvNew.HTTPRequestMethodTrace, +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go index d753083b7b..c3e838aaa5 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go @@ -4,6 +4,7 @@ package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" import ( + "errors" 
"io" "net/http" @@ -14,8 +15,6 @@ import ( type oldHTTPServer struct{} -var _ HTTPServer = oldHTTPServer{} - // RequestTraceAttrs returns trace attributes for an HTTP request received by a // server. // @@ -45,7 +44,7 @@ func (o oldHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.Ke if resp.ReadBytes > 0 { attributes = append(attributes, semconv.HTTPRequestContentLength(int(resp.ReadBytes))) } - if resp.ReadError != nil && resp.ReadError != io.EOF { + if resp.ReadError != nil && !errors.Is(resp.ReadError, io.EOF) { // This is not in the semantic conventions, but is historically provided attributes = append(attributes, attribute.String("http.read_error", resp.ReadError.Error())) } @@ -55,7 +54,7 @@ func (o oldHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.Ke if resp.StatusCode > 0 { attributes = append(attributes, semconv.HTTPStatusCode(resp.StatusCode)) } - if resp.WriteError != nil && resp.WriteError != io.EOF { + if resp.WriteError != nil && !errors.Is(resp.WriteError, io.EOF) { // This is not in the semantic conventions, but is historically provided attributes = append(attributes, attribute.String("http.write_error", resp.WriteError.Error())) } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.24.0.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.24.0.go new file mode 100644 index 0000000000..0c5d4c4608 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.24.0.go @@ -0,0 +1,197 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" + +import ( + "net/http" + "strings" + + "go.opentelemetry.io/otel/attribute" + semconvNew "go.opentelemetry.io/otel/semconv/v1.24.0" +) + +type newHTTPServer struct{} + +// TraceRequest returns trace attributes for an HTTP request received by a +// server. +// +// The server must be the primary server name if it is known. For example this +// would be the ServerName directive +// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache +// server, and the server_name directive +// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an +// nginx server. More generically, the primary server name would be the host +// header value that matches the default virtual host of an HTTP server. It +// should include the host identifier and if a port is used to route to the +// server that port identifier should be included as an appropriate port +// suffix. +// +// If the primary server name is not known, server should be an empty string. +// The req Host will be used to determine the server instead. +func (n newHTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue { + count := 3 // ServerAddress, Method, Scheme + + var host string + var p int + if server == "" { + host, p = splitHostPort(req.Host) + } else { + // Prioritize the primary server name. 
+ host, p = splitHostPort(server) + if p < 0 { + _, p = splitHostPort(req.Host) + } + } + + hostPort := requiredHTTPPort(req.TLS != nil, p) + if hostPort > 0 { + count++ + } + + method, methodOriginal := n.method(req.Method) + if methodOriginal != (attribute.KeyValue{}) { + count++ + } + + scheme := n.scheme(req.TLS != nil) + + if peer, peerPort := splitHostPort(req.RemoteAddr); peer != "" { + // The Go HTTP server sets RemoteAddr to "IP:port", this will not be a + // file-path that would be interpreted with a sock family. + count++ + if peerPort > 0 { + count++ + } + } + + useragent := req.UserAgent() + if useragent != "" { + count++ + } + + clientIP := serverClientIP(req.Header.Get("X-Forwarded-For")) + if clientIP != "" { + count++ + } + + if req.URL != nil && req.URL.Path != "" { + count++ + } + + protoName, protoVersion := netProtocol(req.Proto) + if protoName != "" && protoName != "http" { + count++ + } + if protoVersion != "" { + count++ + } + + attrs := make([]attribute.KeyValue, 0, count) + attrs = append(attrs, + semconvNew.ServerAddress(host), + method, + scheme, + ) + + if hostPort > 0 { + attrs = append(attrs, semconvNew.ServerPort(hostPort)) + } + if methodOriginal != (attribute.KeyValue{}) { + attrs = append(attrs, methodOriginal) + } + + if peer, peerPort := splitHostPort(req.RemoteAddr); peer != "" { + // The Go HTTP server sets RemoteAddr to "IP:port", this will not be a + // file-path that would be interpreted with a sock family. + attrs = append(attrs, semconvNew.NetworkPeerAddress(peer)) + if peerPort > 0 { + attrs = append(attrs, semconvNew.NetworkPeerPort(peerPort)) + } + } + + if useragent := req.UserAgent(); useragent != "" { + attrs = append(attrs, semconvNew.UserAgentOriginal(useragent)) + } + + if clientIP != "" { + attrs = append(attrs, semconvNew.ClientAddress(clientIP)) + } + + if req.URL != nil && req.URL.Path != "" { + attrs = append(attrs, semconvNew.URLPath(req.URL.Path)) + } + + if protoName != "" && protoName != "http" { + attrs = append(attrs, semconvNew.NetworkProtocolName(protoName)) + } + if protoVersion != "" { + attrs = append(attrs, semconvNew.NetworkProtocolVersion(protoVersion)) + } + + return attrs +} + +func (n newHTTPServer) method(method string) (attribute.KeyValue, attribute.KeyValue) { + if method == "" { + return semconvNew.HTTPRequestMethodGet, attribute.KeyValue{} + } + if attr, ok := methodLookup[method]; ok { + return attr, attribute.KeyValue{} + } + + orig := semconvNew.HTTPRequestMethodOriginal(method) + if attr, ok := methodLookup[strings.ToUpper(method)]; ok { + return attr, orig + } + return semconvNew.HTTPRequestMethodGet, orig +} + +func (n newHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive + if https { + return semconvNew.URLScheme("https") + } + return semconvNew.URLScheme("http") +} + +// TraceResponse returns trace attributes for telemetry from an HTTP response. +// +// If any of the fields in the ResponseTelemetry are not set the attribute will be omitted. 
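// Editor's note: RequestTraceAttrs above and ResponseTraceAttrs below share
// a count-then-append shape: the exact number of attributes is computed
// first so the slice is allocated once at the right capacity. Stripped to
// its core (illustrative):
//
//	count := 0
//	if resp.ReadBytes > 0 {
//		count++
//	}
//	if resp.StatusCode > 0 {
//		count++
//	}
//	attrs := make([]attribute.KeyValue, 0, count) // single allocation
//	if resp.ReadBytes > 0 {
//		attrs = append(attrs, semconvNew.HTTPRequestBodySize(int(resp.ReadBytes)))
//	}
//	if resp.StatusCode > 0 {
//		attrs = append(attrs, semconvNew.HTTPResponseStatusCode(resp.StatusCode))
//	}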
+func (n newHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { + var count int + + if resp.ReadBytes > 0 { + count++ + } + if resp.WriteBytes > 0 { + count++ + } + if resp.StatusCode > 0 { + count++ + } + + attributes := make([]attribute.KeyValue, 0, count) + + if resp.ReadBytes > 0 { + attributes = append(attributes, + semconvNew.HTTPRequestBodySize(int(resp.ReadBytes)), + ) + } + if resp.WriteBytes > 0 { + attributes = append(attributes, + semconvNew.HTTPResponseBodySize(int(resp.WriteBytes)), + ) + } + if resp.StatusCode > 0 { + attributes = append(attributes, + semconvNew.HTTPResponseStatusCode(resp.StatusCode), + ) + } + + return attributes +} + +// Route returns the attribute for the route. +func (n newHTTPServer) Route(route string) attribute.KeyValue { + return semconvNew.HTTPRoute(route) +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go index d5c0093fc4..a9a9226b39 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go @@ -92,7 +92,7 @@ func (c *netConv) Host(address string) []attribute.KeyValue { attrs := make([]attribute.KeyValue, 0, n) attrs = append(attrs, c.HostName(h)) if p > 0 { - attrs = append(attrs, c.HostPort(int(p))) + attrs = append(attrs, c.HostPort(p)) } return attrs } @@ -138,7 +138,7 @@ func (c *netConv) Peer(address string) []attribute.KeyValue { attrs := make([]attribute.KeyValue, 0, n) attrs = append(attrs, c.PeerName(h)) if p > 0 { - attrs = append(attrs, c.PeerPort(int(p))) + attrs = append(attrs, c.PeerPort(p)) } return attrs } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go index 1548b2db63..ea504e396f 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go @@ -37,8 +37,12 @@ type labelerContextKeyType int const lablelerContextKey labelerContextKeyType = 0 -func injectLabeler(ctx context.Context, l *Labeler) context.Context { - return context.WithValue(ctx, lablelerContextKey, l) +// ContextWithLabeler returns a new context with the provided Labeler instance. +// Attributes added to the specified labeler will be injected into metrics +// emitted by the instrumentation. Only one labeller can be injected into the +// context. Injecting it multiple times will override the previous calls. 
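// Editor's note: with the unexported injectLabeler replaced by this exported
// pair, callers can pre-seed custom metric attributes and the handler or
// transport will reuse their Labeler instead of creating a fresh one. A
// minimal usage sketch (illustrative attribute):
//
//	labeler := &otelhttp.Labeler{}
//	ctx := otelhttp.ContextWithLabeler(r.Context(), labeler)
//	labeler.Add(attribute.String("tenant", "team-a")) // shows up on metrics
//	next.ServeHTTP(w, r.WithContext(ctx))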
+func ContextWithLabeler(parent context.Context, l *Labeler) context.Context { + return context.WithValue(parent, lablelerContextKey, l) } // LabelerFromContext retrieves a Labeler instance from the provided context if diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go index 8a25e58657..0d3cb2e4aa 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go @@ -11,15 +11,14 @@ import ( "sync/atomic" "time" - "go.opentelemetry.io/otel/metric" - "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/propagation" - "go.opentelemetry.io/otel/trace" - semconv "go.opentelemetry.io/otel/semconv/v1.20.0" + "go.opentelemetry.io/otel/trace" ) // Transport implements the http.RoundTripper interface and wraps @@ -137,8 +136,10 @@ func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) { ctx = httptrace.WithClientTrace(ctx, t.clientTrace(ctx)) } - labeler := &Labeler{} - ctx = injectLabeler(ctx, labeler) + labeler, found := LabelerFromContext(ctx) + if !found { + ctx = ContextWithLabeler(ctx, labeler) + } r = r.Clone(ctx) // According to RoundTripper spec, we shouldn't modify the origin request. @@ -170,11 +171,12 @@ func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) { if res.StatusCode > 0 { metricAttrs = append(metricAttrs, semconv.HTTPStatusCode(res.StatusCode)) } - o := metric.WithAttributes(metricAttrs...) - t.requestBytesCounter.Add(ctx, bw.read.Load(), o) + o := metric.WithAttributeSet(attribute.NewSet(metricAttrs...)) + addOpts := []metric.AddOption{o} // Allocate vararg slice once. + t.requestBytesCounter.Add(ctx, bw.read.Load(), addOpts...) // For handling response bytes we leverage a callback when the client reads the http response readRecordFunc := func(n int64) { - t.responseBytesCounter.Add(ctx, n, o) + t.responseBytesCounter.Add(ctx, n, addOpts...) } // traces diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go index 22e485dd7d..b0957f28ce 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go @@ -5,7 +5,7 @@ package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http // Version is the current release version of the otelhttp instrumentation. 
func Version() string { - return "0.52.0" + return "0.53.0" // This string is updated by the pre_release.sh script during release } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go index 2f4cc124dc..948f8406c0 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go @@ -87,3 +87,13 @@ func (w *respWriterWrapper) WriteHeader(statusCode int) { } w.ResponseWriter.WriteHeader(statusCode) } + +func (w *respWriterWrapper) Flush() { + if !w.wroteHeader { + w.WriteHeader(http.StatusOK) + } + + if f, ok := w.ResponseWriter.(http.Flusher); ok { + f.Flush() + } +} diff --git a/vendor/go.opentelemetry.io/contrib/propagators/b3/version.go b/vendor/go.opentelemetry.io/contrib/propagators/b3/version.go index 88f1309f30..b05991e172 100644 --- a/vendor/go.opentelemetry.io/contrib/propagators/b3/version.go +++ b/vendor/go.opentelemetry.io/contrib/propagators/b3/version.go @@ -5,7 +5,7 @@ package b3 // import "go.opentelemetry.io/contrib/propagators/b3" // Version is the current release version of the B3 propagator. func Version() string { - return "1.28.0" + return "1.29.0" // This string is updated by the pre_release.sh script during release } diff --git a/vendor/go.opentelemetry.io/contrib/propagators/jaeger/version.go b/vendor/go.opentelemetry.io/contrib/propagators/jaeger/version.go index a64230ebaf..eb220fbfe6 100644 --- a/vendor/go.opentelemetry.io/contrib/propagators/jaeger/version.go +++ b/vendor/go.opentelemetry.io/contrib/propagators/jaeger/version.go @@ -5,7 +5,7 @@ package jaeger // import "go.opentelemetry.io/contrib/propagators/jaeger" // Version is the current release version of the Jaeger propagator. func Version() string { - return "1.28.0" + return "1.29.0" // This string is updated by the pre_release.sh script during release } diff --git a/vendor/go.opentelemetry.io/contrib/propagators/ot/version.go b/vendor/go.opentelemetry.io/contrib/propagators/ot/version.go index 3c726b1896..3c2eca9865 100644 --- a/vendor/go.opentelemetry.io/contrib/propagators/ot/version.go +++ b/vendor/go.opentelemetry.io/contrib/propagators/ot/version.go @@ -5,7 +5,7 @@ package ot // import "go.opentelemetry.io/contrib/propagators/ot" // Version is the current release version of the ot propagator. func Version() string { - return "1.28.0" + return "1.29.0" // This string is updated by the pre_release.sh script during release } diff --git a/vendor/go.opentelemetry.io/otel/.golangci.yml b/vendor/go.opentelemetry.io/otel/.golangci.yml index 6d9c8b6495..d9abe194d9 100644 --- a/vendor/go.opentelemetry.io/otel/.golangci.yml +++ b/vendor/go.opentelemetry.io/otel/.golangci.yml @@ -9,6 +9,8 @@ linters: disable-all: true # Specifically enable linters we want to use. enable: + - asasalint + - bodyclose - depguard - errcheck - errorlint diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md index c01e6998e0..6107c17b89 100644 --- a/vendor/go.opentelemetry.io/otel/CHANGELOG.md +++ b/vendor/go.opentelemetry.io/otel/CHANGELOG.md @@ -8,6 +8,64 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm ## [Unreleased] + + + +## [1.29.0/0.51.0/0.5.0] 2024-08-23 + +This release is the last to support [Go 1.21]. +The next release will require at least [Go 1.22]. 
+ +### Added + +- Add MacOS ARM64 platform to the compatibility testing suite. (#5577) +- Add `InstrumentationScope` field to `SpanStub` in `go.opentelemetry.io/otel/sdk/trace/tracetest`, as a replacement for the deprecated `InstrumentationLibrary`. (#5627) +- Make the initial release of `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`. + This new module contains an OTLP exporter that transmits log telemetry using gRPC. + This module is unstable and breaking changes may be introduced. + See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. (#5629) +- Add `Walk` function to `TraceState` in `go.opentelemetry.io/otel/trace` to iterate all the key-value pairs. (#5651) +- Bridge the trace state in `go.opentelemetry.io/otel/bridge/opencensus`. (#5651) +- Zero value of `SimpleProcessor` in `go.opentelemetry.io/otel/sdk/log` no longer panics. (#5665) +- The `FilterProcessor` interface type is added in `go.opentelemetry.io/otel/sdk/log/internal/x`. + This is an optional and experimental interface that log `Processor`s can implement to instruct the `Logger` if a `Record` will be processed or not. + It replaces the existing `Enabled` method that is removed from the `Processor` interface itself. + It does not fall within the scope of the OpenTelemetry Go versioning and stability [policy](./VERSIONING.md) and it may be changed in backwards incompatible ways or removed in feature releases. (#5692) +- Support [Go 1.23]. (#5720) + +### Changed + +- `NewMemberRaw`, `NewKeyProperty` and `NewKeyValuePropertyRaw` in `go.opentelemetry.io/otel/baggage` allow UTF-8 string in key. (#5132) +- `Processor.OnEmit` in `go.opentelemetry.io/otel/sdk/log` now accepts a pointer to `Record` instead of a value so that the record modifications done in a processor are propagated to subsequent registered processors. (#5636) +- `SimpleProcessor.Enabled` in `go.opentelemetry.io/otel/sdk/log` now returns `false` if the exporter is `nil`. (#5665) +- Update the concurrency requirements of `Exporter` in `go.opentelemetry.io/otel/sdk/log`. (#5666) +- `SimpleProcessor` in `go.opentelemetry.io/otel/sdk/log` synchronizes `OnEmit` calls. (#5666) +- The `Processor` interface in `go.opentelemetry.io/otel/sdk/log` no longer includes the `Enabled` method. + See the `FilterProcessor` interface type added in `go.opentelemetry.io/otel/sdk/log/internal/x` to continue providing this functionality. (#5692) +- The `SimpleProcessor` type in `go.opentelemetry.io/otel/sdk/log` is no longer comparable. (#5693) +- The `BatchProcessor` type in `go.opentelemetry.io/otel/sdk/log` is no longer comparable. (#5693) + +### Fixed + +- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5584) +- Pass the underlying error rather than a generic retry-able failure in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`, `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5541) +- Correct the `Tracer`, `Meter`, and `Logger` names used in `go.opentelemetry.io/otel/example/dice`. (#5612) +- Correct the `Tracer` names used in `go.opentelemetry.io/otel/example/namedtracer`. (#5612) +- Correct the `Tracer` name used in `go.opentelemetry.io/otel/example/opencensus`. 
(#5612) +- Correct the `Tracer` and `Meter` names used in `go.opentelemetry.io/otel/example/otel-collector`. (#5612) +- Correct the `Tracer` names used in `go.opentelemetry.io/otel/example/passthrough`. (#5612) +- Correct the `Meter` name used in `go.opentelemetry.io/otel/example/prometheus`. (#5612) +- Correct the `Tracer` names used in `go.opentelemetry.io/otel/example/zipkin`. (#5612) +- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#5641) +- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#5650) +- Stop percent encoding header environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`, `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`, `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` (#5705) +- Remove invalid environment variable header keys in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`, `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`, `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` (#5705) + +### Removed + +- The `Enabled` method of the `SimpleProcessor` in `go.opentelemetry.io/otel/sdk/log` is removed. (#5692) +- The `Enabled` method of the `BatchProcessor` in `go.opentelemetry.io/otel/sdk/log` is removed. (#5692) + ## [1.28.0/0.50.0/0.4.0] 2024-07-02 ### Added @@ -49,6 +107,7 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm - Fix stale timestamps reported by the last-value aggregation. (#5517) - Indicate the `Exporter` in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` must be created by the `New` method. (#5521) - Improved performance in all `{Bool,Int64,Float64,String}SliceValue` functions of `go.opentelemetry.io/attributes` by reducing the number of allocations. (#5549) +- Replace invalid percent-encoded octet sequences with replacement char in `go.opentelemetry.io/otel/baggage`. (#5528) ## [1.27.0/0.49.0/0.3.0] 2024-05-21 @@ -175,7 +234,7 @@ The next release will require at least [Go 1.21]. This module includes OpenTelemetry Go's implementation of the Logs Bridge API. This module is in an alpha state, it is subject to breaking changes. See our [versioning policy](./VERSIONING.md) for more info. (#4961) -- ARM64 platform to the compatibility testing suite. (#4994) +- Add ARM64 platform to the compatibility testing suite. (#4994) ### Fixed @@ -3003,7 +3062,8 @@ It contains api and sdk for trace and meter. - CircleCI build CI manifest files. - CODEOWNERS file to track owners of this project. 
-[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.28.0...HEAD +[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.29.0...HEAD +[1.29.0/0.51.0/0.5.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.29.0 [1.28.0/0.50.0/0.4.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.28.0 [1.27.0/0.49.0/0.3.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.27.0 [1.26.0/0.48.0/0.2.0-alpha]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.26.0 @@ -3086,6 +3146,9 @@ It contains api and sdk for trace and meter. [0.1.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.1 [0.1.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.0 + + +[Go 1.23]: https://go.dev/doc/go1.23 [Go 1.22]: https://go.dev/doc/go1.22 [Go 1.21]: https://go.dev/doc/go1.21 [Go 1.20]: https://go.dev/doc/go1.20 diff --git a/vendor/go.opentelemetry.io/otel/CODEOWNERS b/vendor/go.opentelemetry.io/otel/CODEOWNERS index 2025549332..5904bb7070 100644 --- a/vendor/go.opentelemetry.io/otel/CODEOWNERS +++ b/vendor/go.opentelemetry.io/otel/CODEOWNERS @@ -5,7 +5,7 @@ ##################################################### # # Learn about membership in OpenTelemetry community: -# https://github.com/open-telemetry/community/blob/main/community-membership.md +# https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md # # # Learn about CODEOWNERS file format: diff --git a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md index b86572f58e..b7402576f9 100644 --- a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md +++ b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md @@ -650,7 +650,7 @@ should be canceled. ### Become an Approver or a Maintainer See the [community membership document in OpenTelemetry community -repo](https://github.com/open-telemetry/community/blob/main/community-membership.md). +repo](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md). [Approver]: #approvers [Maintainer]: #maintainers diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile index f33619f76a..070b1e57df 100644 --- a/vendor/go.opentelemetry.io/otel/Makefile +++ b/vendor/go.opentelemetry.io/otel/Makefile @@ -178,17 +178,14 @@ test-coverage: $(GOCOVMERGE) done; \ $(GOCOVMERGE) $$(find . -name coverage.out) > coverage.txt -# Adding a directory will include all benchmarks in that directory if a filter is not specified. -BENCHMARK_TARGETS := sdk/trace .PHONY: benchmark -benchmark: $(BENCHMARK_TARGETS:%=benchmark/%) -BENCHMARK_FILTER = . -# You can override the filter for a particular directory by adding a rule here. -benchmark/sdk/trace: BENCHMARK_FILTER = SpanWithAttributes_8/AlwaysSample +benchmark: $(OTEL_GO_MOD_DIRS:%=benchmark/%) benchmark/%: - @echo "$(GO) test -timeout $(TIMEOUT)s -run=xxxxxMatchNothingxxxxx -bench=$(BENCHMARK_FILTER) $*..." \ + @echo "$(GO) test -run=xxxxxMatchNothingxxxxx -bench=. $*..." \ && cd $* \ - $(foreach filter, $(BENCHMARK_FILTER), && $(GO) test -timeout $(TIMEOUT)s -run=xxxxxMatchNothingxxxxx -bench=$(filter)) + && $(GO) list ./... \ + | grep -v third_party \ + | xargs $(GO) test -run=xxxxxMatchNothingxxxxx -bench=. 
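# Editor's note: the rewritten benchmark recipe above runs benchmarks for
# every Go module directory instead of the single hard-coded sdk/trace
# target. The -run=xxxxxMatchNothingxxxxx pattern matches no unit tests, so
# per package the effective invocation is benchmarks only, roughly:
#
#	go test -run=xxxxxMatchNothingxxxxx -bench=. ./...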
.PHONY: golangci-lint golangci-lint-fix golangci-lint-fix: ARGS=--fix diff --git a/vendor/go.opentelemetry.io/otel/README.md b/vendor/go.opentelemetry.io/otel/README.md index 5a89093173..657df34710 100644 --- a/vendor/go.opentelemetry.io/otel/README.md +++ b/vendor/go.opentelemetry.io/otel/README.md @@ -47,20 +47,29 @@ stop ensuring compatibility with these versions in the following manner: Currently, this project supports the following environments. -| OS | Go Version | Architecture | -|---------|------------|--------------| -| Ubuntu | 1.22 | amd64 | -| Ubuntu | 1.21 | amd64 | -| Ubuntu | 1.22 | 386 | -| Ubuntu | 1.21 | 386 | -| Linux | 1.22 | arm64 | -| Linux | 1.21 | arm64 | -| MacOS | 1.22 | amd64 | -| MacOS | 1.21 | amd64 | -| Windows | 1.22 | amd64 | -| Windows | 1.21 | amd64 | -| Windows | 1.22 | 386 | -| Windows | 1.21 | 386 | +| OS | Go Version | Architecture | +|----------|------------|--------------| +| Ubuntu | 1.23 | amd64 | +| Ubuntu | 1.22 | amd64 | +| Ubuntu | 1.21 | amd64 | +| Ubuntu | 1.23 | 386 | +| Ubuntu | 1.22 | 386 | +| Ubuntu | 1.21 | 386 | +| Linux | 1.23 | arm64 | +| Linux | 1.22 | arm64 | +| Linux | 1.21 | arm64 | +| macOS 13 | 1.23 | amd64 | +| macOS 13 | 1.22 | amd64 | +| macOS 13 | 1.21 | amd64 | +| macOS | 1.23 | arm64 | +| macOS | 1.22 | arm64 | +| macOS | 1.21 | arm64 | +| Windows | 1.23 | amd64 | +| Windows | 1.22 | amd64 | +| Windows | 1.21 | amd64 | +| Windows | 1.23 | 386 | +| Windows | 1.22 | 386 | +| Windows | 1.21 | 386 | While this project should work for other systems, no compatibility guarantees are made for those systems currently. diff --git a/vendor/go.opentelemetry.io/otel/RELEASING.md b/vendor/go.opentelemetry.io/otel/RELEASING.md index 940f57f3d8..59992984d4 100644 --- a/vendor/go.opentelemetry.io/otel/RELEASING.md +++ b/vendor/go.opentelemetry.io/otel/RELEASING.md @@ -69,6 +69,7 @@ Update go.mod for submodules to depend on the new release which will happen in t ``` - Move all the `Unreleased` changes into a new section following the title scheme (`[] - `). + - Make sure the new section is under the comment for released section, like ``, so it is protected from being overwritten in the future. - Update all the appropriate links at the bottom. 4. Push the changes to upstream and create a Pull Request on GitHub. diff --git a/vendor/go.opentelemetry.io/otel/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/baggage/baggage.go index c40c896cc6..b3569e95e5 100644 --- a/vendor/go.opentelemetry.io/otel/baggage/baggage.go +++ b/vendor/go.opentelemetry.io/otel/baggage/baggage.go @@ -44,9 +44,15 @@ type Property struct { // NewKeyProperty returns a new Property for key. // +// The passed key must be valid, non-empty UTF-8 string. // If key is invalid, an error will be returned. +// However, the specific Propagators that are used to transmit baggage entries across +// component boundaries may impose their own restrictions on Property key. +// For example, the W3C Baggage specification restricts the Property keys to strings that +// satisfy the token definition from RFC7230, Section 3.2.6. +// For maximum compatibility, alpha-numeric value are strongly recommended to be used as Property key. 
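// Editor's note: construction-time validation is relaxed from W3C token
// keys to any non-empty UTF-8 "baggage name"; the W3C restriction now bites
// only at serialization time, where non-token keys are dropped. A sketch of
// the observable effect (illustrative values):
//
//	m1, _ := baggage.NewMemberRaw("userId", "42") // token key
//	m2, _ := baggage.NewMemberRaw("ключ", "v")    // UTF-8 key: accepted
//	b, _ := baggage.New(m1, m2)
//	fmt.Println(b.String()) // "userId=42" — the non-token member is omitted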
func NewKeyProperty(key string) (Property, error) { - if !validateKey(key) { + if !validateBaggageName(key) { return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key) } @@ -62,6 +68,10 @@ func NewKeyProperty(key string) (Property, error) { // Notice: Consider using [NewKeyValuePropertyRaw] instead // that does not require percent-encoding of the value. func NewKeyValueProperty(key, value string) (Property, error) { + if !validateKey(key) { + return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key) + } + if !validateValue(value) { return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value) } @@ -74,11 +84,20 @@ func NewKeyValueProperty(key, value string) (Property, error) { // NewKeyValuePropertyRaw returns a new Property for key with value. // -// The passed key must be compliant with W3C Baggage specification. +// The passed key must be valid, non-empty UTF-8 string. +// The passed value must be valid UTF-8 string. +// However, the specific Propagators that are used to transmit baggage entries across +// component boundaries may impose their own restrictions on Property key. +// For example, the W3C Baggage specification restricts the Property keys to strings that +// satisfy the token definition from RFC7230, Section 3.2.6. +// For maximum compatibility, alpha-numeric value are strongly recommended to be used as Property key. func NewKeyValuePropertyRaw(key, value string) (Property, error) { - if !validateKey(key) { + if !validateBaggageName(key) { return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key) } + if !validateBaggageValue(value) { + return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value) + } p := Property{ key: key, @@ -115,12 +134,15 @@ func (p Property) validate() error { return fmt.Errorf("invalid property: %w", err) } - if !validateKey(p.key) { + if !validateBaggageName(p.key) { return errFunc(fmt.Errorf("%w: %q", errInvalidKey, p.key)) } if !p.hasValue && p.value != "" { return errFunc(errors.New("inconsistent value")) } + if p.hasValue && !validateBaggageValue(p.value) { + return errFunc(fmt.Errorf("%w: %q", errInvalidValue, p.value)) + } return nil } @@ -138,7 +160,15 @@ func (p Property) Value() (string, bool) { // String encodes Property into a header string compliant with the W3C Baggage // specification. +// It would return empty string if the key is invalid with the W3C Baggage +// specification. This could happen for a UTF-8 key, as it may contain +// invalid characters. func (p Property) String() string { + // W3C Baggage specification does not allow percent-encoded keys. + if !validateKey(p.key) { + return "" + } + if p.hasValue { return fmt.Sprintf("%s%s%v", p.key, keyValueDelimiter, valueEscape(p.value)) } @@ -203,9 +233,14 @@ func (p properties) validate() error { // String encodes properties into a header string compliant with the W3C Baggage // specification. func (p properties) String() string { - props := make([]string, len(p)) - for i, prop := range p { - props[i] = prop.String() + props := make([]string, 0, len(p)) + for _, prop := range p { + s := prop.String() + + // Ignored empty properties. + if s != "" { + props = append(props, s) + } } return strings.Join(props, propertyDelimiter) } @@ -230,6 +265,10 @@ type Member struct { // Notice: Consider using [NewMemberRaw] instead // that does not require percent-encoding of the value. 
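// Editor's note: the practical difference between the two constructors,
// sketched with illustrative values — per the notice above, NewMember takes
// a percent-encoded value (upstream it decodes the value before storing),
// while NewMemberRaw stores the value as given:
//
//	m1, _ := baggage.NewMember("k", "a%20b")  // stored value: "a b"
//	m2, _ := baggage.NewMemberRaw("k", "a b") // stored value: "a b"
//	// both serialize back as "k=a%20b" via Member.String()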
func NewMember(key, value string, props ...Property) (Member, error) { + if !validateKey(key) { + return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidKey, key) + } + if !validateValue(value) { return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value) } @@ -242,7 +281,13 @@ func NewMember(key, value string, props ...Property) (Member, error) { // NewMemberRaw returns a new Member from the passed arguments. // -// The passed key must be compliant with W3C Baggage specification. +// The passed key must be valid, non-empty UTF-8 string. +// The passed value must be valid UTF-8 string. +// However, the specific Propagators that are used to transmit baggage entries across +// component boundaries may impose their own restrictions on baggage key. +// For example, the W3C Baggage specification restricts the baggage keys to strings that +// satisfy the token definition from RFC7230, Section 3.2.6. +// For maximum compatibility, alpha-numeric value are strongly recommended to be used as baggage key. func NewMemberRaw(key, value string, props ...Property) (Member, error) { m := Member{ key: key, @@ -294,19 +339,45 @@ func parseMember(member string) (Member, error) { return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidKey, key) } - val := strings.TrimSpace(v) - if !validateValue(val) { + rawVal := strings.TrimSpace(v) + if !validateValue(rawVal) { return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, v) } // Decode a percent-encoded value. - value, err := url.PathUnescape(val) + unescapeVal, err := url.PathUnescape(rawVal) if err != nil { return newInvalidMember(), fmt.Errorf("%w: %w", errInvalidValue, err) } + + value := replaceInvalidUTF8Sequences(len(rawVal), unescapeVal) return Member{key: key, value: value, properties: props, hasData: true}, nil } +// replaceInvalidUTF8Sequences replaces invalid UTF-8 sequences with '�'. +func replaceInvalidUTF8Sequences(cap int, unescapeVal string) string { + if utf8.ValidString(unescapeVal) { + return unescapeVal + } + // W3C baggage spec: + // https://github.com/w3c/baggage/blob/8c215efbeebd3fa4b1aceb937a747e56444f22f3/baggage/HTTP_HEADER_FORMAT.md?plain=1#L69 + + var b strings.Builder + b.Grow(cap) + for i := 0; i < len(unescapeVal); { + r, size := utf8.DecodeRuneInString(unescapeVal[i:]) + if r == utf8.RuneError && size == 1 { + // Invalid UTF-8 sequence found, replace it with '�' + _, _ = b.WriteString("�") + } else { + _, _ = b.WriteRune(r) + } + i += size + } + + return b.String() +} + // validate ensures m conforms to the W3C Baggage specification. // A key must be an ASCII string, returning an error otherwise. func (m Member) validate() error { @@ -314,9 +385,12 @@ func (m Member) validate() error { return fmt.Errorf("%w: %q", errInvalidMember, m) } - if !validateKey(m.key) { + if !validateBaggageName(m.key) { return fmt.Errorf("%w: %q", errInvalidKey, m.key) } + if !validateBaggageValue(m.value) { + return fmt.Errorf("%w: %q", errInvalidValue, m.value) + } return m.properties.validate() } @@ -331,10 +405,15 @@ func (m Member) Properties() []Property { return m.properties.Copy() } // String encodes Member into a header string compliant with the W3C Baggage // specification. +// It would return empty string if the key is invalid with the W3C Baggage +// specification. This could happen for a UTF-8 key, as it may contain +// invalid characters. func (m Member) String() string { - // A key is just an ASCII string. 
A value is restricted to be - // US-ASCII characters excluding CTLs, whitespace, - // DQUOTE, comma, semicolon, and backslash. + // W3C Baggage specification does not allow percent-encoded keys. + if !validateKey(m.key) { + return "" + } + s := m.key + keyValueDelimiter + valueEscape(m.value) if len(m.properties) > 0 { s += propertyDelimiter + m.properties.String() @@ -448,7 +527,7 @@ func (b Baggage) Member(key string) Member { } // Members returns all the baggage list-members. -// The order of the returned list-members does not have significance. +// The order of the returned list-members is not significant. // // The returned members are not validated, as we assume the validation happened // when they were added to the Baggage. @@ -469,8 +548,8 @@ func (b Baggage) Members() []Member { return members } -// SetMember returns a copy the Baggage with the member included. If the -// baggage contains a Member with the same key the existing Member is +// SetMember returns a copy of the Baggage with the member included. If the +// baggage contains a Member with the same key, the existing Member is // replaced. // // If member is invalid according to the W3C Baggage specification, an error @@ -528,14 +607,22 @@ func (b Baggage) Len() int { // String encodes Baggage into a header string compliant with the W3C Baggage // specification. +// It would ignore members where the member key is invalid with the W3C Baggage +// specification. This could happen for a UTF-8 key, as it may contain +// invalid characters. func (b Baggage) String() string { members := make([]string, 0, len(b.list)) for k, v := range b.list { - members = append(members, Member{ + s := Member{ key: k, value: v.Value, properties: fromInternalProperties(v.Properties), - }.String()) + }.String() + + // Ignored empty members. + if s != "" { + members = append(members, s) + } } return strings.Join(members, listDelimiter) } @@ -607,10 +694,12 @@ func parsePropertyInternal(s string) (p Property, ok bool) { } // Decode a percent-encoded value. - value, err := url.PathUnescape(s[valueStart:valueEnd]) + rawVal := s[valueStart:valueEnd] + unescapeVal, err := url.PathUnescape(rawVal) if err != nil { return } + value := replaceInvalidUTF8Sequences(len(rawVal), unescapeVal) ok = true p.key = s[keyStart:keyEnd] @@ -720,6 +809,24 @@ var safeKeyCharset = [utf8.RuneSelf]bool{ '~': true, } +// validateBaggageName checks if the string is a valid OpenTelemetry Baggage name. +// Baggage name is a valid, non-empty UTF-8 string. +func validateBaggageName(s string) bool { + if len(s) == 0 { + return false + } + + return utf8.ValidString(s) +} + +// validateBaggageValue checks if the string is a valid OpenTelemetry Baggage value. +// Baggage value is a valid UTF-8 strings. +// Empty string is also a valid UTF-8 string. +func validateBaggageValue(s string) bool { + return utf8.ValidString(s) +} + +// validateKey checks if the string is a valid W3C Baggage key. func validateKey(s string) bool { if len(s) == 0 { return false @@ -738,6 +845,7 @@ func validateKeyChar(c int32) bool { return c >= 0 && c < int32(utf8.RuneSelf) && safeKeyCharset[c] } +// validateValue checks if the string is a valid W3C Baggage value. 
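// Editor's note: replaceInvalidUTF8Sequences above follows the linked W3C
// baggage guidance of mapping undecodable bytes to U+FFFD, emitting one
// replacement rune per invalid byte. Observable behavior, sketched:
//
//	unescaped, _ := url.PathUnescape("a%80b") // 0x80 alone is invalid UTF-8
//	fmt.Println(utf8.ValidString(unescaped))  // false
//	// after replacement the stored value is "a�b"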
diff --git a/vendor/go.opentelemetry.io/otel/codes/codes.go b/vendor/go.opentelemetry.io/otel/codes/codes.go
index df29d96a6d..2acbac3546 100644
--- a/vendor/go.opentelemetry.io/otel/codes/codes.go
+++ b/vendor/go.opentelemetry.io/otel/codes/codes.go
@@ -83,7 +83,7 @@ func (c *Code) UnmarshalJSON(b []byte) error {
 			return fmt.Errorf("invalid code: %q", ci)
 		}
 
-		*c = Code(ci)
+		*c = Code(ci) // nolint: gosec // Bit size of 32 checked above.
 		return nil
 	}
 	return fmt.Errorf("invalid code: %q", string(b))
diff --git a/vendor/go.opentelemetry.io/otel/doc.go b/vendor/go.opentelemetry.io/otel/doc.go
index 441c595014..921f85961a 100644
--- a/vendor/go.opentelemetry.io/otel/doc.go
+++ b/vendor/go.opentelemetry.io/otel/doc.go
@@ -17,6 +17,8 @@ To read more about tracing, see go.opentelemetry.io/otel/trace.
 
 To read more about metrics, see go.opentelemetry.io/otel/metric.
 
+To read more about logs, see go.opentelemetry.io/otel/log.
+
 To read more about propagation, see go.opentelemetry.io/otel/propagation and
 go.opentelemetry.io/otel/baggage.
 */
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go
index c3c69c5a0d..81157a71c5 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go
@@ -4,6 +4,8 @@
 package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform"
 
 import (
+	"math"
+
 	"go.opentelemetry.io/otel/attribute"
 	"go.opentelemetry.io/otel/codes"
 	"go.opentelemetry.io/otel/sdk/instrumentation"
@@ -102,9 +104,9 @@ func span(sd tracesdk.ReadOnlySpan) *tracepb.Span {
 		Name:                   sd.Name(),
 		Attributes:             KeyValues(sd.Attributes()),
 		Events:                 spanEvents(sd.Events()),
-		DroppedAttributesCount: uint32(sd.DroppedAttributes()),
-		DroppedEventsCount:     uint32(sd.DroppedEvents()),
-		DroppedLinksCount:      uint32(sd.DroppedLinks()),
+		DroppedAttributesCount: clampUint32(sd.DroppedAttributes()),
+		DroppedEventsCount:     clampUint32(sd.DroppedEvents()),
+		DroppedLinksCount:      clampUint32(sd.DroppedLinks()),
 	}
 
 	if psid := sd.Parent().SpanID(); psid.IsValid() {
@@ -115,6 +117,16 @@ func span(sd tracesdk.ReadOnlySpan) *tracepb.Span {
 	return s
 }
 
+func clampUint32(v int) uint32 {
+	if v < 0 {
+		return 0
+	}
+	if int64(v) > math.MaxUint32 {
+		return math.MaxUint32
+	}
+	return uint32(v) // nolint: gosec // Overflow/Underflow checked.
+}
+
 // status transform a span code and message into an OTLP span status.
 func status(status codes.Code, message string) *tracepb.Status {
 	var c tracepb.Status_StatusCode
@@ -153,7 +165,7 @@ func links(links []tracesdk.Link) []*tracepb.Span_Link {
 			TraceId:                tid[:],
 			SpanId:                 sid[:],
 			Attributes:             KeyValues(otLink.Attributes),
-			DroppedAttributesCount: uint32(otLink.DroppedAttributeCount),
+			DroppedAttributesCount: clampUint32(otLink.DroppedAttributeCount),
 			Flags:                  flags,
 		})
 	}
@@ -182,7 +194,7 @@ func spanEvents(es []tracesdk.Event) []*tracepb.Span_Event {
 			Name:                   es[i].Name,
 			TimeUnixNano:           uint64(es[i].Time.UnixNano()),
 			Attributes:             KeyValues(es[i].Attributes),
-			DroppedAttributesCount: uint32(es[i].DroppedAttributeCount),
+			DroppedAttributesCount: clampUint32(es[i].DroppedAttributeCount),
 		}
 	}
 	return events
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/doc.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/doc.go
index e783b57ac4..b7bd429ffd 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/doc.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/doc.go
@@ -12,9 +12,8 @@ The environment variables described below can be used for configuration.
 OTEL_EXPORTER_OTLP_ENDPOINT, OTEL_EXPORTER_OTLP_TRACES_ENDPOINT (default: "https://localhost:4317") -
 target to which the exporter sends telemetry.
 The target syntax is defined in https://github.com/grpc/grpc/blob/master/doc/naming.md.
-The value must contain a host.
-The value may additionally a port, a scheme, and a path.
-The value accepts "http" and "https" scheme.
+The value must contain a scheme ("http" or "https") and a host.
+The value may additionally contain a port and a path.
 The value should not contain a query string or fragment.
 OTEL_EXPORTER_OTLP_TRACES_ENDPOINT takes precedence over OTEL_EXPORTER_OTLP_ENDPOINT.
 The configuration can be overridden by [WithEndpoint], [WithEndpointURL], [WithInsecure], and [WithGRPCConn] options.
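The reworded otlptracegrpc documentation above tightens the endpoint rules: the scheme is now required rather than optional. For illustration, a hedged sketch of both configuration paths; the collector URL is hypothetical, and WithEndpointURL is the option the doc comment itself references:

package main

import (
	"context"
	"log"
	"os"

	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
)

func main() {
	ctx := context.Background()

	// Environment-based configuration: scheme ("http" or "https") and host are
	// required; a port and a path may be appended.
	_ = os.Setenv("OTEL_EXPORTER_OTLP_TRACES_ENDPOINT", "https://collector.example.com:4317")

	// Programmatic configuration, which takes precedence over the env var.
	exp, err := otlptracegrpc.New(ctx,
		otlptracegrpc.WithEndpointURL("https://collector.example.com:4317"))
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = exp.Shutdown(ctx) }()
}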
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go
index 9513c0a57c..4abf48d1f6 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go
@@ -15,6 +15,7 @@ import (
 	"strconv"
 	"strings"
 	"time"
+	"unicode"
 
 	"go.opentelemetry.io/otel/internal/global"
 )
@@ -163,12 +164,16 @@ func stringToHeader(value string) map[string]string {
 			global.Error(errors.New("missing '='"), "parse headers", "input", header)
 			continue
 		}
-		name, err := url.PathUnescape(n)
-		if err != nil {
-			global.Error(err, "escape header key", "key", n)
+
+		trimmedName := strings.TrimSpace(n)
+
+		// Validate the key.
+		if !isValidHeaderKey(trimmedName) {
+			global.Error(errors.New("invalid header key"), "parse headers", "key", trimmedName)
 			continue
 		}
-		trimmedName := strings.TrimSpace(name)
+
+		// Only decode the value.
 		value, err := url.PathUnescape(v)
 		if err != nil {
 			global.Error(err, "escape header value", "value", v)
@@ -189,3 +194,22 @@ func createCertPool(certBytes []byte) (*x509.CertPool, error) {
 	}
 	return cp, nil
 }
+
+func isValidHeaderKey(key string) bool {
+	if key == "" {
+		return false
+	}
+	for _, c := range key {
+		if !isTokenChar(c) {
+			return false
+		}
+	}
+	return true
+}
+
+func isTokenChar(c rune) bool {
+	return c <= unicode.MaxASCII && (unicode.IsLetter(c) ||
+		unicode.IsDigit(c) ||
+		c == '!' || c == '#' || c == '$' || c == '%' || c == '&' || c == '\'' || c == '*' ||
+		c == '+' || c == '-' || c == '.' || c == '^' || c == '_' || c == '`' || c == '|' || c == '~')
+}
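stringToHeader above no longer percent-decodes header keys; a key must already be an RFC 7230 token, and only the value still goes through url.PathUnescape. The two validators are copied below into a standalone sketch to show what passes:

package main

import (
	"fmt"
	"unicode"
)

// isTokenChar and isValidHeaderKey mirror the helpers added to envconfig.go:
// a key is valid when it is non-empty and every rune is an RFC 7230 token character.
func isTokenChar(c rune) bool {
	return c <= unicode.MaxASCII && (unicode.IsLetter(c) ||
		unicode.IsDigit(c) ||
		c == '!' || c == '#' || c == '$' || c == '%' || c == '&' || c == '\'' || c == '*' ||
		c == '+' || c == '-' || c == '.' || c == '^' || c == '_' || c == '`' || c == '|' || c == '~')
}

func isValidHeaderKey(key string) bool {
	if key == "" {
		return false
	}
	for _, c := range key {
		if !isTokenChar(c) {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(isValidHeaderKey("api-key")) // true
	fmt.Println(isValidHeaderKey("api key")) // false: space is not a token character
	fmt.Println(isValidHeaderKey("clé"))     // false: non-ASCII
	fmt.Println(isValidHeaderKey(""))        // false: empty keys are rejected
}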
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go
index 14ad8c33b4..780992528d 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go
@@ -5,5 +5,5 @@ package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace"
 
 // Version is the current release version of the OpenTelemetry OTLP trace exporter in use.
 func Version() string {
-	return "1.28.0"
+	return "1.29.0"
 }
diff --git a/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go b/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go
index 3e7bb3b356..9b1da2c02b 100644
--- a/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go
+++ b/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go
@@ -24,7 +24,8 @@ func Int64ToRaw(i int64) uint64 {
 }
 
 func RawToInt64(r uint64) int64 {
-	return int64(r)
+	// Assumes original was a valid int64 (overflow not checked).
+	return int64(r) // nolint: gosec
 }
 
 func Float64ToRaw(f float64) uint64 {
@@ -36,9 +37,11 @@ func RawToFloat64(r uint64) float64 {
 }
 
 func RawPtrToFloat64Ptr(r *uint64) *float64 {
-	return (*float64)(unsafe.Pointer(r))
+	// Assumes original was a valid *float64 (overflow not checked).
+	return (*float64)(unsafe.Pointer(r)) // nolint: gosec
 }
 
 func RawPtrToInt64Ptr(r *uint64) *int64 {
-	return (*int64)(unsafe.Pointer(r))
+	// Assumes original was a valid *int64 (overflow not checked).
+	return (*int64)(unsafe.Pointer(r)) // nolint: gosec
 }
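The rawhelpers.go hunks only annotate existing conversions; the semantics stay the same. For context, a sketch of the float64 round trip these helpers rely on, reimplemented with math.Float64bits since the internal package cannot be imported (this mirrors, but is not guaranteed to match, the internal code):

package main

import (
	"fmt"
	"math"
)

// float64ToRaw / rawToFloat64 sketch the internal helpers: a float64 is
// stored bit-for-bit in a uint64, so the round trip is lossless.
func float64ToRaw(f float64) uint64 { return math.Float64bits(f) }
func rawToFloat64(r uint64) float64 { return math.Float64frombits(r) }

func main() {
	raw := float64ToRaw(3.14)
	fmt.Println(rawToFloat64(raw) == 3.14) // true

	// RawToInt64 is a plain conversion, which is why the new comments warn
	// that overflow is not checked: the top uint64 value wraps to -1.
	var r uint64 = math.MaxUint64
	fmt.Println(int64(r)) // -1
}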
diff --git a/vendor/go.opentelemetry.io/otel/metric/meter.go b/vendor/go.opentelemetry.io/otel/metric/meter.go
index 6a7991e015..14e08c24a4 100644
--- a/vendor/go.opentelemetry.io/otel/metric/meter.go
+++ b/vendor/go.opentelemetry.io/otel/metric/meter.go
@@ -52,6 +52,7 @@ type Meter interface {
 	// See the Instrument Name section of the package documentation for more
 	// information.
 	Int64Counter(name string, options ...Int64CounterOption) (Int64Counter, error)
+
 	// Int64UpDownCounter returns a new Int64UpDownCounter instrument
 	// identified by name and configured with options. The instrument is used
 	// to synchronously record int64 measurements during a computational
@@ -61,6 +62,7 @@ type Meter interface {
 	// See the Instrument Name section of the package documentation for more
 	// information.
 	Int64UpDownCounter(name string, options ...Int64UpDownCounterOption) (Int64UpDownCounter, error)
+
 	// Int64Histogram returns a new Int64Histogram instrument identified by
 	// name and configured with options. The instrument is used to
 	// synchronously record the distribution of int64 measurements during a
@@ -70,6 +72,7 @@ type Meter interface {
 	// See the Instrument Name section of the package documentation for more
 	// information.
 	Int64Histogram(name string, options ...Int64HistogramOption) (Int64Histogram, error)
+
 	// Int64Gauge returns a new Int64Gauge instrument identified by name and
 	// configured with options. The instrument is used to synchronously record
 	// instantaneous int64 measurements during a computational operation.
@@ -78,6 +81,7 @@ type Meter interface {
 	// See the Instrument Name section of the package documentation for more
 	// information.
 	Int64Gauge(name string, options ...Int64GaugeOption) (Int64Gauge, error)
+
 	// Int64ObservableCounter returns a new Int64ObservableCounter identified
 	// by name and configured with options. The instrument is used to
 	// asynchronously record increasing int64 measurements once per a
@@ -92,6 +96,7 @@ type Meter interface {
 	// See the Instrument Name section of the package documentation for more
 	// information.
 	Int64ObservableCounter(name string, options ...Int64ObservableCounterOption) (Int64ObservableCounter, error)
+
 	// Int64ObservableUpDownCounter returns a new Int64ObservableUpDownCounter
 	// instrument identified by name and configured with options. The
 	// instrument is used to asynchronously record int64 measurements once per
@@ -106,6 +111,7 @@ type Meter interface {
 	// See the Instrument Name section of the package documentation for more
 	// information.
 	Int64ObservableUpDownCounter(name string, options ...Int64ObservableUpDownCounterOption) (Int64ObservableUpDownCounter, error)
+
 	// Int64ObservableGauge returns a new Int64ObservableGauge instrument
 	// identified by name and configured with options. The instrument is used
 	// to asynchronously record instantaneous int64 measurements once per a
@@ -130,6 +136,7 @@ type Meter interface {
 	// See the Instrument Name section of the package documentation for more
 	// information.
 	Float64Counter(name string, options ...Float64CounterOption) (Float64Counter, error)
+
 	// Float64UpDownCounter returns a new Float64UpDownCounter instrument
 	// identified by name and configured with options. The instrument is used
 	// to synchronously record float64 measurements during a computational
@@ -139,6 +146,7 @@ type Meter interface {
 	// See the Instrument Name section of the package documentation for more
 	// information.
 	Float64UpDownCounter(name string, options ...Float64UpDownCounterOption) (Float64UpDownCounter, error)
+
 	// Float64Histogram returns a new Float64Histogram instrument identified by
 	// name and configured with options. The instrument is used to
 	// synchronously record the distribution of float64 measurements during a
@@ -148,6 +156,7 @@ type Meter interface {
 	// See the Instrument Name section of the package documentation for more
 	// information.
 	Float64Histogram(name string, options ...Float64HistogramOption) (Float64Histogram, error)
+
 	// Float64Gauge returns a new Float64Gauge instrument identified by name and
 	// configured with options. The instrument is used to synchronously record
 	// instantaneous float64 measurements during a computational operation.
@@ -156,6 +165,7 @@ type Meter interface {
 	// See the Instrument Name section of the package documentation for more
 	// information.
 	Float64Gauge(name string, options ...Float64GaugeOption) (Float64Gauge, error)
+
 	// Float64ObservableCounter returns a new Float64ObservableCounter
 	// instrument identified by name and configured with options. The
 	// instrument is used to asynchronously record increasing float64
@@ -170,6 +180,7 @@ type Meter interface {
 	// See the Instrument Name section of the package documentation for more
 	// information.
 	Float64ObservableCounter(name string, options ...Float64ObservableCounterOption) (Float64ObservableCounter, error)
+
 	// Float64ObservableUpDownCounter returns a new
 	// Float64ObservableUpDownCounter instrument identified by name and
 	// configured with options. The instrument is used to asynchronously record
@@ -184,6 +195,7 @@ type Meter interface {
 	// See the Instrument Name section of the package documentation for more
 	// information.
 	Float64ObservableUpDownCounter(name string, options ...Float64ObservableUpDownCounterOption) (Float64ObservableUpDownCounter, error)
+
 	// Float64ObservableGauge returns a new Float64ObservableGauge instrument
 	// identified by name and configured with options. The instrument is used
 	// to asynchronously record instantaneous float64 measurements once per a
@@ -242,6 +254,7 @@ type Observer interface {
 	// ObserveFloat64 records the float64 value for obsrv.
 	ObserveFloat64(obsrv Float64Observable, value float64, opts ...ObserveOption)
+
 	// ObserveInt64 records the int64 value for obsrv.
 	ObserveInt64(obsrv Int64Observable, value int64, opts ...ObserveOption)
 }
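The meter.go hunks above only insert blank lines between the interface's method docs; the API itself is unchanged. As a reminder of how these methods are used, a minimal sketch against the global MeterProvider (a no-op unless an SDK provider has been registered); the instrument names are made up:

package main

import (
	"context"
	"log"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
)

func main() {
	meter := otel.Meter("example")

	// Synchronous instrument: measurements are recorded at call sites.
	counter, err := meter.Int64Counter("requests.total")
	if err != nil {
		log.Fatal(err)
	}
	counter.Add(context.Background(), 1)

	// Asynchronous instrument: the callback observes a value per collection.
	_, err = meter.Int64ObservableGauge("queue.length",
		metric.WithInt64Callback(func(_ context.Context, o metric.Int64Observer) error {
			o.Observe(42)
			return nil
		}))
	if err != nil {
		log.Fatal(err)
	}
}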
diff --git a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go
index f4d1857c4f..f2cdf3c651 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go
@@ -4,5 +4,6 @@
 package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation"
 
 // Library represents the instrumentation library.
-// Deprecated: please use Scope instead.
+//
+// Deprecated: use [Scope] instead.
 type Library = Scope
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go b/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go
index 32f862790c..d511d0f271 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go
@@ -99,7 +99,7 @@ func (s snapshot) InstrumentationScope() instrumentation.Scope {
 
 // InstrumentationLibrary returns information about the instrumentation
 // library that created the span.
-func (s snapshot) InstrumentationLibrary() instrumentation.Library {
+func (s snapshot) InstrumentationLibrary() instrumentation.Library { //nolint:staticcheck // This method needs to be defined for backwards compatibility.
 	return s.instrumentationScope
 }
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go
index ac90f1a260..4945f50830 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go
@@ -62,7 +62,7 @@ type ReadOnlySpan interface {
 	// InstrumentationLibrary returns information about the instrumentation
 	// library that created the span.
 	// Deprecated: please use InstrumentationScope instead.
-	InstrumentationLibrary() instrumentation.Library
+	InstrumentationLibrary() instrumentation.Library //nolint:staticcheck // This method needs to be defined for backwards compatibility.
 	// Resource returns information about the entity that produced the span.
 	Resource() *resource.Resource
 	// DroppedAttributes returns the number of attributes dropped by the span
@@ -642,7 +642,7 @@ func (s *recordingSpan) InstrumentationScope() instrumentation.Scope {
 
 // InstrumentationLibrary returns the instrumentation.Library associated with
 // the Tracer that created this span.
-func (s *recordingSpan) InstrumentationLibrary() instrumentation.Library {
+func (s *recordingSpan) InstrumentationLibrary() instrumentation.Library { //nolint:staticcheck // This method needs to be defined for backwards compatibility.
 	s.mu.Lock()
 	defer s.mu.Unlock()
 	return s.tracer.instrumentationScope
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/span.go b/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/span.go
index 0a641f9488..cd2cc30ca2 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/span.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/span.go
@@ -45,22 +45,25 @@ func (s SpanStubs) Snapshots() []tracesdk.ReadOnlySpan {
 
 // SpanStub is a stand-in for a Span.
 type SpanStub struct {
-	Name                   string
-	SpanContext            trace.SpanContext
-	Parent                 trace.SpanContext
-	SpanKind               trace.SpanKind
-	StartTime              time.Time
-	EndTime                time.Time
-	Attributes             []attribute.KeyValue
-	Events                 []tracesdk.Event
-	Links                  []tracesdk.Link
-	Status                 tracesdk.Status
-	DroppedAttributes      int
-	DroppedEvents          int
-	DroppedLinks           int
-	ChildSpanCount         int
-	Resource               *resource.Resource
-	InstrumentationLibrary instrumentation.Library
+	Name                 string
+	SpanContext          trace.SpanContext
+	Parent               trace.SpanContext
+	SpanKind             trace.SpanKind
+	StartTime            time.Time
+	EndTime              time.Time
+	Attributes           []attribute.KeyValue
+	Events               []tracesdk.Event
+	Links                []tracesdk.Link
+	Status               tracesdk.Status
+	DroppedAttributes    int
+	DroppedEvents        int
+	DroppedLinks         int
+	ChildSpanCount       int
+	Resource             *resource.Resource
+	InstrumentationScope instrumentation.Scope
+
+	// Deprecated: use InstrumentationScope instead.
+	InstrumentationLibrary instrumentation.Library //nolint:staticcheck // This field needs to be defined for backwards compatibility.
 }
 
 // SpanStubFromReadOnlySpan returns a SpanStub populated from ro.
@@ -85,12 +88,18 @@ func SpanStubFromReadOnlySpan(ro tracesdk.ReadOnlySpan) SpanStub {
 		DroppedLinks:           ro.DroppedLinks(),
 		ChildSpanCount:         ro.ChildSpanCount(),
 		Resource:               ro.Resource(),
+		InstrumentationScope:   ro.InstrumentationScope(),
 		InstrumentationLibrary: ro.InstrumentationScope(),
 	}
 }
 
 // Snapshot returns a read-only copy of the SpanStub.
 func (s SpanStub) Snapshot() tracesdk.ReadOnlySpan {
+	scopeOrLibrary := s.InstrumentationScope
+	if scopeOrLibrary.Name == "" && scopeOrLibrary.Version == "" && scopeOrLibrary.SchemaURL == "" {
+		scopeOrLibrary = s.InstrumentationLibrary
+	}
+
 	return spanSnapshot{
 		name:        s.Name,
 		spanContext: s.SpanContext,
@@ -107,7 +116,7 @@ func (s SpanStub) Snapshot() tracesdk.ReadOnlySpan {
 		droppedLinks:         s.DroppedLinks,
 		childSpanCount:       s.ChildSpanCount,
 		resource:             s.Resource,
-		instrumentationScope: s.InstrumentationLibrary,
+		instrumentationScope: scopeOrLibrary,
 	}
 }
 
@@ -152,6 +161,6 @@ func (s spanSnapshot) InstrumentationScope() instrumentation.Scope {
 	return s.instrumentationScope
 }
 
-func (s spanSnapshot) InstrumentationLibrary() instrumentation.Library {
+func (s spanSnapshot) InstrumentationLibrary() instrumentation.Library { //nolint:staticcheck // This method needs to be defined for backwards compatibility.
 	return s.instrumentationScope
 }
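tracetest.SpanStub now carries both the new InstrumentationScope field and the deprecated InstrumentationLibrary one, and Snapshot falls back to the deprecated field only when the new one is entirely empty. A short sketch of the forward-looking usage, assuming the vendored SDK:

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/sdk/instrumentation"
	"go.opentelemetry.io/otel/sdk/trace/tracetest"
)

func main() {
	stub := tracetest.SpanStub{
		Name: "op",
		// Prefer the new field; the deprecated InstrumentationLibrary is only
		// consulted when Name, Version, and SchemaURL are all empty here.
		InstrumentationScope: instrumentation.Scope{Name: "tester", Version: "v1"},
	}

	ro := stub.Snapshot()
	fmt.Println(ro.InstrumentationScope().Name) // tester
}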
diff --git a/vendor/go.opentelemetry.io/otel/sdk/version.go b/vendor/go.opentelemetry.io/otel/sdk/version.go
index 33d065a7cb..b7cede891c 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/version.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/version.go
@@ -5,5 +5,5 @@ package sdk // import "go.opentelemetry.io/otel/sdk"
 
 // Version is the current release version of the OpenTelemetry SDK in use.
 func Version() string {
-	return "1.28.0"
+	return "1.29.0"
 }
diff --git a/vendor/go.opentelemetry.io/otel/semconv/internal/v2/net.go b/vendor/go.opentelemetry.io/otel/semconv/internal/v2/net.go
index aa9e101715..1a820bdb30 100644
--- a/vendor/go.opentelemetry.io/otel/semconv/internal/v2/net.go
+++ b/vendor/go.opentelemetry.io/otel/semconv/internal/v2/net.go
@@ -309,5 +309,5 @@ func splitHostPort(hostport string) (host string, port int) {
 	if err != nil {
 		return
 	}
-	return host, int(p)
+	return host, int(p) // nolint: gosec // Bit size of 16 checked above.
 }
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/README.md
new file mode 100644
index 0000000000..0b6cbe960c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/README.md
@@ -0,0 +1,3 @@
+# Semconv v1.24.0
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.24.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.24.0)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/attribute_group.go
new file mode 100644
index 0000000000..6e688345cb
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/attribute_group.go
@@ -0,0 +1,4387 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// Describes FaaS attributes.
+const (
+	// FaaSInvokedNameKey is the attribute Key conforming to the
+	// "faas.invoked_name" semantic conventions. It represents the name of the
+	// invoked function.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: experimental
+	// Examples: 'my-function'
+	// Note: SHOULD be equal to the `faas.name` resource attribute of the
+	// invoked function.
+	FaaSInvokedNameKey = attribute.Key("faas.invoked_name")
+
+	// FaaSInvokedProviderKey is the attribute Key conforming to the
+	// "faas.invoked_provider" semantic conventions. It represents the cloud
+	// provider of the invoked function.
+	//
+	// Type: Enum
+	// RequirementLevel: Required
+	// Stability: experimental
+	// Note: SHOULD be equal to the `cloud.provider` resource attribute of the
+	// invoked function.
+	FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider")
+
+	// FaaSInvokedRegionKey is the attribute Key conforming to the
+	// "faas.invoked_region" semantic conventions. It represents the cloud
+	// region of the invoked function.
+	//
+	// Type: string
+	// RequirementLevel: ConditionallyRequired (For some cloud providers, like
+	// AWS or GCP, the region in which a function is hosted is essential to
+	// uniquely identify the function and also part of its endpoint. Since it's
+	// part of the endpoint being called, the region is always known to
+	// clients. In these cases, `faas.invoked_region` MUST be set accordingly.
+	// If the region is unknown to the client or not required for identifying
+	// the invoked function, setting `faas.invoked_region` is optional.)
+	// Stability: experimental
+	// Examples: 'eu-central-1'
+	// Note: SHOULD be equal to the `cloud.region` resource attribute of the
+	// invoked function.
+	FaaSInvokedRegionKey = attribute.Key("faas.invoked_region")
+
+	// FaaSTriggerKey is the attribute Key conforming to the "faas.trigger"
+	// semantic conventions. It represents the type of the trigger which caused
+	// this function invocation.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	FaaSTriggerKey = attribute.Key("faas.trigger")
+)
+
+var (
+	// Alibaba Cloud
+	FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud")
+	// Amazon Web Services
+	FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws")
+	// Microsoft Azure
+	FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure")
+	// Google Cloud Platform
+	FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp")
+	// Tencent Cloud
+	FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud")
+)
+
+var (
+	// A response to some data source operation such as a database or filesystem read/write
+	FaaSTriggerDatasource = FaaSTriggerKey.String("datasource")
+	// To provide an answer to an inbound HTTP request
+	FaaSTriggerHTTP = FaaSTriggerKey.String("http")
+	// A function is set to be executed when messages are sent to a messaging system
+	FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub")
+	// A function is scheduled to be executed regularly
+	FaaSTriggerTimer = FaaSTriggerKey.String("timer")
+	// If none of the others apply
+	FaaSTriggerOther = FaaSTriggerKey.String("other")
+)
+
+// FaaSInvokedName returns an attribute KeyValue conforming to the
+// "faas.invoked_name" semantic conventions. It represents the name of the
+// invoked function.
+func FaaSInvokedName(val string) attribute.KeyValue {
+	return FaaSInvokedNameKey.String(val)
+}
+
+// FaaSInvokedRegion returns an attribute KeyValue conforming to the
+// "faas.invoked_region" semantic conventions. It represents the cloud region
+// of the invoked function.
+func FaaSInvokedRegion(val string) attribute.KeyValue {
+	return FaaSInvokedRegionKey.String(val)
+}
+
+// Attributes for Events represented using Log Records.
+const (
+	// EventNameKey is the attribute Key conforming to the "event.name"
+	// semantic conventions. It identifies the class / type of
+	// event.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: experimental
+	// Examples: 'browser.mouse.click', 'device.app.lifecycle'
+	// Note: Event names are subject to the same rules as [attribute
+	// names](https://github.com/open-telemetry/opentelemetry-specification/tree/v1.26.0/specification/common/attribute-naming.md).
+	// Notably, event names are namespaced to avoid collisions and provide a
+	// clean separation of semantics for events in separate domains like
+	// browser, mobile, and kubernetes.
+	EventNameKey = attribute.Key("event.name")
+)
+
+// EventName returns an attribute KeyValue conforming to the "event.name"
+// semantic conventions. It identifies the class / type of
+// event.
+func EventName(val string) attribute.KeyValue {
+	return EventNameKey.String(val)
+}
+
+// The attributes described in this section are rather generic. They may be
+// used in any Log Record they apply to.
+const (
+	// LogRecordUIDKey is the attribute Key conforming to the "log.record.uid"
+	// semantic conventions. It represents a unique identifier for the Log
+	// Record.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '01ARZ3NDEKTSV4RRFFQ69G5FAV'
+	// Note: If an id is provided, other log records with the same id will be
+	// considered duplicates and can be removed safely. This means that two
+	// distinguishable log records MUST have different values.
+ // The id MAY be an [Universally Unique Lexicographically Sortable + // Identifier (ULID)](https://github.com/ulid/spec), but other identifiers + // (e.g. UUID) may be used as needed. + LogRecordUIDKey = attribute.Key("log.record.uid") +) + +// LogRecordUID returns an attribute KeyValue conforming to the +// "log.record.uid" semantic conventions. It represents a unique identifier for +// the Log Record. +func LogRecordUID(val string) attribute.KeyValue { + return LogRecordUIDKey.String(val) +} + +// Describes Log attributes +const ( + // LogIostreamKey is the attribute Key conforming to the "log.iostream" + // semantic conventions. It represents the stream associated with the log. + // See below for a list of well-known values. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + LogIostreamKey = attribute.Key("log.iostream") +) + +var ( + // Logs from stdout stream + LogIostreamStdout = LogIostreamKey.String("stdout") + // Events from stderr stream + LogIostreamStderr = LogIostreamKey.String("stderr") +) + +// A file to which log was emitted. +const ( + // LogFileNameKey is the attribute Key conforming to the "log.file.name" + // semantic conventions. It represents the basename of the file. + // + // Type: string + // RequirementLevel: Recommended + // Stability: experimental + // Examples: 'audit.log' + LogFileNameKey = attribute.Key("log.file.name") + + // LogFileNameResolvedKey is the attribute Key conforming to the + // "log.file.name_resolved" semantic conventions. It represents the + // basename of the file, with symlinks resolved. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'uuid.log' + LogFileNameResolvedKey = attribute.Key("log.file.name_resolved") + + // LogFilePathKey is the attribute Key conforming to the "log.file.path" + // semantic conventions. It represents the full path to the file. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/var/log/mysql/audit.log' + LogFilePathKey = attribute.Key("log.file.path") + + // LogFilePathResolvedKey is the attribute Key conforming to the + // "log.file.path_resolved" semantic conventions. It represents the full + // path to the file, with symlinks resolved. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/var/lib/docker/uuid.log' + LogFilePathResolvedKey = attribute.Key("log.file.path_resolved") +) + +// LogFileName returns an attribute KeyValue conforming to the +// "log.file.name" semantic conventions. It represents the basename of the +// file. +func LogFileName(val string) attribute.KeyValue { + return LogFileNameKey.String(val) +} + +// LogFileNameResolved returns an attribute KeyValue conforming to the +// "log.file.name_resolved" semantic conventions. It represents the basename of +// the file, with symlinks resolved. +func LogFileNameResolved(val string) attribute.KeyValue { + return LogFileNameResolvedKey.String(val) +} + +// LogFilePath returns an attribute KeyValue conforming to the +// "log.file.path" semantic conventions. It represents the full path to the +// file. +func LogFilePath(val string) attribute.KeyValue { + return LogFilePathKey.String(val) +} + +// LogFilePathResolved returns an attribute KeyValue conforming to the +// "log.file.path_resolved" semantic conventions. It represents the full path +// to the file, with symlinks resolved. 
+func LogFilePathResolved(val string) attribute.KeyValue { + return LogFilePathResolvedKey.String(val) +} + +// Describes Database attributes +const ( + // PoolNameKey is the attribute Key conforming to the "pool.name" semantic + // conventions. It represents the name of the connection pool; unique + // within the instrumented application. In case the connection pool + // implementation doesn't provide a name, then the + // [db.connection_string](/docs/database/database-spans.md#connection-level-attributes) + // should be used + // + // Type: string + // RequirementLevel: Required + // Stability: experimental + // Examples: 'myDataSource' + PoolNameKey = attribute.Key("pool.name") + + // StateKey is the attribute Key conforming to the "state" semantic + // conventions. It represents the state of a connection in the pool + // + // Type: Enum + // RequirementLevel: Required + // Stability: experimental + // Examples: 'idle' + StateKey = attribute.Key("state") +) + +var ( + // idle + StateIdle = StateKey.String("idle") + // used + StateUsed = StateKey.String("used") +) + +// PoolName returns an attribute KeyValue conforming to the "pool.name" +// semantic conventions. It represents the name of the connection pool; unique +// within the instrumented application. In case the connection pool +// implementation doesn't provide a name, then the +// [db.connection_string](/docs/database/database-spans.md#connection-level-attributes) +// should be used +func PoolName(val string) attribute.KeyValue { + return PoolNameKey.String(val) +} + +// ASP.NET Core attributes +const ( + // AspnetcoreDiagnosticsHandlerTypeKey is the attribute Key conforming to + // the "aspnetcore.diagnostics.handler.type" semantic conventions. It + // represents the full type name of the + // [`IExceptionHandler`](https://learn.microsoft.com/dotnet/api/microsoft.aspnetcore.diagnostics.iexceptionhandler) + // implementation that handled the exception. + // + // Type: string + // RequirementLevel: ConditionallyRequired (if and only if the exception + // was handled by this handler.) + // Stability: experimental + // Examples: 'Contoso.MyHandler' + AspnetcoreDiagnosticsHandlerTypeKey = attribute.Key("aspnetcore.diagnostics.handler.type") + + // AspnetcoreRateLimitingPolicyKey is the attribute Key conforming to the + // "aspnetcore.rate_limiting.policy" semantic conventions. It represents + // the rate limiting policy name. + // + // Type: string + // RequirementLevel: ConditionallyRequired (if the matched endpoint for the + // request had a rate-limiting policy.) + // Stability: experimental + // Examples: 'fixed', 'sliding', 'token' + AspnetcoreRateLimitingPolicyKey = attribute.Key("aspnetcore.rate_limiting.policy") + + // AspnetcoreRateLimitingResultKey is the attribute Key conforming to the + // "aspnetcore.rate_limiting.result" semantic conventions. It represents + // the rate-limiting result, shows whether the lease was acquired or + // contains a rejection reason + // + // Type: Enum + // RequirementLevel: Required + // Stability: experimental + // Examples: 'acquired', 'request_canceled' + AspnetcoreRateLimitingResultKey = attribute.Key("aspnetcore.rate_limiting.result") + + // AspnetcoreRequestIsUnhandledKey is the attribute Key conforming to the + // "aspnetcore.request.is_unhandled" semantic conventions. It represents + // the flag indicating if request was handled by the application pipeline. + // + // Type: boolean + // RequirementLevel: ConditionallyRequired (if and only if the request was + // not handled.) 
+ // Stability: experimental + // Examples: True + AspnetcoreRequestIsUnhandledKey = attribute.Key("aspnetcore.request.is_unhandled") + + // AspnetcoreRoutingIsFallbackKey is the attribute Key conforming to the + // "aspnetcore.routing.is_fallback" semantic conventions. It represents a + // value that indicates whether the matched route is a fallback route. + // + // Type: boolean + // RequirementLevel: ConditionallyRequired (If and only if a route was + // successfully matched.) + // Stability: experimental + // Examples: True + AspnetcoreRoutingIsFallbackKey = attribute.Key("aspnetcore.routing.is_fallback") +) + +var ( + // Lease was acquired + AspnetcoreRateLimitingResultAcquired = AspnetcoreRateLimitingResultKey.String("acquired") + // Lease request was rejected by the endpoint limiter + AspnetcoreRateLimitingResultEndpointLimiter = AspnetcoreRateLimitingResultKey.String("endpoint_limiter") + // Lease request was rejected by the global limiter + AspnetcoreRateLimitingResultGlobalLimiter = AspnetcoreRateLimitingResultKey.String("global_limiter") + // Lease request was canceled + AspnetcoreRateLimitingResultRequestCanceled = AspnetcoreRateLimitingResultKey.String("request_canceled") +) + +// AspnetcoreDiagnosticsHandlerType returns an attribute KeyValue conforming +// to the "aspnetcore.diagnostics.handler.type" semantic conventions. It +// represents the full type name of the +// [`IExceptionHandler`](https://learn.microsoft.com/dotnet/api/microsoft.aspnetcore.diagnostics.iexceptionhandler) +// implementation that handled the exception. +func AspnetcoreDiagnosticsHandlerType(val string) attribute.KeyValue { + return AspnetcoreDiagnosticsHandlerTypeKey.String(val) +} + +// AspnetcoreRateLimitingPolicy returns an attribute KeyValue conforming to +// the "aspnetcore.rate_limiting.policy" semantic conventions. It represents +// the rate limiting policy name. +func AspnetcoreRateLimitingPolicy(val string) attribute.KeyValue { + return AspnetcoreRateLimitingPolicyKey.String(val) +} + +// AspnetcoreRequestIsUnhandled returns an attribute KeyValue conforming to +// the "aspnetcore.request.is_unhandled" semantic conventions. It represents +// the flag indicating if request was handled by the application pipeline. +func AspnetcoreRequestIsUnhandled(val bool) attribute.KeyValue { + return AspnetcoreRequestIsUnhandledKey.Bool(val) +} + +// AspnetcoreRoutingIsFallback returns an attribute KeyValue conforming to +// the "aspnetcore.routing.is_fallback" semantic conventions. It represents a +// value that indicates whether the matched route is a fallback route. +func AspnetcoreRoutingIsFallback(val bool) attribute.KeyValue { + return AspnetcoreRoutingIsFallbackKey.Bool(val) +} + +// SignalR attributes +const ( + // SignalrConnectionStatusKey is the attribute Key conforming to the + // "signalr.connection.status" semantic conventions. It represents the + // signalR HTTP connection closure status. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'app_shutdown', 'timeout' + SignalrConnectionStatusKey = attribute.Key("signalr.connection.status") + + // SignalrTransportKey is the attribute Key conforming to the + // "signalr.transport" semantic conventions. 
It represents the [SignalR + // transport + // type](https://github.com/dotnet/aspnetcore/blob/main/src/SignalR/docs/specs/TransportProtocols.md) + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'web_sockets', 'long_polling' + SignalrTransportKey = attribute.Key("signalr.transport") +) + +var ( + // The connection was closed normally + SignalrConnectionStatusNormalClosure = SignalrConnectionStatusKey.String("normal_closure") + // The connection was closed due to a timeout + SignalrConnectionStatusTimeout = SignalrConnectionStatusKey.String("timeout") + // The connection was closed because the app is shutting down + SignalrConnectionStatusAppShutdown = SignalrConnectionStatusKey.String("app_shutdown") +) + +var ( + // ServerSentEvents protocol + SignalrTransportServerSentEvents = SignalrTransportKey.String("server_sent_events") + // LongPolling protocol + SignalrTransportLongPolling = SignalrTransportKey.String("long_polling") + // WebSockets protocol + SignalrTransportWebSockets = SignalrTransportKey.String("web_sockets") +) + +// Describes JVM buffer metric attributes. +const ( + // JvmBufferPoolNameKey is the attribute Key conforming to the + // "jvm.buffer.pool.name" semantic conventions. It represents the name of + // the buffer pool. + // + // Type: string + // RequirementLevel: Recommended + // Stability: experimental + // Examples: 'mapped', 'direct' + // Note: Pool names are generally obtained via + // [BufferPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/BufferPoolMXBean.html#getName()). + JvmBufferPoolNameKey = attribute.Key("jvm.buffer.pool.name") +) + +// JvmBufferPoolName returns an attribute KeyValue conforming to the +// "jvm.buffer.pool.name" semantic conventions. It represents the name of the +// buffer pool. +func JvmBufferPoolName(val string) attribute.KeyValue { + return JvmBufferPoolNameKey.String(val) +} + +// Describes JVM memory metric attributes. +const ( + // JvmMemoryPoolNameKey is the attribute Key conforming to the + // "jvm.memory.pool.name" semantic conventions. It represents the name of + // the memory pool. + // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'G1 Old Gen', 'G1 Eden space', 'G1 Survivor Space' + // Note: Pool names are generally obtained via + // [MemoryPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/MemoryPoolMXBean.html#getName()). + JvmMemoryPoolNameKey = attribute.Key("jvm.memory.pool.name") + + // JvmMemoryTypeKey is the attribute Key conforming to the + // "jvm.memory.type" semantic conventions. It represents the type of + // memory. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'heap', 'non_heap' + JvmMemoryTypeKey = attribute.Key("jvm.memory.type") +) + +var ( + // Heap memory + JvmMemoryTypeHeap = JvmMemoryTypeKey.String("heap") + // Non-heap memory + JvmMemoryTypeNonHeap = JvmMemoryTypeKey.String("non_heap") +) + +// JvmMemoryPoolName returns an attribute KeyValue conforming to the +// "jvm.memory.pool.name" semantic conventions. It represents the name of the +// memory pool. +func JvmMemoryPoolName(val string) attribute.KeyValue { + return JvmMemoryPoolNameKey.String(val) +} + +// Describes System metric attributes +const ( + // SystemDeviceKey is the attribute Key conforming to the "system.device" + // semantic conventions. 
It represents the device identifier + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '(identifier)' + SystemDeviceKey = attribute.Key("system.device") +) + +// SystemDevice returns an attribute KeyValue conforming to the +// "system.device" semantic conventions. It represents the device identifier +func SystemDevice(val string) attribute.KeyValue { + return SystemDeviceKey.String(val) +} + +// Describes System CPU metric attributes +const ( + // SystemCPULogicalNumberKey is the attribute Key conforming to the + // "system.cpu.logical_number" semantic conventions. It represents the + // logical CPU number [0..n-1] + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1 + SystemCPULogicalNumberKey = attribute.Key("system.cpu.logical_number") + + // SystemCPUStateKey is the attribute Key conforming to the + // "system.cpu.state" semantic conventions. It represents the state of the + // CPU + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'idle', 'interrupt' + SystemCPUStateKey = attribute.Key("system.cpu.state") +) + +var ( + // user + SystemCPUStateUser = SystemCPUStateKey.String("user") + // system + SystemCPUStateSystem = SystemCPUStateKey.String("system") + // nice + SystemCPUStateNice = SystemCPUStateKey.String("nice") + // idle + SystemCPUStateIdle = SystemCPUStateKey.String("idle") + // iowait + SystemCPUStateIowait = SystemCPUStateKey.String("iowait") + // interrupt + SystemCPUStateInterrupt = SystemCPUStateKey.String("interrupt") + // steal + SystemCPUStateSteal = SystemCPUStateKey.String("steal") +) + +// SystemCPULogicalNumber returns an attribute KeyValue conforming to the +// "system.cpu.logical_number" semantic conventions. It represents the logical +// CPU number [0..n-1] +func SystemCPULogicalNumber(val int) attribute.KeyValue { + return SystemCPULogicalNumberKey.Int(val) +} + +// Describes System Memory metric attributes +const ( + // SystemMemoryStateKey is the attribute Key conforming to the + // "system.memory.state" semantic conventions. It represents the memory + // state + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'free', 'cached' + SystemMemoryStateKey = attribute.Key("system.memory.state") +) + +var ( + // used + SystemMemoryStateUsed = SystemMemoryStateKey.String("used") + // free + SystemMemoryStateFree = SystemMemoryStateKey.String("free") + // shared + SystemMemoryStateShared = SystemMemoryStateKey.String("shared") + // buffers + SystemMemoryStateBuffers = SystemMemoryStateKey.String("buffers") + // cached + SystemMemoryStateCached = SystemMemoryStateKey.String("cached") +) + +// Describes System Memory Paging metric attributes +const ( + // SystemPagingDirectionKey is the attribute Key conforming to the + // "system.paging.direction" semantic conventions. It represents the paging + // access direction + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'in' + SystemPagingDirectionKey = attribute.Key("system.paging.direction") + + // SystemPagingStateKey is the attribute Key conforming to the + // "system.paging.state" semantic conventions. 
It represents the memory + // paging state + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'free' + SystemPagingStateKey = attribute.Key("system.paging.state") + + // SystemPagingTypeKey is the attribute Key conforming to the + // "system.paging.type" semantic conventions. It represents the memory + // paging type + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'minor' + SystemPagingTypeKey = attribute.Key("system.paging.type") +) + +var ( + // in + SystemPagingDirectionIn = SystemPagingDirectionKey.String("in") + // out + SystemPagingDirectionOut = SystemPagingDirectionKey.String("out") +) + +var ( + // used + SystemPagingStateUsed = SystemPagingStateKey.String("used") + // free + SystemPagingStateFree = SystemPagingStateKey.String("free") +) + +var ( + // major + SystemPagingTypeMajor = SystemPagingTypeKey.String("major") + // minor + SystemPagingTypeMinor = SystemPagingTypeKey.String("minor") +) + +// Describes Filesystem metric attributes +const ( + // SystemFilesystemModeKey is the attribute Key conforming to the + // "system.filesystem.mode" semantic conventions. It represents the + // filesystem mode + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'rw, ro' + SystemFilesystemModeKey = attribute.Key("system.filesystem.mode") + + // SystemFilesystemMountpointKey is the attribute Key conforming to the + // "system.filesystem.mountpoint" semantic conventions. It represents the + // filesystem mount path + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/mnt/data' + SystemFilesystemMountpointKey = attribute.Key("system.filesystem.mountpoint") + + // SystemFilesystemStateKey is the attribute Key conforming to the + // "system.filesystem.state" semantic conventions. It represents the + // filesystem state + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'used' + SystemFilesystemStateKey = attribute.Key("system.filesystem.state") + + // SystemFilesystemTypeKey is the attribute Key conforming to the + // "system.filesystem.type" semantic conventions. It represents the + // filesystem type + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'ext4' + SystemFilesystemTypeKey = attribute.Key("system.filesystem.type") +) + +var ( + // used + SystemFilesystemStateUsed = SystemFilesystemStateKey.String("used") + // free + SystemFilesystemStateFree = SystemFilesystemStateKey.String("free") + // reserved + SystemFilesystemStateReserved = SystemFilesystemStateKey.String("reserved") +) + +var ( + // fat32 + SystemFilesystemTypeFat32 = SystemFilesystemTypeKey.String("fat32") + // exfat + SystemFilesystemTypeExfat = SystemFilesystemTypeKey.String("exfat") + // ntfs + SystemFilesystemTypeNtfs = SystemFilesystemTypeKey.String("ntfs") + // refs + SystemFilesystemTypeRefs = SystemFilesystemTypeKey.String("refs") + // hfsplus + SystemFilesystemTypeHfsplus = SystemFilesystemTypeKey.String("hfsplus") + // ext4 + SystemFilesystemTypeExt4 = SystemFilesystemTypeKey.String("ext4") +) + +// SystemFilesystemMode returns an attribute KeyValue conforming to the +// "system.filesystem.mode" semantic conventions. 
It represents the filesystem +// mode +func SystemFilesystemMode(val string) attribute.KeyValue { + return SystemFilesystemModeKey.String(val) +} + +// SystemFilesystemMountpoint returns an attribute KeyValue conforming to +// the "system.filesystem.mountpoint" semantic conventions. It represents the +// filesystem mount path +func SystemFilesystemMountpoint(val string) attribute.KeyValue { + return SystemFilesystemMountpointKey.String(val) +} + +// Describes Network metric attributes +const ( + // SystemNetworkStateKey is the attribute Key conforming to the + // "system.network.state" semantic conventions. It represents a stateless + // protocol MUST NOT set this attribute + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'close_wait' + SystemNetworkStateKey = attribute.Key("system.network.state") +) + +var ( + // close + SystemNetworkStateClose = SystemNetworkStateKey.String("close") + // close_wait + SystemNetworkStateCloseWait = SystemNetworkStateKey.String("close_wait") + // closing + SystemNetworkStateClosing = SystemNetworkStateKey.String("closing") + // delete + SystemNetworkStateDelete = SystemNetworkStateKey.String("delete") + // established + SystemNetworkStateEstablished = SystemNetworkStateKey.String("established") + // fin_wait_1 + SystemNetworkStateFinWait1 = SystemNetworkStateKey.String("fin_wait_1") + // fin_wait_2 + SystemNetworkStateFinWait2 = SystemNetworkStateKey.String("fin_wait_2") + // last_ack + SystemNetworkStateLastAck = SystemNetworkStateKey.String("last_ack") + // listen + SystemNetworkStateListen = SystemNetworkStateKey.String("listen") + // syn_recv + SystemNetworkStateSynRecv = SystemNetworkStateKey.String("syn_recv") + // syn_sent + SystemNetworkStateSynSent = SystemNetworkStateKey.String("syn_sent") + // time_wait + SystemNetworkStateTimeWait = SystemNetworkStateKey.String("time_wait") +) + +// Describes System Process metric attributes +const ( + // SystemProcessesStatusKey is the attribute Key conforming to the + // "system.processes.status" semantic conventions. It represents the + // process state, e.g., [Linux Process State + // Codes](https://man7.org/linux/man-pages/man1/ps.1.html#PROCESS_STATE_CODES) + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'running' + SystemProcessesStatusKey = attribute.Key("system.processes.status") +) + +var ( + // running + SystemProcessesStatusRunning = SystemProcessesStatusKey.String("running") + // sleeping + SystemProcessesStatusSleeping = SystemProcessesStatusKey.String("sleeping") + // stopped + SystemProcessesStatusStopped = SystemProcessesStatusKey.String("stopped") + // defunct + SystemProcessesStatusDefunct = SystemProcessesStatusKey.String("defunct") +) + +// These attributes may be used to describe the client in a connection-based +// network interaction where there is one side that initiates the connection +// (the client is the side that initiates the connection). This covers all TCP +// network interactions since TCP is connection-based and one side initiates +// the connection (an exception is made for peer-to-peer communication over TCP +// where the "user-facing" surface of the protocol / API doesn't expose a clear +// notion of client and server). This also covers UDP network interactions +// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS. +const ( + // ClientAddressKey is the attribute Key conforming to the "client.address" + // semantic conventions. 
It represents the client address - domain name if + // available without reverse DNS lookup; otherwise, IP address or Unix + // domain socket name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'client.example.com', '10.1.2.80', '/tmp/my.sock' + // Note: When observed from the server side, and when communicating through + // an intermediary, `client.address` SHOULD represent the client address + // behind any intermediaries, for example proxies, if it's available. + ClientAddressKey = attribute.Key("client.address") + + // ClientPortKey is the attribute Key conforming to the "client.port" + // semantic conventions. It represents the client port number. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 65123 + // Note: When observed from the server side, and when communicating through + // an intermediary, `client.port` SHOULD represent the client port behind + // any intermediaries, for example proxies, if it's available. + ClientPortKey = attribute.Key("client.port") +) + +// ClientAddress returns an attribute KeyValue conforming to the +// "client.address" semantic conventions. It represents the client address - +// domain name if available without reverse DNS lookup; otherwise, IP address +// or Unix domain socket name. +func ClientAddress(val string) attribute.KeyValue { + return ClientAddressKey.String(val) +} + +// ClientPort returns an attribute KeyValue conforming to the "client.port" +// semantic conventions. It represents the client port number. +func ClientPort(val int) attribute.KeyValue { + return ClientPortKey.Int(val) +} + +// The attributes used to describe telemetry in the context of databases. +const ( + // DBCassandraConsistencyLevelKey is the attribute Key conforming to the + // "db.cassandra.consistency_level" semantic conventions. It represents the + // consistency level of the query. Based on consistency values from + // [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html). + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level") + + // DBCassandraCoordinatorDCKey is the attribute Key conforming to the + // "db.cassandra.coordinator.dc" semantic conventions. It represents the + // data center of the coordinating node for a query. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'us-west-2' + DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc") + + // DBCassandraCoordinatorIDKey is the attribute Key conforming to the + // "db.cassandra.coordinator.id" semantic conventions. It represents the ID + // of the coordinating node for a query. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af' + DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id") + + // DBCassandraIdempotenceKey is the attribute Key conforming to the + // "db.cassandra.idempotence" semantic conventions. It represents the + // whether or not the query is idempotent. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence") + + // DBCassandraPageSizeKey is the attribute Key conforming to the + // "db.cassandra.page_size" semantic conventions. It represents the fetch + // size used for paging, i.e. 
how many rows will be returned at once. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 5000 + DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size") + + // DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming + // to the "db.cassandra.speculative_execution_count" semantic conventions. + // It represents the number of times a query was speculatively executed. + // Not set or `0` if the query was not executed speculatively. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 0, 2 + DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count") + + // DBCassandraTableKey is the attribute Key conforming to the + // "db.cassandra.table" semantic conventions. It represents the name of the + // primary Cassandra table that the operation is acting upon, including the + // keyspace name (if applicable). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'mytable' + // Note: This mirrors the db.sql.table attribute but references cassandra + // rather than sql. It is not recommended to attempt any client-side + // parsing of `db.statement` just to get this property, but it should be + // set if it is provided by the library being instrumented. If the + // operation is acting upon an anonymous table, or more than one table, + // this value MUST NOT be set. + DBCassandraTableKey = attribute.Key("db.cassandra.table") + + // DBConnectionStringKey is the attribute Key conforming to the + // "db.connection_string" semantic conventions. It represents the + // connection string used to connect to the database. It is recommended to + // remove embedded credentials. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;' + DBConnectionStringKey = attribute.Key("db.connection_string") + + // DBCosmosDBClientIDKey is the attribute Key conforming to the + // "db.cosmosdb.client_id" semantic conventions. It represents the unique + // Cosmos client instance id. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '3ba4827d-4422-483f-b59f-85b74211c11d' + DBCosmosDBClientIDKey = attribute.Key("db.cosmosdb.client_id") + + // DBCosmosDBConnectionModeKey is the attribute Key conforming to the + // "db.cosmosdb.connection_mode" semantic conventions. It represents the + // cosmos client connection mode. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + DBCosmosDBConnectionModeKey = attribute.Key("db.cosmosdb.connection_mode") + + // DBCosmosDBContainerKey is the attribute Key conforming to the + // "db.cosmosdb.container" semantic conventions. It represents the cosmos + // DB container name. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'anystring' + DBCosmosDBContainerKey = attribute.Key("db.cosmosdb.container") + + // DBCosmosDBOperationTypeKey is the attribute Key conforming to the + // "db.cosmosdb.operation_type" semantic conventions. It represents the + // cosmosDB Operation Type. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + DBCosmosDBOperationTypeKey = attribute.Key("db.cosmosdb.operation_type") + + // DBCosmosDBRequestChargeKey is the attribute Key conforming to the + // "db.cosmosdb.request_charge" semantic conventions. 
It represents the RU
+	// consumed for that operation.
+	//
+	// Type: double
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 46.18, 1.0
+	DBCosmosDBRequestChargeKey = attribute.Key("db.cosmosdb.request_charge")
+
+	// DBCosmosDBRequestContentLengthKey is the attribute Key conforming to the
+	// "db.cosmosdb.request_content_length" semantic conventions. It represents
+	// the request payload size in bytes
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	DBCosmosDBRequestContentLengthKey = attribute.Key("db.cosmosdb.request_content_length")
+
+	// DBCosmosDBStatusCodeKey is the attribute Key conforming to the
+	// "db.cosmosdb.status_code" semantic conventions. It represents the cosmos
+	// DB status code.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 200, 201
+	DBCosmosDBStatusCodeKey = attribute.Key("db.cosmosdb.status_code")
+
+	// DBCosmosDBSubStatusCodeKey is the attribute Key conforming to the
+	// "db.cosmosdb.sub_status_code" semantic conventions. It represents the
+	// cosmos DB sub status code.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 1000, 1002
+	DBCosmosDBSubStatusCodeKey = attribute.Key("db.cosmosdb.sub_status_code")
+
+	// DBElasticsearchClusterNameKey is the attribute Key conforming to the
+	// "db.elasticsearch.cluster.name" semantic conventions. It represents the
+	// identifier of an Elasticsearch cluster.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'e9106fc68e3044f0b1475b04bf4ffd5f'
+	DBElasticsearchClusterNameKey = attribute.Key("db.elasticsearch.cluster.name")
+
+	// DBElasticsearchNodeNameKey is the attribute Key conforming to the
+	// "db.elasticsearch.node.name" semantic conventions. It represents the
+	// human-readable identifier of the node/instance to which a
+	// request was routed.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'instance-0000000001'
+	DBElasticsearchNodeNameKey = attribute.Key("db.elasticsearch.node.name")
+
+	// DBInstanceIDKey is the attribute Key conforming to the "db.instance.id"
+	// semantic conventions. It represents an identifier (address, unique name,
+	// or any other identifier) of the database instance that is executing
+	// queries or mutations on the current connection. This is useful in cases
+	// where the database is running in a clustered environment and the
+	// instrumentation is able to record the node executing the query. The
+	// client may obtain this value in databases like MySQL using queries like
+	// `select @@hostname`.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'mysql-e26b99z.example.com'
+	DBInstanceIDKey = attribute.Key("db.instance.id")
+
+	// DBJDBCDriverClassnameKey is the attribute Key conforming to the
+	// "db.jdbc.driver_classname" semantic conventions. It represents the
+	// fully-qualified class name of the [Java Database Connectivity
+	// (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/)
+	// driver used to connect.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'org.postgresql.Driver',
+	// 'com.microsoft.sqlserver.jdbc.SQLServerDriver'
+	DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname")
+
+	// DBMongoDBCollectionKey is the attribute Key conforming to the
+	// "db.mongodb.collection" semantic conventions. It represents the MongoDB
+	// collection being accessed within the database stated in `db.name`.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'customers', 'products'
+	DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection")
+
+	// DBMSSQLInstanceNameKey is the attribute Key conforming to the
+	// "db.mssql.instance_name" semantic conventions. It represents the
+	// Microsoft SQL Server [instance
+	// name](https://docs.microsoft.com/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15)
+	// connecting to. This name is used to determine the port of a named
+	// instance.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'MSSQLSERVER'
+	// Note: If setting a `db.mssql.instance_name`, `server.port` is no longer
+	// required (but still recommended if non-standard).
+	DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name")
+
+	// DBNameKey is the attribute Key conforming to the "db.name" semantic
+	// conventions. It represents the name of the database being accessed. For
+	// commands that switch the database, this should be set to the target
+	// database (even if the command fails).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'customers', 'main'
+	// Note: In some SQL databases, the database name to be used is called
+	// "schema name". In case there are multiple layers that could be
+	// considered for database name (e.g. Oracle instance name and schema
+	// name), the database name to be used is the more specific layer (e.g.
+	// Oracle schema name).
+	DBNameKey = attribute.Key("db.name")
+
+	// DBOperationKey is the attribute Key conforming to the "db.operation"
+	// semantic conventions. It represents the name of the operation being
+	// executed, e.g. the [MongoDB command
+	// name](https://docs.mongodb.com/manual/reference/command/#database-operations)
+	// such as `findAndModify`, or the SQL keyword.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'findAndModify', 'HMSET', 'SELECT'
+	// Note: When setting this to an SQL keyword, it is not recommended to
+	// attempt any client-side parsing of `db.statement` just to get this
+	// property, but it should be set if the operation name is provided by the
+	// library being instrumented. If the SQL statement has an ambiguous
+	// operation, or performs more than one operation, this value may be
+	// omitted.
+	DBOperationKey = attribute.Key("db.operation")
+
+	// DBRedisDBIndexKey is the attribute Key conforming to the
+	// "db.redis.database_index" semantic conventions. It represents the index
+	// of the database being accessed as used in the [`SELECT`
+	// command](https://redis.io/commands/select), provided as an integer. To
+	// be used instead of the generic `db.name` attribute.
+ // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 0, 1, 15 + DBRedisDBIndexKey = attribute.Key("db.redis.database_index") + + // DBSQLTableKey is the attribute Key conforming to the "db.sql.table" + // semantic conventions. It represents the name of the primary table that + // the operation is acting upon, including the database name (if + // applicable). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'public.users', 'customers' + // Note: It is not recommended to attempt any client-side parsing of + // `db.statement` just to get this property, but it should be set if it is + // provided by the library being instrumented. If the operation is acting + // upon an anonymous table, or more than one table, this value MUST NOT be + // set. + DBSQLTableKey = attribute.Key("db.sql.table") + + // DBStatementKey is the attribute Key conforming to the "db.statement" + // semantic conventions. It represents the database statement being + // executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"' + DBStatementKey = attribute.Key("db.statement") + + // DBSystemKey is the attribute Key conforming to the "db.system" semantic + // conventions. It represents an identifier for the database management + // system (DBMS) product being used. See below for a list of well-known + // identifiers. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + DBSystemKey = attribute.Key("db.system") + + // DBUserKey is the attribute Key conforming to the "db.user" semantic + // conventions. It represents the username for accessing the database. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'readonly_user', 'reporting_user' + DBUserKey = attribute.Key("db.user") +) + +var ( + // all + DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all") + // each_quorum + DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum") + // quorum + DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum") + // local_quorum + DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum") + // one + DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one") + // two + DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two") + // three + DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three") + // local_one + DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one") + // any + DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any") + // serial + DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial") + // local_serial + DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial") +) + +var ( + // Gateway (HTTP) connections mode + DBCosmosDBConnectionModeGateway = DBCosmosDBConnectionModeKey.String("gateway") + // Direct connection + DBCosmosDBConnectionModeDirect = DBCosmosDBConnectionModeKey.String("direct") +) + +var ( + // invalid + DBCosmosDBOperationTypeInvalid = DBCosmosDBOperationTypeKey.String("Invalid") + // create + DBCosmosDBOperationTypeCreate = DBCosmosDBOperationTypeKey.String("Create") + // patch + DBCosmosDBOperationTypePatch = 
DBCosmosDBOperationTypeKey.String("Patch") + // read + DBCosmosDBOperationTypeRead = DBCosmosDBOperationTypeKey.String("Read") + // read_feed + DBCosmosDBOperationTypeReadFeed = DBCosmosDBOperationTypeKey.String("ReadFeed") + // delete + DBCosmosDBOperationTypeDelete = DBCosmosDBOperationTypeKey.String("Delete") + // replace + DBCosmosDBOperationTypeReplace = DBCosmosDBOperationTypeKey.String("Replace") + // execute + DBCosmosDBOperationTypeExecute = DBCosmosDBOperationTypeKey.String("Execute") + // query + DBCosmosDBOperationTypeQuery = DBCosmosDBOperationTypeKey.String("Query") + // head + DBCosmosDBOperationTypeHead = DBCosmosDBOperationTypeKey.String("Head") + // head_feed + DBCosmosDBOperationTypeHeadFeed = DBCosmosDBOperationTypeKey.String("HeadFeed") + // upsert + DBCosmosDBOperationTypeUpsert = DBCosmosDBOperationTypeKey.String("Upsert") + // batch + DBCosmosDBOperationTypeBatch = DBCosmosDBOperationTypeKey.String("Batch") + // query_plan + DBCosmosDBOperationTypeQueryPlan = DBCosmosDBOperationTypeKey.String("QueryPlan") + // execute_javascript + DBCosmosDBOperationTypeExecuteJavascript = DBCosmosDBOperationTypeKey.String("ExecuteJavaScript") +) + +var ( + // Some other SQL database. Fallback only. See notes + DBSystemOtherSQL = DBSystemKey.String("other_sql") + // Microsoft SQL Server + DBSystemMSSQL = DBSystemKey.String("mssql") + // Microsoft SQL Server Compact + DBSystemMssqlcompact = DBSystemKey.String("mssqlcompact") + // MySQL + DBSystemMySQL = DBSystemKey.String("mysql") + // Oracle Database + DBSystemOracle = DBSystemKey.String("oracle") + // IBM DB2 + DBSystemDB2 = DBSystemKey.String("db2") + // PostgreSQL + DBSystemPostgreSQL = DBSystemKey.String("postgresql") + // Amazon Redshift + DBSystemRedshift = DBSystemKey.String("redshift") + // Apache Hive + DBSystemHive = DBSystemKey.String("hive") + // Cloudscape + DBSystemCloudscape = DBSystemKey.String("cloudscape") + // HyperSQL DataBase + DBSystemHSQLDB = DBSystemKey.String("hsqldb") + // Progress Database + DBSystemProgress = DBSystemKey.String("progress") + // SAP MaxDB + DBSystemMaxDB = DBSystemKey.String("maxdb") + // SAP HANA + DBSystemHanaDB = DBSystemKey.String("hanadb") + // Ingres + DBSystemIngres = DBSystemKey.String("ingres") + // FirstSQL + DBSystemFirstSQL = DBSystemKey.String("firstsql") + // EnterpriseDB + DBSystemEDB = DBSystemKey.String("edb") + // InterSystems Caché + DBSystemCache = DBSystemKey.String("cache") + // Adabas (Adaptable Database System) + DBSystemAdabas = DBSystemKey.String("adabas") + // Firebird + DBSystemFirebird = DBSystemKey.String("firebird") + // Apache Derby + DBSystemDerby = DBSystemKey.String("derby") + // FileMaker + DBSystemFilemaker = DBSystemKey.String("filemaker") + // Informix + DBSystemInformix = DBSystemKey.String("informix") + // InstantDB + DBSystemInstantDB = DBSystemKey.String("instantdb") + // InterBase + DBSystemInterbase = DBSystemKey.String("interbase") + // MariaDB + DBSystemMariaDB = DBSystemKey.String("mariadb") + // Netezza + DBSystemNetezza = DBSystemKey.String("netezza") + // Pervasive PSQL + DBSystemPervasive = DBSystemKey.String("pervasive") + // PointBase + DBSystemPointbase = DBSystemKey.String("pointbase") + // SQLite + DBSystemSqlite = DBSystemKey.String("sqlite") + // Sybase + DBSystemSybase = DBSystemKey.String("sybase") + // Teradata + DBSystemTeradata = DBSystemKey.String("teradata") + // Vertica + DBSystemVertica = DBSystemKey.String("vertica") + // H2 + DBSystemH2 = DBSystemKey.String("h2") + // ColdFusion IMQ + DBSystemColdfusion = 
DBSystemKey.String("coldfusion") + // Apache Cassandra + DBSystemCassandra = DBSystemKey.String("cassandra") + // Apache HBase + DBSystemHBase = DBSystemKey.String("hbase") + // MongoDB + DBSystemMongoDB = DBSystemKey.String("mongodb") + // Redis + DBSystemRedis = DBSystemKey.String("redis") + // Couchbase + DBSystemCouchbase = DBSystemKey.String("couchbase") + // CouchDB + DBSystemCouchDB = DBSystemKey.String("couchdb") + // Microsoft Azure Cosmos DB + DBSystemCosmosDB = DBSystemKey.String("cosmosdb") + // Amazon DynamoDB + DBSystemDynamoDB = DBSystemKey.String("dynamodb") + // Neo4j + DBSystemNeo4j = DBSystemKey.String("neo4j") + // Apache Geode + DBSystemGeode = DBSystemKey.String("geode") + // Elasticsearch + DBSystemElasticsearch = DBSystemKey.String("elasticsearch") + // Memcached + DBSystemMemcached = DBSystemKey.String("memcached") + // CockroachDB + DBSystemCockroachdb = DBSystemKey.String("cockroachdb") + // OpenSearch + DBSystemOpensearch = DBSystemKey.String("opensearch") + // ClickHouse + DBSystemClickhouse = DBSystemKey.String("clickhouse") + // Cloud Spanner + DBSystemSpanner = DBSystemKey.String("spanner") + // Trino + DBSystemTrino = DBSystemKey.String("trino") +) + +// DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the +// "db.cassandra.coordinator.dc" semantic conventions. It represents the data +// center of the coordinating node for a query. +func DBCassandraCoordinatorDC(val string) attribute.KeyValue { + return DBCassandraCoordinatorDCKey.String(val) +} + +// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the +// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of +// the coordinating node for a query. +func DBCassandraCoordinatorID(val string) attribute.KeyValue { + return DBCassandraCoordinatorIDKey.String(val) +} + +// DBCassandraIdempotence returns an attribute KeyValue conforming to the +// "db.cassandra.idempotence" semantic conventions. It represents the whether +// or not the query is idempotent. +func DBCassandraIdempotence(val bool) attribute.KeyValue { + return DBCassandraIdempotenceKey.Bool(val) +} + +// DBCassandraPageSize returns an attribute KeyValue conforming to the +// "db.cassandra.page_size" semantic conventions. It represents the fetch size +// used for paging, i.e. how many rows will be returned at once. +func DBCassandraPageSize(val int) attribute.KeyValue { + return DBCassandraPageSizeKey.Int(val) +} + +// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue +// conforming to the "db.cassandra.speculative_execution_count" semantic +// conventions. It represents the number of times a query was speculatively +// executed. Not set or `0` if the query was not executed speculatively. +func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue { + return DBCassandraSpeculativeExecutionCountKey.Int(val) +} + +// DBCassandraTable returns an attribute KeyValue conforming to the +// "db.cassandra.table" semantic conventions. It represents the name of the +// primary Cassandra table that the operation is acting upon, including the +// keyspace name (if applicable). +func DBCassandraTable(val string) attribute.KeyValue { + return DBCassandraTableKey.String(val) +} + +// DBConnectionString returns an attribute KeyValue conforming to the +// "db.connection_string" semantic conventions. It represents the connection +// string used to connect to the database. It is recommended to remove embedded +// credentials. 
+func DBConnectionString(val string) attribute.KeyValue {
+	return DBConnectionStringKey.String(val)
+}
+
+// DBCosmosDBClientID returns an attribute KeyValue conforming to the
+// "db.cosmosdb.client_id" semantic conventions. It represents the unique
+// Cosmos client instance id.
+func DBCosmosDBClientID(val string) attribute.KeyValue {
+	return DBCosmosDBClientIDKey.String(val)
+}
+
+// DBCosmosDBContainer returns an attribute KeyValue conforming to the
+// "db.cosmosdb.container" semantic conventions. It represents the cosmos DB
+// container name.
+func DBCosmosDBContainer(val string) attribute.KeyValue {
+	return DBCosmosDBContainerKey.String(val)
+}
+
+// DBCosmosDBRequestCharge returns an attribute KeyValue conforming to the
+// "db.cosmosdb.request_charge" semantic conventions. It represents the RU
+// consumed for that operation.
+func DBCosmosDBRequestCharge(val float64) attribute.KeyValue {
+	return DBCosmosDBRequestChargeKey.Float64(val)
+}
+
+// DBCosmosDBRequestContentLength returns an attribute KeyValue conforming
+// to the "db.cosmosdb.request_content_length" semantic conventions. It
+// represents the request payload size in bytes
+func DBCosmosDBRequestContentLength(val int) attribute.KeyValue {
+	return DBCosmosDBRequestContentLengthKey.Int(val)
+}
+
+// DBCosmosDBStatusCode returns an attribute KeyValue conforming to the
+// "db.cosmosdb.status_code" semantic conventions. It represents the cosmos DB
+// status code.
+func DBCosmosDBStatusCode(val int) attribute.KeyValue {
+	return DBCosmosDBStatusCodeKey.Int(val)
+}
+
+// DBCosmosDBSubStatusCode returns an attribute KeyValue conforming to the
+// "db.cosmosdb.sub_status_code" semantic conventions. It represents the cosmos
+// DB sub status code.
+func DBCosmosDBSubStatusCode(val int) attribute.KeyValue {
+	return DBCosmosDBSubStatusCodeKey.Int(val)
+}
+
+// DBElasticsearchClusterName returns an attribute KeyValue conforming to
+// the "db.elasticsearch.cluster.name" semantic conventions. It represents the
+// identifier of an Elasticsearch cluster.
+func DBElasticsearchClusterName(val string) attribute.KeyValue {
+	return DBElasticsearchClusterNameKey.String(val)
+}
+
+// DBElasticsearchNodeName returns an attribute KeyValue conforming to the
+// "db.elasticsearch.node.name" semantic conventions. It represents the
+// human-readable identifier of the node/instance to which a
+// request was routed.
+func DBElasticsearchNodeName(val string) attribute.KeyValue {
+	return DBElasticsearchNodeNameKey.String(val)
+}
+
+// DBInstanceID returns an attribute KeyValue conforming to the
+// "db.instance.id" semantic conventions. It represents an identifier (address,
+// unique name, or any other identifier) of the database instance that is
+// executing queries or mutations on the current connection. This is useful in
+// cases where the database is running in a clustered environment and the
+// instrumentation is able to record the node executing the query. The client
+// may obtain this value in databases like MySQL using queries like `select
+// @@hostname`.
+func DBInstanceID(val string) attribute.KeyValue {
+	return DBInstanceIDKey.String(val)
+}
+
+// DBJDBCDriverClassname returns an attribute KeyValue conforming to the
+// "db.jdbc.driver_classname" semantic conventions. It represents the
+// fully-qualified class name of the [Java Database Connectivity
+// (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver
+// used to connect.
+func DBJDBCDriverClassname(val string) attribute.KeyValue {
+	return DBJDBCDriverClassnameKey.String(val)
+}
+
+// DBMongoDBCollection returns an attribute KeyValue conforming to the
+// "db.mongodb.collection" semantic conventions. It represents the MongoDB
+// collection being accessed within the database stated in `db.name`.
+func DBMongoDBCollection(val string) attribute.KeyValue {
+	return DBMongoDBCollectionKey.String(val)
+}
+
+// DBMSSQLInstanceName returns an attribute KeyValue conforming to the
+// "db.mssql.instance_name" semantic conventions. It represents the Microsoft
+// SQL Server [instance
+// name](https://docs.microsoft.com/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15)
+// connecting to. This name is used to determine the port of a named instance.
+func DBMSSQLInstanceName(val string) attribute.KeyValue {
+	return DBMSSQLInstanceNameKey.String(val)
+}
+
+// DBName returns an attribute KeyValue conforming to the "db.name" semantic
+// conventions. It represents the name of the database being accessed. For
+// commands that switch the database, this should be set to the target
+// database (even if the command fails).
+func DBName(val string) attribute.KeyValue {
+	return DBNameKey.String(val)
+}
+
+// DBOperation returns an attribute KeyValue conforming to the
+// "db.operation" semantic conventions. It represents the name of the operation
+// being executed, e.g. the [MongoDB command
+// name](https://docs.mongodb.com/manual/reference/command/#database-operations)
+// such as `findAndModify`, or the SQL keyword.
+func DBOperation(val string) attribute.KeyValue {
+	return DBOperationKey.String(val)
+}
+
+// DBRedisDBIndex returns an attribute KeyValue conforming to the
+// "db.redis.database_index" semantic conventions. It represents the index of
+// the database being accessed as used in the [`SELECT`
+// command](https://redis.io/commands/select), provided as an integer. To be
+// used instead of the generic `db.name` attribute.
+func DBRedisDBIndex(val int) attribute.KeyValue {
+	return DBRedisDBIndexKey.Int(val)
+}
+
+// DBSQLTable returns an attribute KeyValue conforming to the "db.sql.table"
+// semantic conventions. It represents the name of the primary table that the
+// operation is acting upon, including the database name (if applicable).
+func DBSQLTable(val string) attribute.KeyValue {
+	return DBSQLTableKey.String(val)
+}
+
+// DBStatement returns an attribute KeyValue conforming to the
+// "db.statement" semantic conventions. It represents the database statement
+// being executed.
+func DBStatement(val string) attribute.KeyValue {
+	return DBStatementKey.String(val)
+}
+
+// DBUser returns an attribute KeyValue conforming to the "db.user" semantic
+// conventions. It represents the username for accessing the database.
+func DBUser(val string) attribute.KeyValue {
+	return DBUserKey.String(val)
+}
+
+// Describes deprecated HTTP attributes.
+const (
+	// HTTPFlavorKey is the attribute Key conforming to the "http.flavor"
+	// semantic conventions.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Deprecated: use `network.protocol.name` instead.
+	HTTPFlavorKey = attribute.Key("http.flavor")
+
+	// HTTPMethodKey is the attribute Key conforming to the "http.method"
+	// semantic conventions.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Examples: 'GET', 'POST', 'HEAD'
+	// Deprecated: use `http.request.method` instead.
+ HTTPMethodKey = attribute.Key("http.method") + + // HTTPRequestContentLengthKey is the attribute Key conforming to the + // "http.request_content_length" semantic conventions. + // + // Type: int + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 3495 + // Deprecated: use `http.request.header.content-length` instead. + HTTPRequestContentLengthKey = attribute.Key("http.request_content_length") + + // HTTPResponseContentLengthKey is the attribute Key conforming to the + // "http.response_content_length" semantic conventions. + // + // Type: int + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 3495 + // Deprecated: use `http.response.header.content-length` instead. + HTTPResponseContentLengthKey = attribute.Key("http.response_content_length") + + // HTTPSchemeKey is the attribute Key conforming to the "http.scheme" + // semantic conventions. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 'http', 'https' + // Deprecated: use `url.scheme` instead. + HTTPSchemeKey = attribute.Key("http.scheme") + + // HTTPStatusCodeKey is the attribute Key conforming to the + // "http.status_code" semantic conventions. + // + // Type: int + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 200 + // Deprecated: use `http.response.status_code` instead. + HTTPStatusCodeKey = attribute.Key("http.status_code") + + // HTTPTargetKey is the attribute Key conforming to the "http.target" + // semantic conventions. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: '/search?q=OpenTelemetry#SemConv' + // Deprecated: use `url.path` and `url.query` instead. + HTTPTargetKey = attribute.Key("http.target") + + // HTTPURLKey is the attribute Key conforming to the "http.url" semantic + // conventions. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv' + // Deprecated: use `url.full` instead. + HTTPURLKey = attribute.Key("http.url") + + // HTTPUserAgentKey is the attribute Key conforming to the + // "http.user_agent" semantic conventions. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 'CERN-LineMode/2.15 libwww/2.17b3', 'Mozilla/5.0 (iPhone; CPU + // iPhone OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) + // Version/14.1.2 Mobile/15E148 Safari/604.1' + // Deprecated: use `user_agent.original` instead. + HTTPUserAgentKey = attribute.Key("http.user_agent") +) + +var ( + // HTTP/1.0 + // + // Deprecated: use `network.protocol.name` instead. + HTTPFlavorHTTP10 = HTTPFlavorKey.String("1.0") + // HTTP/1.1 + // + // Deprecated: use `network.protocol.name` instead. + HTTPFlavorHTTP11 = HTTPFlavorKey.String("1.1") + // HTTP/2 + // + // Deprecated: use `network.protocol.name` instead. + HTTPFlavorHTTP20 = HTTPFlavorKey.String("2.0") + // HTTP/3 + // + // Deprecated: use `network.protocol.name` instead. + HTTPFlavorHTTP30 = HTTPFlavorKey.String("3.0") + // SPDY protocol + // + // Deprecated: use `network.protocol.name` instead. + HTTPFlavorSPDY = HTTPFlavorKey.String("SPDY") + // QUIC protocol + // + // Deprecated: use `network.protocol.name` instead. + HTTPFlavorQUIC = HTTPFlavorKey.String("QUIC") +) + +// HTTPMethod returns an attribute KeyValue conforming to the "http.method" +// semantic conventions. +// +// Deprecated: use `http.request.method` instead. 
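+//
+// A minimal migration sketch (assuming span is a trace.Span and this package
+// is imported as semconv; HTTPRequestMethodGet is defined later in this
+// file):
+//
+//	span.SetAttributes(semconv.HTTPMethod("GET"))    // deprecated form
+//	span.SetAttributes(semconv.HTTPRequestMethodGet) // replacement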
+func HTTPMethod(val string) attribute.KeyValue { + return HTTPMethodKey.String(val) +} + +// HTTPRequestContentLength returns an attribute KeyValue conforming to the +// "http.request_content_length" semantic conventions. +// +// Deprecated: use `http.request.header.content-length` instead. +func HTTPRequestContentLength(val int) attribute.KeyValue { + return HTTPRequestContentLengthKey.Int(val) +} + +// HTTPResponseContentLength returns an attribute KeyValue conforming to the +// "http.response_content_length" semantic conventions. +// +// Deprecated: use `http.response.header.content-length` instead. +func HTTPResponseContentLength(val int) attribute.KeyValue { + return HTTPResponseContentLengthKey.Int(val) +} + +// HTTPScheme returns an attribute KeyValue conforming to the "http.scheme" +// semantic conventions. +// +// Deprecated: use `url.scheme` instead. +func HTTPScheme(val string) attribute.KeyValue { + return HTTPSchemeKey.String(val) +} + +// HTTPStatusCode returns an attribute KeyValue conforming to the +// "http.status_code" semantic conventions. +// +// Deprecated: use `http.response.status_code` instead. +func HTTPStatusCode(val int) attribute.KeyValue { + return HTTPStatusCodeKey.Int(val) +} + +// HTTPTarget returns an attribute KeyValue conforming to the "http.target" +// semantic conventions. +// +// Deprecated: use `url.path` and `url.query` instead. +func HTTPTarget(val string) attribute.KeyValue { + return HTTPTargetKey.String(val) +} + +// HTTPURL returns an attribute KeyValue conforming to the "http.url" +// semantic conventions. +// +// Deprecated: use `url.full` instead. +func HTTPURL(val string) attribute.KeyValue { + return HTTPURLKey.String(val) +} + +// HTTPUserAgent returns an attribute KeyValue conforming to the +// "http.user_agent" semantic conventions. +// +// Deprecated: use `user_agent.original` instead. +func HTTPUserAgent(val string) attribute.KeyValue { + return HTTPUserAgentKey.String(val) +} + +// These attributes may be used for any network related operation. +const ( + // NetHostNameKey is the attribute Key conforming to the "net.host.name" + // semantic conventions. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 'example.com' + // Deprecated: use `server.address`. + NetHostNameKey = attribute.Key("net.host.name") + + // NetHostPortKey is the attribute Key conforming to the "net.host.port" + // semantic conventions. + // + // Type: int + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 8080 + // Deprecated: use `server.port`. + NetHostPortKey = attribute.Key("net.host.port") + + // NetPeerNameKey is the attribute Key conforming to the "net.peer.name" + // semantic conventions. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 'example.com' + // Deprecated: use `server.address` on client spans and `client.address` on + // server spans. + NetPeerNameKey = attribute.Key("net.peer.name") + + // NetPeerPortKey is the attribute Key conforming to the "net.peer.port" + // semantic conventions. + // + // Type: int + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 8080 + // Deprecated: use `server.port` on client spans and `client.port` on + // server spans. + NetPeerPortKey = attribute.Key("net.peer.port") + + // NetProtocolNameKey is the attribute Key conforming to the + // "net.protocol.name" semantic conventions. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 'amqp', 'http', 'mqtt' + // Deprecated: use `network.protocol.name`. + NetProtocolNameKey = attribute.Key("net.protocol.name") + + // NetProtocolVersionKey is the attribute Key conforming to the + // "net.protocol.version" semantic conventions. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: '3.1.1' + // Deprecated: use `network.protocol.version`. + NetProtocolVersionKey = attribute.Key("net.protocol.version") + + // NetSockFamilyKey is the attribute Key conforming to the + // "net.sock.family" semantic conventions. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: deprecated + // Deprecated: use `network.transport` and `network.type`. + NetSockFamilyKey = attribute.Key("net.sock.family") + + // NetSockHostAddrKey is the attribute Key conforming to the + // "net.sock.host.addr" semantic conventions. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: '/var/my.sock' + // Deprecated: use `network.local.address`. + NetSockHostAddrKey = attribute.Key("net.sock.host.addr") + + // NetSockHostPortKey is the attribute Key conforming to the + // "net.sock.host.port" semantic conventions. + // + // Type: int + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 8080 + // Deprecated: use `network.local.port`. + NetSockHostPortKey = attribute.Key("net.sock.host.port") + + // NetSockPeerAddrKey is the attribute Key conforming to the + // "net.sock.peer.addr" semantic conventions. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: '192.168.0.1' + // Deprecated: use `network.peer.address`. + NetSockPeerAddrKey = attribute.Key("net.sock.peer.addr") + + // NetSockPeerNameKey is the attribute Key conforming to the + // "net.sock.peer.name" semantic conventions. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: '/var/my.sock' + // Deprecated: no replacement at this time. + NetSockPeerNameKey = attribute.Key("net.sock.peer.name") + + // NetSockPeerPortKey is the attribute Key conforming to the + // "net.sock.peer.port" semantic conventions. + // + // Type: int + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 65531 + // Deprecated: use `network.peer.port`. + NetSockPeerPortKey = attribute.Key("net.sock.peer.port") + + // NetTransportKey is the attribute Key conforming to the "net.transport" + // semantic conventions. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: deprecated + // Deprecated: use `network.transport`. + NetTransportKey = attribute.Key("net.transport") +) + +var ( + // IPv4 address + // + // Deprecated: use `network.transport` and `network.type`. + NetSockFamilyInet = NetSockFamilyKey.String("inet") + // IPv6 address + // + // Deprecated: use `network.transport` and `network.type`. + NetSockFamilyInet6 = NetSockFamilyKey.String("inet6") + // Unix domain socket path + // + // Deprecated: use `network.transport` and `network.type`. + NetSockFamilyUnix = NetSockFamilyKey.String("unix") +) + +var ( + // ip_tcp + // + // Deprecated: use `network.transport`. + NetTransportTCP = NetTransportKey.String("ip_tcp") + // ip_udp + // + // Deprecated: use `network.transport`. + NetTransportUDP = NetTransportKey.String("ip_udp") + // Named or anonymous pipe + // + // Deprecated: use `network.transport`. 
+ NetTransportPipe = NetTransportKey.String("pipe") + // In-process communication + // + // Deprecated: use `network.transport`. + NetTransportInProc = NetTransportKey.String("inproc") + // Something else (non IP-based) + // + // Deprecated: use `network.transport`. + NetTransportOther = NetTransportKey.String("other") +) + +// NetHostName returns an attribute KeyValue conforming to the +// "net.host.name" semantic conventions. +// +// Deprecated: use `server.address`. +func NetHostName(val string) attribute.KeyValue { + return NetHostNameKey.String(val) +} + +// NetHostPort returns an attribute KeyValue conforming to the +// "net.host.port" semantic conventions. +// +// Deprecated: use `server.port`. +func NetHostPort(val int) attribute.KeyValue { + return NetHostPortKey.Int(val) +} + +// NetPeerName returns an attribute KeyValue conforming to the +// "net.peer.name" semantic conventions. +// +// Deprecated: use `server.address` on client spans and `client.address` on +// server spans. +func NetPeerName(val string) attribute.KeyValue { + return NetPeerNameKey.String(val) +} + +// NetPeerPort returns an attribute KeyValue conforming to the +// "net.peer.port" semantic conventions. +// +// Deprecated: use `server.port` on client spans and `client.port` on server +// spans. +func NetPeerPort(val int) attribute.KeyValue { + return NetPeerPortKey.Int(val) +} + +// NetProtocolName returns an attribute KeyValue conforming to the +// "net.protocol.name" semantic conventions. +// +// Deprecated: use `network.protocol.name`. +func NetProtocolName(val string) attribute.KeyValue { + return NetProtocolNameKey.String(val) +} + +// NetProtocolVersion returns an attribute KeyValue conforming to the +// "net.protocol.version" semantic conventions. +// +// Deprecated: use `network.protocol.version`. +func NetProtocolVersion(val string) attribute.KeyValue { + return NetProtocolVersionKey.String(val) +} + +// NetSockHostAddr returns an attribute KeyValue conforming to the +// "net.sock.host.addr" semantic conventions. +// +// Deprecated: use `network.local.address`. +func NetSockHostAddr(val string) attribute.KeyValue { + return NetSockHostAddrKey.String(val) +} + +// NetSockHostPort returns an attribute KeyValue conforming to the +// "net.sock.host.port" semantic conventions. +// +// Deprecated: use `network.local.port`. +func NetSockHostPort(val int) attribute.KeyValue { + return NetSockHostPortKey.Int(val) +} + +// NetSockPeerAddr returns an attribute KeyValue conforming to the +// "net.sock.peer.addr" semantic conventions. +// +// Deprecated: use `network.peer.address`. +func NetSockPeerAddr(val string) attribute.KeyValue { + return NetSockPeerAddrKey.String(val) +} + +// NetSockPeerName returns an attribute KeyValue conforming to the +// "net.sock.peer.name" semantic conventions. +// +// Deprecated: no replacement at this time. +func NetSockPeerName(val string) attribute.KeyValue { + return NetSockPeerNameKey.String(val) +} + +// NetSockPeerPort returns an attribute KeyValue conforming to the +// "net.sock.peer.port" semantic conventions. +// +// Deprecated: use `network.peer.port`. +func NetSockPeerPort(val int) attribute.KeyValue { + return NetSockPeerPortKey.Int(val) +} + +// These attributes may be used to describe the receiver of a network +// exchange/packet. These should be used when there is no client/server +// relationship between the two sides, or when that relationship is unknown. +// This covers low-level network interactions (e.g. 
packet tracing) where you
+// don't know if there was a connection or which side initiated it. This also
+// covers unidirectional UDP flows and peer-to-peer communication where the
+// "user-facing" surface of the protocol / API doesn't expose a clear notion of
+// client and server.
+const (
+	// DestinationAddressKey is the attribute Key conforming to the
+	// "destination.address" semantic conventions. It represents the
+	// destination address - domain name if available without reverse DNS
+	// lookup; otherwise, IP address or Unix domain socket name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'destination.example.com', '10.1.2.80', '/tmp/my.sock'
+	// Note: When observed from the source side, and when communicating through
+	// an intermediary, `destination.address` SHOULD represent the destination
+	// address behind any intermediaries, for example proxies, if it's
+	// available.
+	DestinationAddressKey = attribute.Key("destination.address")
+
+	// DestinationPortKey is the attribute Key conforming to the
+	// "destination.port" semantic conventions. It represents the destination
+	// port number
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 3389, 2888
+	DestinationPortKey = attribute.Key("destination.port")
+)
+
+// DestinationAddress returns an attribute KeyValue conforming to the
+// "destination.address" semantic conventions. It represents the destination
+// address - domain name if available without reverse DNS lookup; otherwise, IP
+// address or Unix domain socket name.
+func DestinationAddress(val string) attribute.KeyValue {
+	return DestinationAddressKey.String(val)
+}
+
+// DestinationPort returns an attribute KeyValue conforming to the
+// "destination.port" semantic conventions. It represents the destination port
+// number
+func DestinationPort(val int) attribute.KeyValue {
+	return DestinationPortKey.Int(val)
+}
+
+// These attributes may be used for any disk related operation.
+const (
+	// DiskIoDirectionKey is the attribute Key conforming to the
+	// "disk.io.direction" semantic conventions. It represents the disk IO
+	// operation direction.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'read'
+	DiskIoDirectionKey = attribute.Key("disk.io.direction")
+)
+
+var (
+	// read
+	DiskIoDirectionRead = DiskIoDirectionKey.String("read")
+	// write
+	DiskIoDirectionWrite = DiskIoDirectionKey.String("write")
+)
+
+// The shared attributes used to report an error.
+const (
+	// ErrorTypeKey is the attribute Key conforming to the "error.type"
+	// semantic conventions. It describes a class of error the
+	// operation ended with.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'timeout', 'java.net.UnknownHostException',
+	// 'server_certificate_invalid', '500'
+	// Note: The `error.type` SHOULD be predictable and SHOULD have low
+	// cardinality.
+	// Instrumentations SHOULD document the list of errors they report.
+	//
+	// The cardinality of `error.type` within one instrumentation library
+	// SHOULD be low.
+	// Telemetry consumers that aggregate data from multiple instrumentation
+	// libraries and applications
+	// should be prepared for `error.type` to have high cardinality at query
+	// time when no
+	// additional filters are applied.
+	//
+	// If the operation has completed successfully, instrumentations SHOULD NOT
+	// set `error.type`.
+	//
+	// If a specific domain defines its own set of error identifiers (such as
+	// HTTP or gRPC status codes),
+	// it's RECOMMENDED to:
+	//
+	// * Use a domain-specific attribute
+	// * Set `error.type` to capture all errors, regardless of whether they are
+	// defined within the domain-specific set or not.
+	ErrorTypeKey = attribute.Key("error.type")
+)
+
+var (
+	// A fallback error value to be used when the instrumentation doesn't define a custom value
+	ErrorTypeOther = ErrorTypeKey.String("_OTHER")
+)
+
+// The shared attributes used to report a single exception associated with a
+// span or log.
+const (
+	// ExceptionEscapedKey is the attribute Key conforming to the
+	// "exception.escaped" semantic conventions. It SHOULD be
+	// set to true if the exception event is recorded at a point where it is
+	// known that the exception is escaping the scope of the span.
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Note: An exception is considered to have escaped (or left) the scope of
+	// a span,
+	// if that span is ended while the exception is still logically "in
+	// flight".
+	// This may be actually "in flight" in some languages (e.g. if the
+	// exception
+	// is passed to a Context manager's `__exit__` method in Python) but will
+	// usually be caught at the point of recording the exception in most
+	// languages.
+	//
+	// It is usually not possible to determine at the point where an exception
+	// is thrown
+	// whether it will escape the scope of a span.
+	// However, it is trivial to know that an exception
+	// will escape, if one checks for an active exception just before ending
+	// the span,
+	// as done in the [example for recording span
+	// exceptions](#recording-an-exception).
+	//
+	// It follows that an exception may still escape the scope of the span
+	// even if the `exception.escaped` attribute was not set or set to false,
+	// since the event might have been recorded at a time where it was not
+	// clear whether the exception will escape.
+	ExceptionEscapedKey = attribute.Key("exception.escaped")
+
+	// ExceptionMessageKey is the attribute Key conforming to the
+	// "exception.message" semantic conventions. It represents the exception
+	// message.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'Division by zero', "Can't convert 'int' object to str
+	// implicitly"
+	ExceptionMessageKey = attribute.Key("exception.message")
+
+	// ExceptionStacktraceKey is the attribute Key conforming to the
+	// "exception.stacktrace" semantic conventions. It represents a stacktrace
+	// as a string in the natural representation for the language runtime. The
+	// representation is to be determined and documented by each language SIG.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'Exception in thread "main" java.lang.RuntimeException: Test
+	// exception\\n at '
+	// 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at '
+	// 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at '
+	// 'com.example.GenerateTrace.main(GenerateTrace.java:5)'
+	ExceptionStacktraceKey = attribute.Key("exception.stacktrace")
+
+	// ExceptionTypeKey is the attribute Key conforming to the "exception.type"
+	// semantic conventions. It represents the type of the exception (its
+	// fully-qualified class name, if applicable). The dynamic type of the
+	// exception should be preferred over the static type in languages that
+	// support it.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'java.net.ConnectException', 'OSError'
+	ExceptionTypeKey = attribute.Key("exception.type")
+)
+
+// ExceptionEscaped returns an attribute KeyValue conforming to the
+// "exception.escaped" semantic conventions. It SHOULD be set to
+// true if the exception event is recorded at a point where it is known that
+// the exception is escaping the scope of the span.
+func ExceptionEscaped(val bool) attribute.KeyValue {
+	return ExceptionEscapedKey.Bool(val)
+}
+
+// ExceptionMessage returns an attribute KeyValue conforming to the
+// "exception.message" semantic conventions. It represents the exception
+// message.
+func ExceptionMessage(val string) attribute.KeyValue {
+	return ExceptionMessageKey.String(val)
+}
+
+// ExceptionStacktrace returns an attribute KeyValue conforming to the
+// "exception.stacktrace" semantic conventions. It represents a stacktrace as a
+// string in the natural representation for the language runtime. The
+// representation is to be determined and documented by each language SIG.
+func ExceptionStacktrace(val string) attribute.KeyValue {
+	return ExceptionStacktraceKey.String(val)
+}
+
+// ExceptionType returns an attribute KeyValue conforming to the
+// "exception.type" semantic conventions. It represents the type of the
+// exception (its fully-qualified class name, if applicable). The dynamic type
+// of the exception should be preferred over the static type in languages that
+// support it.
+func ExceptionType(val string) attribute.KeyValue {
+	return ExceptionTypeKey.String(val)
+}
+
+// Semantic convention attributes in the HTTP namespace.
+const (
+	// HTTPRequestBodySizeKey is the attribute Key conforming to the
+	// "http.request.body.size" semantic conventions. It represents the size of
+	// the request payload body in bytes. This is the number of bytes
+	// transferred excluding headers and is often, but not always, present as
+	// the
+	// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+	// header. For requests using transport encoding, this should be the
+	// compressed size.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 3495
+	HTTPRequestBodySizeKey = attribute.Key("http.request.body.size")
+
+	// HTTPRequestMethodKey is the attribute Key conforming to the
+	// "http.request.method" semantic conventions. It represents the HTTP
+	// request method.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'GET', 'POST', 'HEAD'
+	// Note: HTTP request method value SHOULD be "known" to the
+	// instrumentation.
+	// By default, this convention defines "known" methods as the ones listed
+	// in [RFC9110](https://www.rfc-editor.org/rfc/rfc9110.html#name-methods)
+	// and the PATCH method defined in
+	// [RFC5789](https://www.rfc-editor.org/rfc/rfc5789.html).
+	//
+	// If the HTTP request method is not known to instrumentation, it MUST set
+	// the `http.request.method` attribute to `_OTHER`.
+	//
+	// If the HTTP instrumentation could end up converting valid HTTP request
+	// methods to `_OTHER`, then it MUST provide a way to override
+	// the list of known HTTP methods.
If this override is done via environment + // variable, then the environment variable MUST be named + // OTEL_INSTRUMENTATION_HTTP_KNOWN_METHODS and support a comma-separated + // list of case-sensitive known HTTP methods + // (this list MUST be a full override of the default known method, it is + // not a list of known methods in addition to the defaults). + // + // HTTP method names are case-sensitive and `http.request.method` attribute + // value MUST match a known HTTP method name exactly. + // Instrumentations for specific web frameworks that consider HTTP methods + // to be case insensitive, SHOULD populate a canonical equivalent. + // Tracing instrumentations that do so, MUST also set + // `http.request.method_original` to the original value. + HTTPRequestMethodKey = attribute.Key("http.request.method") + + // HTTPRequestMethodOriginalKey is the attribute Key conforming to the + // "http.request.method_original" semantic conventions. It represents the + // original HTTP method sent by the client in the request line. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'GeT', 'ACL', 'foo' + HTTPRequestMethodOriginalKey = attribute.Key("http.request.method_original") + + // HTTPRequestResendCountKey is the attribute Key conforming to the + // "http.request.resend_count" semantic conventions. It represents the + // ordinal number of request resending attempt (for any reason, including + // redirects). + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 3 + // Note: The resend count SHOULD be updated each time an HTTP request gets + // resent by the client, regardless of what was the cause of the resending + // (e.g. redirection, authorization failure, 503 Server Unavailable, + // network issues, or any other). + HTTPRequestResendCountKey = attribute.Key("http.request.resend_count") + + // HTTPResponseBodySizeKey is the attribute Key conforming to the + // "http.response.body.size" semantic conventions. It represents the size + // of the response payload body in bytes. This is the number of bytes + // transferred excluding headers and is often, but not always, present as + // the + // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) + // header. For requests using transport encoding, this should be the + // compressed size. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 3495 + HTTPResponseBodySizeKey = attribute.Key("http.response.body.size") + + // HTTPResponseStatusCodeKey is the attribute Key conforming to the + // "http.response.status_code" semantic conventions. It represents the + // [HTTP response status + // code](https://tools.ietf.org/html/rfc7231#section-6). + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 200 + HTTPResponseStatusCodeKey = attribute.Key("http.response.status_code") + + // HTTPRouteKey is the attribute Key conforming to the "http.route" + // semantic conventions. It represents the matched route, that is, the path + // template in the format used by the respective server framework. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '/users/:userID?', '{controller}/{action}/{id?}' + // Note: MUST NOT be populated when this is not supported by the HTTP + // server framework as the route attribute should have low-cardinality and + // the URI path can NOT substitute it. 
+ // SHOULD include the [application + // root](/docs/http/http-spans.md#http-server-definitions) if there is one. + HTTPRouteKey = attribute.Key("http.route") +) + +var ( + // CONNECT method + HTTPRequestMethodConnect = HTTPRequestMethodKey.String("CONNECT") + // DELETE method + HTTPRequestMethodDelete = HTTPRequestMethodKey.String("DELETE") + // GET method + HTTPRequestMethodGet = HTTPRequestMethodKey.String("GET") + // HEAD method + HTTPRequestMethodHead = HTTPRequestMethodKey.String("HEAD") + // OPTIONS method + HTTPRequestMethodOptions = HTTPRequestMethodKey.String("OPTIONS") + // PATCH method + HTTPRequestMethodPatch = HTTPRequestMethodKey.String("PATCH") + // POST method + HTTPRequestMethodPost = HTTPRequestMethodKey.String("POST") + // PUT method + HTTPRequestMethodPut = HTTPRequestMethodKey.String("PUT") + // TRACE method + HTTPRequestMethodTrace = HTTPRequestMethodKey.String("TRACE") + // Any HTTP method that the instrumentation has no prior knowledge of + HTTPRequestMethodOther = HTTPRequestMethodKey.String("_OTHER") +) + +// HTTPRequestBodySize returns an attribute KeyValue conforming to the +// "http.request.body.size" semantic conventions. It represents the size of the +// request payload body in bytes. This is the number of bytes transferred +// excluding headers and is often, but not always, present as the +// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) +// header. For requests using transport encoding, this should be the compressed +// size. +func HTTPRequestBodySize(val int) attribute.KeyValue { + return HTTPRequestBodySizeKey.Int(val) +} + +// HTTPRequestMethodOriginal returns an attribute KeyValue conforming to the +// "http.request.method_original" semantic conventions. It represents the +// original HTTP method sent by the client in the request line. +func HTTPRequestMethodOriginal(val string) attribute.KeyValue { + return HTTPRequestMethodOriginalKey.String(val) +} + +// HTTPRequestResendCount returns an attribute KeyValue conforming to the +// "http.request.resend_count" semantic conventions. It represents the ordinal +// number of request resending attempt (for any reason, including redirects). +func HTTPRequestResendCount(val int) attribute.KeyValue { + return HTTPRequestResendCountKey.Int(val) +} + +// HTTPResponseBodySize returns an attribute KeyValue conforming to the +// "http.response.body.size" semantic conventions. It represents the size of +// the response payload body in bytes. This is the number of bytes transferred +// excluding headers and is often, but not always, present as the +// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) +// header. For requests using transport encoding, this should be the compressed +// size. +func HTTPResponseBodySize(val int) attribute.KeyValue { + return HTTPResponseBodySizeKey.Int(val) +} + +// HTTPResponseStatusCode returns an attribute KeyValue conforming to the +// "http.response.status_code" semantic conventions. It represents the [HTTP +// response status code](https://tools.ietf.org/html/rfc7231#section-6). +func HTTPResponseStatusCode(val int) attribute.KeyValue { + return HTTPResponseStatusCodeKey.Int(val) +} + +// HTTPRoute returns an attribute KeyValue conforming to the "http.route" +// semantic conventions. It represents the matched route, that is, the path +// template in the format used by the respective server framework. 
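+//
+// A minimal server-side sketch (assuming span is a trace.Span for an HTTP
+// server request and this package is imported as semconv):
+//
+//	span.SetAttributes(
+//		semconv.HTTPRequestMethodGet,
+//		semconv.HTTPResponseStatusCode(200),
+//		semconv.HTTPRoute("/users/:userID?"),
+//	)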
+func HTTPRoute(val string) attribute.KeyValue { + return HTTPRouteKey.String(val) +} + +// Attributes describing telemetry around messaging systems and messaging +// activities. +const ( + // MessagingBatchMessageCountKey is the attribute Key conforming to the + // "messaging.batch.message_count" semantic conventions. It represents the + // number of messages sent, received, or processed in the scope of the + // batching operation. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 0, 1, 2 + // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on + // spans that operate with a single message. When a messaging client + // library supports both batch and single-message API for the same + // operation, instrumentations SHOULD use `messaging.batch.message_count` + // for batching APIs and SHOULD NOT use it for single-message APIs. + MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count") + + // MessagingClientIDKey is the attribute Key conforming to the + // "messaging.client_id" semantic conventions. It represents a unique + // identifier for the client that consumes or produces a message. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'client-5', 'myhost@8742@s8083jm' + MessagingClientIDKey = attribute.Key("messaging.client_id") + + // MessagingDestinationAnonymousKey is the attribute Key conforming to the + // "messaging.destination.anonymous" semantic conventions. It represents a + // boolean that is true if the message destination is anonymous (could be + // unnamed or have auto-generated name). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous") + + // MessagingDestinationNameKey is the attribute Key conforming to the + // "messaging.destination.name" semantic conventions. It represents the + // message destination name + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'MyQueue', 'MyTopic' + // Note: Destination name SHOULD uniquely identify a specific queue, topic + // or other entity within the broker. If + // the broker doesn't have such notion, the destination name SHOULD + // uniquely identify the broker. + MessagingDestinationNameKey = attribute.Key("messaging.destination.name") + + // MessagingDestinationTemplateKey is the attribute Key conforming to the + // "messaging.destination.template" semantic conventions. It represents the + // low cardinality representation of the messaging destination name + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/customers/{customerID}' + // Note: Destination names could be constructed from templates. An example + // would be a destination name involving a user name or product id. + // Although the destination name in this case is of high cardinality, the + // underlying template is of low cardinality and can be effectively used + // for grouping and aggregation. + MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template") + + // MessagingDestinationTemporaryKey is the attribute Key conforming to the + // "messaging.destination.temporary" semantic conventions. It represents a + // boolean that is true if the message destination is temporary and might + // not exist anymore after messages are processed. 
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary")
+
+ // MessagingDestinationPublishAnonymousKey is the attribute Key conforming
+ // to the "messaging.destination_publish.anonymous" semantic conventions.
+ // It represents a boolean that is true if the publish message destination
+ // is anonymous (could be unnamed or have auto-generated name).
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ MessagingDestinationPublishAnonymousKey = attribute.Key("messaging.destination_publish.anonymous")
+
+ // MessagingDestinationPublishNameKey is the attribute Key conforming to
+ // the "messaging.destination_publish.name" semantic conventions. It
+ // represents the name of the original destination the message was
+ // published to
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'MyQueue', 'MyTopic'
+ // Note: The name SHOULD uniquely identify a specific queue, topic, or
+ // other entity within the broker. If
+ // the broker doesn't have such a notion, the original destination name
+ // SHOULD uniquely identify the broker.
+ MessagingDestinationPublishNameKey = attribute.Key("messaging.destination_publish.name")
+
+ // MessagingGCPPubsubMessageOrderingKeyKey is the attribute Key conforming
+ // to the "messaging.gcp_pubsub.message.ordering_key" semantic conventions.
+ // It represents the ordering key for a given message. If the attribute is
+ // not present, the message does not have an ordering key.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'ordering_key'
+ MessagingGCPPubsubMessageOrderingKeyKey = attribute.Key("messaging.gcp_pubsub.message.ordering_key")
+
+ // MessagingKafkaConsumerGroupKey is the attribute Key conforming to the
+ // "messaging.kafka.consumer.group" semantic conventions. It represents the
+ // name of the Kafka Consumer Group that is handling the message. Only
+ // applies to consumers, not producers.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'my-group'
+ MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group")
+
+ // MessagingKafkaDestinationPartitionKey is the attribute Key conforming to
+ // the "messaging.kafka.destination.partition" semantic conventions. It
+ // represents the partition the message is sent to.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 2
+ MessagingKafkaDestinationPartitionKey = attribute.Key("messaging.kafka.destination.partition")
+
+ // MessagingKafkaMessageKeyKey is the attribute Key conforming to the
+ // "messaging.kafka.message.key" semantic conventions. It represents the
+ // message keys in Kafka, which are used for grouping alike messages to
+ // ensure they're processed on the same partition. They differ from
+ // `messaging.message.id` in that they're not unique. If the key is `null`,
+ // the attribute MUST NOT be set.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'myKey'
+ // Note: If the key type is not string, its string representation has to
+ // be supplied for the attribute. If the key has no unambiguous, canonical
+ // string form, don't include its value.
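+ //
+ // A hedged producer-side sketch (assumed identifiers: a trace.Span named
+ // `span` and this package imported as `semconv`; the destination and key
+ // values are illustrative, drawn from the documented examples):
+ //
+ //	span.SetAttributes(
+ //		semconv.MessagingSystemKafka,
+ //		semconv.MessagingDestinationName("MyTopic"),
+ //		semconv.MessagingKafkaMessageKey("myKey"),
+ //	)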
+ MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key")
+
+ // MessagingKafkaMessageOffsetKey is the attribute Key conforming to the
+ // "messaging.kafka.message.offset" semantic conventions. It represents the
+ // offset of a record in the corresponding Kafka partition.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 42
+ MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset")
+
+ // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the
+ // "messaging.kafka.message.tombstone" semantic conventions. It represents
+ // a boolean that is true if the message is a tombstone.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone")
+
+ // MessagingMessageBodySizeKey is the attribute Key conforming to the
+ // "messaging.message.body.size" semantic conventions. It represents the
+ // size of the message body in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1439
+ // Note: This can refer to either the compressed or uncompressed body size.
+ // If both sizes are known, the uncompressed
+ // body size should be used.
+ MessagingMessageBodySizeKey = attribute.Key("messaging.message.body.size")
+
+ // MessagingMessageConversationIDKey is the attribute Key conforming to the
+ // "messaging.message.conversation_id" semantic conventions. It represents
+ // the conversation ID identifying the conversation to which the message
+ // belongs, represented as a string. Sometimes called "Correlation ID".
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'MyConversationID'
+ MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id")
+
+ // MessagingMessageEnvelopeSizeKey is the attribute Key conforming to the
+ // "messaging.message.envelope.size" semantic conventions. It represents
+ // the size of the message body and metadata in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 2738
+ // Note: This can refer to either the compressed or uncompressed size. If
+ // both sizes are known, the uncompressed
+ // size should be used.
+ MessagingMessageEnvelopeSizeKey = attribute.Key("messaging.message.envelope.size")
+
+ // MessagingMessageIDKey is the attribute Key conforming to the
+ // "messaging.message.id" semantic conventions. It represents a value used
+ // by the messaging system as an identifier for the message, represented as
+ // a string.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '452a7c7c7c7048c2f887f61572b18fc2'
+ MessagingMessageIDKey = attribute.Key("messaging.message.id")
+
+ // MessagingOperationKey is the attribute Key conforming to the
+ // "messaging.operation" semantic conventions. It represents a string
+ // identifying the kind of messaging operation.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Note: If a custom value is used, it MUST be of low cardinality.
+ MessagingOperationKey = attribute.Key("messaging.operation")
+
+ // MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key
+ // conforming to the "messaging.rabbitmq.destination.routing_key" semantic
+ // conventions. It represents the RabbitMQ message routing key.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'myKey'
+ MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key")
+
+ // MessagingRocketmqClientGroupKey is the attribute Key conforming to the
+ // "messaging.rocketmq.client_group" semantic conventions. It represents
+ // the name of the RocketMQ producer/consumer group that is handling the
+ // message. The client type is identified by the SpanKind.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'myConsumerGroup'
+ MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group")
+
+ // MessagingRocketmqConsumptionModelKey is the attribute Key conforming to
+ // the "messaging.rocketmq.consumption_model" semantic conventions. It
+ // represents the model of message consumption. This only applies to
+ // consumer spans.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model")
+
+ // MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key
+ // conforming to the "messaging.rocketmq.message.delay_time_level" semantic
+ // conventions. It represents the delay time level for a delay message,
+ // which determines the message delay time.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 3
+ MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level")
+
+ // MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key
+ // conforming to the "messaging.rocketmq.message.delivery_timestamp"
+ // semantic conventions. It represents the timestamp in milliseconds at
+ // which the delay message is expected to be delivered to the consumer.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1665987217045
+ MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp")
+
+ // MessagingRocketmqMessageGroupKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.group" semantic conventions. It represents
+ // the message group, which is essential for FIFO messages. Messages that
+ // belong to the same message group are always processed one by one within
+ // the same consumer group.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'myMessageGroup'
+ MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group")
+
+ // MessagingRocketmqMessageKeysKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.keys" semantic conventions. It represents
+ // the key(s) of the message, another way to mark the message besides the
+ // message ID.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'keyA', 'keyB'
+ MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys")
+
+ // MessagingRocketmqMessageTagKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.tag" semantic conventions. It represents the
+ // secondary classifier of a message besides the topic.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'tagA'
+ MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag")
+
+ // MessagingRocketmqMessageTypeKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.type" semantic conventions. It represents
+ // the type of message.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type")
+
+ // MessagingRocketmqNamespaceKey is the attribute Key conforming to the
+ // "messaging.rocketmq.namespace" semantic conventions. It represents the
+ // namespace of RocketMQ resources; resources in different namespaces are
+ // independent of each other.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'myNamespace'
+ MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace")
+
+ // MessagingSystemKey is the attribute Key conforming to the
+ // "messaging.system" semantic conventions. It represents an identifier for
+ // the messaging system being used. See below for a list of well-known
+ // identifiers.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ MessagingSystemKey = attribute.Key("messaging.system")
+)
+
+var (
+ // One or more messages are provided for publishing to an intermediary. If a single message is published, the context of the "Publish" span can be used as the creation context and no "Create" span needs to be created
+ MessagingOperationPublish = MessagingOperationKey.String("publish")
+ // A message is created. "Create" spans always refer to a single message and are used to provide a unique creation context for messages in batch publishing scenarios
+ MessagingOperationCreate = MessagingOperationKey.String("create")
+ // One or more messages are requested by a consumer. This operation refers to pull-based scenarios, where consumers explicitly call methods of messaging SDKs to receive messages
+ MessagingOperationReceive = MessagingOperationKey.String("receive")
+ // One or more messages are passed to a consumer.
+ // This operation refers to push-based scenarios, where consumers register callbacks which get called by messaging SDKs
+ MessagingOperationDeliver = MessagingOperationKey.String("deliver")
+)
+
+var (
+ // Clustering consumption model
+ MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering")
+ // Broadcasting consumption model
+ MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting")
+)
+
+var (
+ // Normal message
+ MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal")
+ // FIFO message
+ MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo")
+ // Delay message
+ MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay")
+ // Transaction message
+ MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction")
+)
+
+var (
+ // Apache ActiveMQ
+ MessagingSystemActivemq = MessagingSystemKey.String("activemq")
+ // Amazon Simple Queue Service (SQS)
+ MessagingSystemAWSSqs = MessagingSystemKey.String("aws_sqs")
+ // Azure Event Grid
+ MessagingSystemAzureEventgrid = MessagingSystemKey.String("azure_eventgrid")
+ // Azure Event Hubs
+ MessagingSystemAzureEventhubs = MessagingSystemKey.String("azure_eventhubs")
+ // Azure Service Bus
+ MessagingSystemAzureServicebus = MessagingSystemKey.String("azure_servicebus")
+ // Google Cloud Pub/Sub
+ MessagingSystemGCPPubsub = MessagingSystemKey.String("gcp_pubsub")
+ // Java Message Service
+ MessagingSystemJms = MessagingSystemKey.String("jms")
+ // RabbitMQ
+ MessagingSystemRabbitmq = MessagingSystemKey.String("rabbitmq")
+ // Apache Kafka
+ MessagingSystemKafka = MessagingSystemKey.String("kafka")
+ // Apache RocketMQ
+ MessagingSystemRocketmq = MessagingSystemKey.String("rocketmq")
+)
+
+// MessagingBatchMessageCount returns an attribute KeyValue conforming to
+// the "messaging.batch.message_count" semantic conventions. It represents the
+// number of messages sent, received, or processed in the scope of the batching
+// operation.
+func MessagingBatchMessageCount(val int) attribute.KeyValue {
+ return MessagingBatchMessageCountKey.Int(val)
+}
+
+// MessagingClientID returns an attribute KeyValue conforming to the
+// "messaging.client_id" semantic conventions. It represents a unique
+// identifier for the client that consumes or produces a message.
+func MessagingClientID(val string) attribute.KeyValue {
+ return MessagingClientIDKey.String(val)
+}
+
+// MessagingDestinationAnonymous returns an attribute KeyValue conforming to
+// the "messaging.destination.anonymous" semantic conventions. It represents a
+// boolean that is true if the message destination is anonymous (could be
+// unnamed or have auto-generated name).
+func MessagingDestinationAnonymous(val bool) attribute.KeyValue {
+ return MessagingDestinationAnonymousKey.Bool(val)
+}
+
+// MessagingDestinationName returns an attribute KeyValue conforming to the
+// "messaging.destination.name" semantic conventions. It represents the message
+// destination name
+func MessagingDestinationName(val string) attribute.KeyValue {
+ return MessagingDestinationNameKey.String(val)
+}
+
+// MessagingDestinationTemplate returns an attribute KeyValue conforming to
+// the "messaging.destination.template" semantic conventions.
+// It represents the low cardinality representation of the messaging
+// destination name
+func MessagingDestinationTemplate(val string) attribute.KeyValue {
+ return MessagingDestinationTemplateKey.String(val)
+}
+
+// MessagingDestinationTemporary returns an attribute KeyValue conforming to
+// the "messaging.destination.temporary" semantic conventions. It represents a
+// boolean that is true if the message destination is temporary and might not
+// exist anymore after messages are processed.
+func MessagingDestinationTemporary(val bool) attribute.KeyValue {
+ return MessagingDestinationTemporaryKey.Bool(val)
+}
+
+// MessagingDestinationPublishAnonymous returns an attribute KeyValue
+// conforming to the "messaging.destination_publish.anonymous" semantic
+// conventions. It represents a boolean that is true if the publish message
+// destination is anonymous (could be unnamed or have auto-generated name).
+func MessagingDestinationPublishAnonymous(val bool) attribute.KeyValue {
+ return MessagingDestinationPublishAnonymousKey.Bool(val)
+}
+
+// MessagingDestinationPublishName returns an attribute KeyValue conforming
+// to the "messaging.destination_publish.name" semantic conventions. It
+// represents the name of the original destination the message was published to
+func MessagingDestinationPublishName(val string) attribute.KeyValue {
+ return MessagingDestinationPublishNameKey.String(val)
+}
+
+// MessagingGCPPubsubMessageOrderingKey returns an attribute KeyValue
+// conforming to the "messaging.gcp_pubsub.message.ordering_key" semantic
+// conventions. It represents the ordering key for a given message. If the
+// attribute is not present, the message does not have an ordering key.
+func MessagingGCPPubsubMessageOrderingKey(val string) attribute.KeyValue {
+ return MessagingGCPPubsubMessageOrderingKeyKey.String(val)
+}
+
+// MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to
+// the "messaging.kafka.consumer.group" semantic conventions. It represents the
+// name of the Kafka Consumer Group that is handling the message. Only applies
+// to consumers, not producers.
+func MessagingKafkaConsumerGroup(val string) attribute.KeyValue {
+ return MessagingKafkaConsumerGroupKey.String(val)
+}
+
+// MessagingKafkaDestinationPartition returns an attribute KeyValue
+// conforming to the "messaging.kafka.destination.partition" semantic
+// conventions. It represents the partition the message is sent to.
+func MessagingKafkaDestinationPartition(val int) attribute.KeyValue {
+ return MessagingKafkaDestinationPartitionKey.Int(val)
+}
+
+// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the
+// "messaging.kafka.message.key" semantic conventions. It represents the
+// message keys in Kafka, which are used for grouping alike messages to ensure
+// they're processed on the same partition. They differ from
+// `messaging.message.id` in that they're not unique. If the key is `null`, the
+// attribute MUST NOT be set.
+func MessagingKafkaMessageKey(val string) attribute.KeyValue {
+ return MessagingKafkaMessageKeyKey.String(val)
+}
+
+// MessagingKafkaMessageOffset returns an attribute KeyValue conforming to
+// the "messaging.kafka.message.offset" semantic conventions. It represents the
+// offset of a record in the corresponding Kafka partition.
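+//
+// A hedged consumer-side sketch (assumed identifiers: a trace.Span named
+// `span` and this package imported as `semconv`; values taken from the
+// documented examples):
+//
+//	span.SetAttributes(
+//		semconv.MessagingKafkaConsumerGroup("my-group"),
+//		semconv.MessagingKafkaDestinationPartition(2),
+//		semconv.MessagingKafkaMessageOffset(42),
+//	)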
+func MessagingKafkaMessageOffset(val int) attribute.KeyValue { + return MessagingKafkaMessageOffsetKey.Int(val) +} + +// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming +// to the "messaging.kafka.message.tombstone" semantic conventions. It +// represents a boolean that is true if the message is a tombstone. +func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue { + return MessagingKafkaMessageTombstoneKey.Bool(val) +} + +// MessagingMessageBodySize returns an attribute KeyValue conforming to the +// "messaging.message.body.size" semantic conventions. It represents the size +// of the message body in bytes. +func MessagingMessageBodySize(val int) attribute.KeyValue { + return MessagingMessageBodySizeKey.Int(val) +} + +// MessagingMessageConversationID returns an attribute KeyValue conforming +// to the "messaging.message.conversation_id" semantic conventions. It +// represents the conversation ID identifying the conversation to which the +// message belongs, represented as a string. Sometimes called "Correlation ID". +func MessagingMessageConversationID(val string) attribute.KeyValue { + return MessagingMessageConversationIDKey.String(val) +} + +// MessagingMessageEnvelopeSize returns an attribute KeyValue conforming to +// the "messaging.message.envelope.size" semantic conventions. It represents +// the size of the message body and metadata in bytes. +func MessagingMessageEnvelopeSize(val int) attribute.KeyValue { + return MessagingMessageEnvelopeSizeKey.Int(val) +} + +// MessagingMessageID returns an attribute KeyValue conforming to the +// "messaging.message.id" semantic conventions. It represents a value used by +// the messaging system as an identifier for the message, represented as a +// string. +func MessagingMessageID(val string) attribute.KeyValue { + return MessagingMessageIDKey.String(val) +} + +// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue +// conforming to the "messaging.rabbitmq.destination.routing_key" semantic +// conventions. It represents the rabbitMQ message routing key. +func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue { + return MessagingRabbitmqDestinationRoutingKeyKey.String(val) +} + +// MessagingRocketmqClientGroup returns an attribute KeyValue conforming to +// the "messaging.rocketmq.client_group" semantic conventions. It represents +// the name of the RocketMQ producer/consumer group that is handling the +// message. The client type is identified by the SpanKind. +func MessagingRocketmqClientGroup(val string) attribute.KeyValue { + return MessagingRocketmqClientGroupKey.String(val) +} + +// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue +// conforming to the "messaging.rocketmq.message.delay_time_level" semantic +// conventions. It represents the delay time level for delay message, which +// determines the message delay time. +func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue { + return MessagingRocketmqMessageDelayTimeLevelKey.Int(val) +} + +// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue +// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic +// conventions. It represents the timestamp in milliseconds that the delay +// message is expected to be delivered to consumer. 
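+//
+// A hedged sketch for a RocketMQ delay message (assumed identifiers `span`
+// and `semconv` as in the earlier sketches; the timestamp is the documented
+// example value):
+//
+//	span.SetAttributes(
+//		semconv.MessagingSystemRocketmq,
+//		semconv.MessagingRocketmqMessageTypeDelay,
+//		semconv.MessagingRocketmqMessageDeliveryTimestamp(1665987217045),
+//	)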
+func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue {
+ return MessagingRocketmqMessageDeliveryTimestampKey.Int(val)
+}
+
+// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.group" semantic conventions. It represents
+// the message group, which is essential for FIFO messages. Messages that
+// belong to the same message group are always processed one by one within the
+// same consumer group.
+func MessagingRocketmqMessageGroup(val string) attribute.KeyValue {
+ return MessagingRocketmqMessageGroupKey.String(val)
+}
+
+// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.keys" semantic conventions. It represents
+// the key(s) of the message, another way to mark the message besides the
+// message ID.
+func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue {
+ return MessagingRocketmqMessageKeysKey.StringSlice(val)
+}
+
+// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.tag" semantic conventions. It represents the
+// secondary classifier of a message besides the topic.
+func MessagingRocketmqMessageTag(val string) attribute.KeyValue {
+ return MessagingRocketmqMessageTagKey.String(val)
+}
+
+// MessagingRocketmqNamespace returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.namespace" semantic conventions. It represents the
+// namespace of RocketMQ resources; resources in different namespaces are
+// independent of each other.
+func MessagingRocketmqNamespace(val string) attribute.KeyValue {
+ return MessagingRocketmqNamespaceKey.String(val)
+}
+
+// These attributes may be used for any network related operation.
+const (
+ // NetworkCarrierIccKey is the attribute Key conforming to the
+ // "network.carrier.icc" semantic conventions. It represents the ISO 3166-1
+ // alpha-2 2-character country code associated with the mobile carrier
+ // network.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'DE'
+ NetworkCarrierIccKey = attribute.Key("network.carrier.icc")
+
+ // NetworkCarrierMccKey is the attribute Key conforming to the
+ // "network.carrier.mcc" semantic conventions. It represents the mobile
+ // carrier country code.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '310'
+ NetworkCarrierMccKey = attribute.Key("network.carrier.mcc")
+
+ // NetworkCarrierMncKey is the attribute Key conforming to the
+ // "network.carrier.mnc" semantic conventions. It represents the mobile
+ // carrier network code.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '001'
+ NetworkCarrierMncKey = attribute.Key("network.carrier.mnc")
+
+ // NetworkCarrierNameKey is the attribute Key conforming to the
+ // "network.carrier.name" semantic conventions. It represents the name of
+ // the mobile carrier.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'sprint'
+ NetworkCarrierNameKey = attribute.Key("network.carrier.name")
+
+ // NetworkConnectionSubtypeKey is the attribute Key conforming to the
+ // "network.connection.subtype" semantic conventions. It describes more
+ // details regarding the connection type. It may be the type of cell
+ // technology connection, but it could be used for describing details
+ // about a wifi connection.
+ // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'LTE' + NetworkConnectionSubtypeKey = attribute.Key("network.connection.subtype") + + // NetworkConnectionTypeKey is the attribute Key conforming to the + // "network.connection.type" semantic conventions. It represents the + // internet connection type. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'wifi' + NetworkConnectionTypeKey = attribute.Key("network.connection.type") + + // NetworkIoDirectionKey is the attribute Key conforming to the + // "network.io.direction" semantic conventions. It represents the network + // IO operation direction. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'transmit' + NetworkIoDirectionKey = attribute.Key("network.io.direction") + + // NetworkLocalAddressKey is the attribute Key conforming to the + // "network.local.address" semantic conventions. It represents the local + // address of the network connection - IP address or Unix domain socket + // name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '10.1.2.80', '/tmp/my.sock' + NetworkLocalAddressKey = attribute.Key("network.local.address") + + // NetworkLocalPortKey is the attribute Key conforming to the + // "network.local.port" semantic conventions. It represents the local port + // number of the network connection. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 65123 + NetworkLocalPortKey = attribute.Key("network.local.port") + + // NetworkPeerAddressKey is the attribute Key conforming to the + // "network.peer.address" semantic conventions. It represents the peer + // address of the network connection - IP address or Unix domain socket + // name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '10.1.2.80', '/tmp/my.sock' + NetworkPeerAddressKey = attribute.Key("network.peer.address") + + // NetworkPeerPortKey is the attribute Key conforming to the + // "network.peer.port" semantic conventions. It represents the peer port + // number of the network connection. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 65123 + NetworkPeerPortKey = attribute.Key("network.peer.port") + + // NetworkProtocolNameKey is the attribute Key conforming to the + // "network.protocol.name" semantic conventions. It represents the [OSI + // application layer](https://osi-model.com/application-layer/) or non-OSI + // equivalent. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'amqp', 'http', 'mqtt' + // Note: The value SHOULD be normalized to lowercase. + NetworkProtocolNameKey = attribute.Key("network.protocol.name") + + // NetworkProtocolVersionKey is the attribute Key conforming to the + // "network.protocol.version" semantic conventions. It represents the + // version of the protocol specified in `network.protocol.name`. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '3.1.1' + // Note: `network.protocol.version` refers to the version of the protocol + // used and might be different from the protocol client's version. If the + // HTTP client has a version of `0.27.2`, but sends HTTP version `1.1`, + // this attribute should be set to `1.1`. 
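+ //
+ // A hedged example following the note above (assumed identifiers: a
+ // trace.Span `span` and this package imported as `semconv`): an HTTP/1.1
+ // exchange would be recorded as
+ //
+ //	span.SetAttributes(
+ //		semconv.NetworkProtocolName("http"),
+ //		semconv.NetworkProtocolVersion("1.1"),
+ //	)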
+ NetworkProtocolVersionKey = attribute.Key("network.protocol.version")
+
+ // NetworkTransportKey is the attribute Key conforming to the
+ // "network.transport" semantic conventions. It represents the [OSI
+ // transport layer](https://osi-model.com/transport-layer/) or
+ // [inter-process communication
+ // method](https://wikipedia.org/wiki/Inter-process_communication).
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'tcp', 'udp'
+ // Note: The value SHOULD be normalized to lowercase.
+ //
+ // Consider always setting the transport when setting a port number, since
+ // a port number is ambiguous without knowing the transport. For example,
+ // different processes could be listening on TCP port 12345 and UDP port
+ // 12345.
+ NetworkTransportKey = attribute.Key("network.transport")
+
+ // NetworkTypeKey is the attribute Key conforming to the "network.type"
+ // semantic conventions. It represents the [OSI network
+ // layer](https://osi-model.com/network-layer/) or non-OSI equivalent.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'ipv4', 'ipv6'
+ // Note: The value SHOULD be normalized to lowercase.
+ NetworkTypeKey = attribute.Key("network.type")
+)
+
+var (
+ // GPRS
+ NetworkConnectionSubtypeGprs = NetworkConnectionSubtypeKey.String("gprs")
+ // EDGE
+ NetworkConnectionSubtypeEdge = NetworkConnectionSubtypeKey.String("edge")
+ // UMTS
+ NetworkConnectionSubtypeUmts = NetworkConnectionSubtypeKey.String("umts")
+ // CDMA
+ NetworkConnectionSubtypeCdma = NetworkConnectionSubtypeKey.String("cdma")
+ // EVDO Rel. 0
+ NetworkConnectionSubtypeEvdo0 = NetworkConnectionSubtypeKey.String("evdo_0")
+ // EVDO Rev. A
+ NetworkConnectionSubtypeEvdoA = NetworkConnectionSubtypeKey.String("evdo_a")
+ // CDMA2000 1XRTT
+ NetworkConnectionSubtypeCdma20001xrtt = NetworkConnectionSubtypeKey.String("cdma2000_1xrtt")
+ // HSDPA
+ NetworkConnectionSubtypeHsdpa = NetworkConnectionSubtypeKey.String("hsdpa")
+ // HSUPA
+ NetworkConnectionSubtypeHsupa = NetworkConnectionSubtypeKey.String("hsupa")
+ // HSPA
+ NetworkConnectionSubtypeHspa = NetworkConnectionSubtypeKey.String("hspa")
+ // IDEN
+ NetworkConnectionSubtypeIden = NetworkConnectionSubtypeKey.String("iden")
+ // EVDO Rev. B
+ NetworkConnectionSubtypeEvdoB = NetworkConnectionSubtypeKey.String("evdo_b")
+ // LTE
+ NetworkConnectionSubtypeLte = NetworkConnectionSubtypeKey.String("lte")
+ // EHRPD
+ NetworkConnectionSubtypeEhrpd = NetworkConnectionSubtypeKey.String("ehrpd")
+ // HSPAP
+ NetworkConnectionSubtypeHspap = NetworkConnectionSubtypeKey.String("hspap")
+ // GSM
+ NetworkConnectionSubtypeGsm = NetworkConnectionSubtypeKey.String("gsm")
+ // TD-SCDMA
+ NetworkConnectionSubtypeTdScdma = NetworkConnectionSubtypeKey.String("td_scdma")
+ // IWLAN
+ NetworkConnectionSubtypeIwlan = NetworkConnectionSubtypeKey.String("iwlan")
+ // 5G NR (New Radio)
+ NetworkConnectionSubtypeNr = NetworkConnectionSubtypeKey.String("nr")
+ // 5G NRNSA (New Radio Non-Standalone)
+ NetworkConnectionSubtypeNrnsa = NetworkConnectionSubtypeKey.String("nrnsa")
+ // LTE CA
+ NetworkConnectionSubtypeLteCa = NetworkConnectionSubtypeKey.String("lte_ca")
+)
+
+var (
+ // wifi
+ NetworkConnectionTypeWifi = NetworkConnectionTypeKey.String("wifi")
+ // wired
+ NetworkConnectionTypeWired = NetworkConnectionTypeKey.String("wired")
+ // cell
+ NetworkConnectionTypeCell = NetworkConnectionTypeKey.String("cell")
+ // unavailable
+ NetworkConnectionTypeUnavailable = NetworkConnectionTypeKey.String("unavailable")
+ // unknown
+ NetworkConnectionTypeUnknown = NetworkConnectionTypeKey.String("unknown")
+)
+
+var (
+ // transmit
+ NetworkIoDirectionTransmit = NetworkIoDirectionKey.String("transmit")
+ // receive
+ NetworkIoDirectionReceive = NetworkIoDirectionKey.String("receive")
+)
+
+var (
+ // TCP
+ NetworkTransportTCP = NetworkTransportKey.String("tcp")
+ // UDP
+ NetworkTransportUDP = NetworkTransportKey.String("udp")
+ // Named or anonymous pipe
+ NetworkTransportPipe = NetworkTransportKey.String("pipe")
+ // Unix domain socket
+ NetworkTransportUnix = NetworkTransportKey.String("unix")
+)
+
+var (
+ // IPv4
+ NetworkTypeIpv4 = NetworkTypeKey.String("ipv4")
+ // IPv6
+ NetworkTypeIpv6 = NetworkTypeKey.String("ipv6")
+)
+
+// NetworkCarrierIcc returns an attribute KeyValue conforming to the
+// "network.carrier.icc" semantic conventions. It represents the ISO 3166-1
+// alpha-2 2-character country code associated with the mobile carrier network.
+func NetworkCarrierIcc(val string) attribute.KeyValue {
+ return NetworkCarrierIccKey.String(val)
+}
+
+// NetworkCarrierMcc returns an attribute KeyValue conforming to the
+// "network.carrier.mcc" semantic conventions. It represents the mobile carrier
+// country code.
+func NetworkCarrierMcc(val string) attribute.KeyValue {
+ return NetworkCarrierMccKey.String(val)
+}
+
+// NetworkCarrierMnc returns an attribute KeyValue conforming to the
+// "network.carrier.mnc" semantic conventions. It represents the mobile carrier
+// network code.
+func NetworkCarrierMnc(val string) attribute.KeyValue {
+ return NetworkCarrierMncKey.String(val)
+}
+
+// NetworkCarrierName returns an attribute KeyValue conforming to the
+// "network.carrier.name" semantic conventions. It represents the name of the
+// mobile carrier.
+func NetworkCarrierName(val string) attribute.KeyValue {
+ return NetworkCarrierNameKey.String(val)
+}
+
+// NetworkLocalAddress returns an attribute KeyValue conforming to the
+// "network.local.address" semantic conventions. It represents the local
+// address of the network connection - IP address or Unix domain socket name.
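+//
+// A hedged sketch describing both ends of a TCP connection (assumed
+// identifiers `span` and `semconv`; the addresses and ports are
+// illustrative):
+//
+//	span.SetAttributes(
+//		semconv.NetworkTransportTCP,
+//		semconv.NetworkLocalAddress("10.1.2.80"),
+//		semconv.NetworkLocalPort(65123),
+//		semconv.NetworkPeerAddress("10.1.2.81"),
+//		semconv.NetworkPeerPort(8443),
+//	)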
+func NetworkLocalAddress(val string) attribute.KeyValue { + return NetworkLocalAddressKey.String(val) +} + +// NetworkLocalPort returns an attribute KeyValue conforming to the +// "network.local.port" semantic conventions. It represents the local port +// number of the network connection. +func NetworkLocalPort(val int) attribute.KeyValue { + return NetworkLocalPortKey.Int(val) +} + +// NetworkPeerAddress returns an attribute KeyValue conforming to the +// "network.peer.address" semantic conventions. It represents the peer address +// of the network connection - IP address or Unix domain socket name. +func NetworkPeerAddress(val string) attribute.KeyValue { + return NetworkPeerAddressKey.String(val) +} + +// NetworkPeerPort returns an attribute KeyValue conforming to the +// "network.peer.port" semantic conventions. It represents the peer port number +// of the network connection. +func NetworkPeerPort(val int) attribute.KeyValue { + return NetworkPeerPortKey.Int(val) +} + +// NetworkProtocolName returns an attribute KeyValue conforming to the +// "network.protocol.name" semantic conventions. It represents the [OSI +// application layer](https://osi-model.com/application-layer/) or non-OSI +// equivalent. +func NetworkProtocolName(val string) attribute.KeyValue { + return NetworkProtocolNameKey.String(val) +} + +// NetworkProtocolVersion returns an attribute KeyValue conforming to the +// "network.protocol.version" semantic conventions. It represents the version +// of the protocol specified in `network.protocol.name`. +func NetworkProtocolVersion(val string) attribute.KeyValue { + return NetworkProtocolVersionKey.String(val) +} + +// Attributes for remote procedure calls. +const ( + // RPCConnectRPCErrorCodeKey is the attribute Key conforming to the + // "rpc.connect_rpc.error_code" semantic conventions. It represents the + // [error codes](https://connect.build/docs/protocol/#error-codes) of the + // Connect request. Error codes are always string values. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + RPCConnectRPCErrorCodeKey = attribute.Key("rpc.connect_rpc.error_code") + + // RPCGRPCStatusCodeKey is the attribute Key conforming to the + // "rpc.grpc.status_code" semantic conventions. It represents the [numeric + // status + // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of + // the gRPC request. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") + + // RPCJsonrpcErrorCodeKey is the attribute Key conforming to the + // "rpc.jsonrpc.error_code" semantic conventions. It represents the + // `error.code` property of response if it is an error response. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: -32700, 100 + RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code") + + // RPCJsonrpcErrorMessageKey is the attribute Key conforming to the + // "rpc.jsonrpc.error_message" semantic conventions. It represents the + // `error.message` property of response if it is an error response. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Parse error', 'User already exists' + RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message") + + // RPCJsonrpcRequestIDKey is the attribute Key conforming to the + // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` + // property of request or response. 
+ // Since protocol allows id to be int,
+ // string, `null` or missing (for notifications), value is expected to be
+ // cast to string for simplicity. Use empty string in case of `null` value.
+ // Omit entirely if this is a notification.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '10', 'request-7', ''
+ RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id")
+
+ // RPCJsonrpcVersionKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.version" semantic conventions. It represents the protocol
+ // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0
+ // doesn't specify this, the value can be omitted.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2.0', '1.0'
+ RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version")
+
+ // RPCMethodKey is the attribute Key conforming to the "rpc.method"
+ // semantic conventions. It represents the name of the (logical) method
+ // being called, must be equal to the $method part in the span name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'exampleMethod'
+ // Note: This is the logical name of the method from the RPC interface
+ // perspective, which can be different from the name of any implementing
+ // method/function. The `code.function` attribute may be used to store the
+ // latter (e.g., method actually executing the call on the server side, RPC
+ // client stub method on the client side).
+ RPCMethodKey = attribute.Key("rpc.method")
+
+ // RPCServiceKey is the attribute Key conforming to the "rpc.service"
+ // semantic conventions. It represents the full (logical) name of the
+ // service being called, including its package name, if applicable.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'myservice.EchoService'
+ // Note: This is the logical name of the service from the RPC interface
+ // perspective, which can be different from the name of any implementing
+ // class. The `code.namespace` attribute may be used to store the latter
+ // (despite the attribute name, it may include a class name; e.g., class
+ // with method actually executing the call on the server side, RPC client
+ // stub class on the client side).
+ RPCServiceKey = attribute.Key("rpc.service")
+
+ // RPCSystemKey is the attribute Key conforming to the "rpc.system"
+ // semantic conventions. It represents a string identifying the remoting
+ // system. See below for a list of well-known identifiers.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ RPCSystemKey = attribute.Key("rpc.system")
+)
+
+var (
+ // cancelled
+ RPCConnectRPCErrorCodeCancelled = RPCConnectRPCErrorCodeKey.String("cancelled")
+ // unknown
+ RPCConnectRPCErrorCodeUnknown = RPCConnectRPCErrorCodeKey.String("unknown")
+ // invalid_argument
+ RPCConnectRPCErrorCodeInvalidArgument = RPCConnectRPCErrorCodeKey.String("invalid_argument")
+ // deadline_exceeded
+ RPCConnectRPCErrorCodeDeadlineExceeded = RPCConnectRPCErrorCodeKey.String("deadline_exceeded")
+ // not_found
+ RPCConnectRPCErrorCodeNotFound = RPCConnectRPCErrorCodeKey.String("not_found")
+ // already_exists
+ RPCConnectRPCErrorCodeAlreadyExists = RPCConnectRPCErrorCodeKey.String("already_exists")
+ // permission_denied
+ RPCConnectRPCErrorCodePermissionDenied = RPCConnectRPCErrorCodeKey.String("permission_denied")
+ // resource_exhausted
+ RPCConnectRPCErrorCodeResourceExhausted = RPCConnectRPCErrorCodeKey.String("resource_exhausted")
+ // failed_precondition
+ RPCConnectRPCErrorCodeFailedPrecondition = RPCConnectRPCErrorCodeKey.String("failed_precondition")
+ // aborted
+ RPCConnectRPCErrorCodeAborted = RPCConnectRPCErrorCodeKey.String("aborted")
+ // out_of_range
+ RPCConnectRPCErrorCodeOutOfRange = RPCConnectRPCErrorCodeKey.String("out_of_range")
+ // unimplemented
+ RPCConnectRPCErrorCodeUnimplemented = RPCConnectRPCErrorCodeKey.String("unimplemented")
+ // internal
+ RPCConnectRPCErrorCodeInternal = RPCConnectRPCErrorCodeKey.String("internal")
+ // unavailable
+ RPCConnectRPCErrorCodeUnavailable = RPCConnectRPCErrorCodeKey.String("unavailable")
+ // data_loss
+ RPCConnectRPCErrorCodeDataLoss = RPCConnectRPCErrorCodeKey.String("data_loss")
+ // unauthenticated
+ RPCConnectRPCErrorCodeUnauthenticated = RPCConnectRPCErrorCodeKey.String("unauthenticated")
+)
+
+var (
+ // OK
+ RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0)
+ // CANCELLED
+ RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1)
+ // UNKNOWN
+ RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2)
+ // INVALID_ARGUMENT
+ RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3)
+ // DEADLINE_EXCEEDED
+ RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4)
+ // NOT_FOUND
+ RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5)
+ // ALREADY_EXISTS
+ RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6)
+ // PERMISSION_DENIED
+ RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7)
+ // RESOURCE_EXHAUSTED
+ RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8)
+ // FAILED_PRECONDITION
+ RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9)
+ // ABORTED
+ RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10)
+ // OUT_OF_RANGE
+ RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11)
+ // UNIMPLEMENTED
+ RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12)
+ // INTERNAL
+ RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13)
+ // UNAVAILABLE
+ RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14)
+ // DATA_LOSS
+ RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15)
+ // UNAUTHENTICATED
+ RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16)
+)
+
+var (
+ // gRPC
+ RPCSystemGRPC = RPCSystemKey.String("grpc")
+ // Java RMI
+ RPCSystemJavaRmi = RPCSystemKey.String("java_rmi")
+ // .NET WCF
+ RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf")
+ // Apache Dubbo
+ RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo")
+ // Connect RPC
+ RPCSystemConnectRPC = RPCSystemKey.String("connect_rpc")
+)
+
+// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.error_code" semantic conventions. It represents the
+// `error.code` property of response if it is an error response.
+func RPCJsonrpcErrorCode(val int) attribute.KeyValue {
+ return RPCJsonrpcErrorCodeKey.Int(val)
+}
+
+// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.error_message" semantic conventions. It represents the
+// `error.message` property of response if it is an error response.
+func RPCJsonrpcErrorMessage(val string) attribute.KeyValue {
+ return RPCJsonrpcErrorMessageKey.String(val)
+}
+
+// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id`
+// property of request or response. Since protocol allows id to be int, string,
+// `null` or missing (for notifications), value is expected to be cast to
+// string for simplicity. Use empty string in case of `null` value. Omit
+// entirely if this is a notification.
+func RPCJsonrpcRequestID(val string) attribute.KeyValue {
+ return RPCJsonrpcRequestIDKey.String(val)
+}
+
+// RPCJsonrpcVersion returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.version" semantic conventions. It represents the protocol
+// version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0
+// doesn't specify this, the value can be omitted.
+func RPCJsonrpcVersion(val string) attribute.KeyValue {
+ return RPCJsonrpcVersionKey.String(val)
+}
+
+// RPCMethod returns an attribute KeyValue conforming to the "rpc.method"
+// semantic conventions. It represents the name of the (logical) method being
+// called, must be equal to the $method part in the span name.
+func RPCMethod(val string) attribute.KeyValue {
+ return RPCMethodKey.String(val)
+}
+
+// RPCService returns an attribute KeyValue conforming to the "rpc.service"
+// semantic conventions. It represents the full (logical) name of the service
+// being called, including its package name, if applicable.
+func RPCService(val string) attribute.KeyValue {
+ return RPCServiceKey.String(val)
+}
+
+// These attributes may be used to describe the server in a connection-based
+// network interaction where there is one side that initiates the connection
+// (the client is the side that initiates the connection). This covers all TCP
+// network interactions since TCP is connection-based and one side initiates
+// the connection (an exception is made for peer-to-peer communication over TCP
+// where the "user-facing" surface of the protocol / API doesn't expose a clear
+// notion of client and server). This also covers UDP network interactions
+// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS.
+const (
+ // ServerAddressKey is the attribute Key conforming to the "server.address"
+ // semantic conventions. It represents the server domain name if available
+ // without reverse DNS lookup; otherwise, IP address or Unix domain socket
+ // name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'example.com', '10.1.2.80', '/tmp/my.sock'
+ // Note: When observed from the client side, and when communicating through
+ // an intermediary, `server.address` SHOULD represent the server address
+ // behind any intermediaries, for example proxies, if it's available.
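+ //
+ // A hedged client-side sketch (assumed identifiers `span` and `semconv`),
+ // pairing the address with the server port defined below; values are the
+ // documented examples:
+ //
+ //	span.SetAttributes(
+ //		semconv.ServerAddress("example.com"),
+ //		semconv.ServerPort(443),
+ //	)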
+ ServerAddressKey = attribute.Key("server.address") + + // ServerPortKey is the attribute Key conforming to the "server.port" + // semantic conventions. It represents the server port number. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 80, 8080, 443 + // Note: When observed from the client side, and when communicating through + // an intermediary, `server.port` SHOULD represent the server port behind + // any intermediaries, for example proxies, if it's available. + ServerPortKey = attribute.Key("server.port") +) + +// ServerAddress returns an attribute KeyValue conforming to the +// "server.address" semantic conventions. It represents the server domain name +// if available without reverse DNS lookup; otherwise, IP address or Unix +// domain socket name. +func ServerAddress(val string) attribute.KeyValue { + return ServerAddressKey.String(val) +} + +// ServerPort returns an attribute KeyValue conforming to the "server.port" +// semantic conventions. It represents the server port number. +func ServerPort(val int) attribute.KeyValue { + return ServerPortKey.Int(val) +} + +// These attributes may be used to describe the sender of a network +// exchange/packet. These should be used when there is no client/server +// relationship between the two sides, or when that relationship is unknown. +// This covers low-level network interactions (e.g. packet tracing) where you +// don't know if there was a connection or which side initiated it. This also +// covers unidirectional UDP flows and peer-to-peer communication where the +// "user-facing" surface of the protocol / API doesn't expose a clear notion of +// client and server. +const ( + // SourceAddressKey is the attribute Key conforming to the "source.address" + // semantic conventions. It represents the source address - domain name if + // available without reverse DNS lookup; otherwise, IP address or Unix + // domain socket name. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'source.example.com', '10.1.2.80', '/tmp/my.sock' + // Note: When observed from the destination side, and when communicating + // through an intermediary, `source.address` SHOULD represent the source + // address behind any intermediaries, for example proxies, if it's + // available. + SourceAddressKey = attribute.Key("source.address") + + // SourcePortKey is the attribute Key conforming to the "source.port" + // semantic conventions. It represents the source port number + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 3389, 2888 + SourcePortKey = attribute.Key("source.port") +) + +// SourceAddress returns an attribute KeyValue conforming to the +// "source.address" semantic conventions. It represents the source address - +// domain name if available without reverse DNS lookup; otherwise, IP address +// or Unix domain socket name. +func SourceAddress(val string) attribute.KeyValue { + return SourceAddressKey.String(val) +} + +// SourcePort returns an attribute KeyValue conforming to the "source.port" +// semantic conventions. It represents the source port number +func SourcePort(val int) attribute.KeyValue { + return SourcePortKey.Int(val) +} + +// Semantic convention attributes in the TLS namespace. +const ( + // TLSCipherKey is the attribute Key conforming to the "tls.cipher" + // semantic conventions. 
+ // It represents the string indicating the
+ // [cipher](https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5)
+ // used during the current connection.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'TLS_RSA_WITH_3DES_EDE_CBC_SHA',
+ // 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256'
+ // Note: The values allowed for `tls.cipher` MUST be one of the
+ // `Descriptions` of the [registered TLS Cipher
+ // Suites](https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#table-tls-parameters-4).
+ TLSCipherKey = attribute.Key("tls.cipher")
+
+ // TLSClientCertificateKey is the attribute Key conforming to the
+ // "tls.client.certificate" semantic conventions. It represents the
+ // PEM-encoded stand-alone certificate offered by the client. This is
+ // usually mutually-exclusive of `client.certificate_chain` since this
+ // value also exists in that list.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'MII...'
+ TLSClientCertificateKey = attribute.Key("tls.client.certificate")
+
+ // TLSClientCertificateChainKey is the attribute Key conforming to the
+ // "tls.client.certificate_chain" semantic conventions. It represents the
+ // array of PEM-encoded certificates that make up the certificate chain
+ // offered by the client. This is usually mutually-exclusive of
+ // `client.certificate` since that value should be the first certificate in
+ // the chain.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'MII...', 'MI...'
+ TLSClientCertificateChainKey = attribute.Key("tls.client.certificate_chain")
+
+ // TLSClientHashMd5Key is the attribute Key conforming to the
+ // "tls.client.hash.md5" semantic conventions. It represents the
+ // certificate fingerprint using the MD5 digest of the DER-encoded version
+ // of the certificate offered by the client. For consistency with other
+ // hash values, this value should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC'
+ TLSClientHashMd5Key = attribute.Key("tls.client.hash.md5")
+
+ // TLSClientHashSha1Key is the attribute Key conforming to the
+ // "tls.client.hash.sha1" semantic conventions. It represents the
+ // certificate fingerprint using the SHA1 digest of the DER-encoded version
+ // of the certificate offered by the client. For consistency with other
+ // hash values, this value should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A'
+ TLSClientHashSha1Key = attribute.Key("tls.client.hash.sha1")
+
+ // TLSClientHashSha256Key is the attribute Key conforming to the
+ // "tls.client.hash.sha256" semantic conventions. It represents the
+ // certificate fingerprint using the SHA256 digest of the DER-encoded
+ // version of the certificate offered by the client. For consistency with
+ // other hash values, this value should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples:
+ // '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0'
+ TLSClientHashSha256Key = attribute.Key("tls.client.hash.sha256")
+
+ // TLSClientIssuerKey is the attribute Key conforming to the
+ // "tls.client.issuer" semantic conventions.
+ // It represents the distinguished name of
+ // [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6)
+ // of the issuer of the x.509 certificate presented by the client.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example,
+ // DC=com'
+ TLSClientIssuerKey = attribute.Key("tls.client.issuer")
+
+ // TLSClientJa3Key is the attribute Key conforming to the "tls.client.ja3"
+ // semantic conventions. It represents a hash that identifies clients based
+ // on how they perform an SSL/TLS handshake.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'd4e5b18d6b55c71272893221c96ba240'
+ TLSClientJa3Key = attribute.Key("tls.client.ja3")
+
+ // TLSClientNotAfterKey is the attribute Key conforming to the
+ // "tls.client.not_after" semantic conventions. It represents the date/time
+ // indicating when the client certificate is no longer considered valid.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2021-01-01T00:00:00.000Z'
+ TLSClientNotAfterKey = attribute.Key("tls.client.not_after")
+
+ // TLSClientNotBeforeKey is the attribute Key conforming to the
+ // "tls.client.not_before" semantic conventions. It represents the
+ // date/time indicating when the client certificate is first considered
+ // valid.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '1970-01-01T00:00:00.000Z'
+ TLSClientNotBeforeKey = attribute.Key("tls.client.not_before")
+
+ // TLSClientServerNameKey is the attribute Key conforming to the
+ // "tls.client.server_name" semantic conventions. It represents the server
+ // name indication (SNI) sent by the client, which tells the server the
+ // hostname the client is attempting to connect to.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry.io'
+ TLSClientServerNameKey = attribute.Key("tls.client.server_name")
+
+ // TLSClientSubjectKey is the attribute Key conforming to the
+ // "tls.client.subject" semantic conventions. It represents the
+ // distinguished name of subject of the x.509 certificate presented by the
+ // client.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'CN=myclient, OU=Documentation Team, DC=example, DC=com'
+ TLSClientSubjectKey = attribute.Key("tls.client.subject")
+
+ // TLSClientSupportedCiphersKey is the attribute Key conforming to the
+ // "tls.client.supported_ciphers" semantic conventions. It represents the
+ // array of ciphers offered by the client during the client hello.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
+ // "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "..."'
+ TLSClientSupportedCiphersKey = attribute.Key("tls.client.supported_ciphers")
+
+ // TLSCurveKey is the attribute Key conforming to the "tls.curve" semantic
+ // conventions. It represents the string indicating the curve used for the
+ // given cipher, when applicable.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'secp256r1'
+ TLSCurveKey = attribute.Key("tls.curve")
+
+ // TLSEstablishedKey is the attribute Key conforming to the
+ // "tls.established" semantic conventions.
+	// It represents the boolean flag
+	// indicating if the TLS negotiation was successful and transitioned to an
+	// encrypted tunnel.
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: True
+	TLSEstablishedKey = attribute.Key("tls.established")
+
+	// TLSNextProtocolKey is the attribute Key conforming to the
+	// "tls.next_protocol" semantic conventions. It represents the string
+	// indicating the protocol being tunneled. Per the values in the [IANA
+	// registry](https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids),
+	// this string should be lower case.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'http/1.1'
+	TLSNextProtocolKey = attribute.Key("tls.next_protocol")
+
+	// TLSProtocolNameKey is the attribute Key conforming to the
+	// "tls.protocol.name" semantic conventions. It represents the normalized
+	// lowercase protocol name parsed from the original string of the
+	// negotiated [SSL/TLS protocol
+	// version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES).
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	TLSProtocolNameKey = attribute.Key("tls.protocol.name")
+
+	// TLSProtocolVersionKey is the attribute Key conforming to the
+	// "tls.protocol.version" semantic conventions. It represents the numeric
+	// part of the version parsed from the original string of the negotiated
+	// [SSL/TLS protocol
+	// version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '1.2', '3'
+	TLSProtocolVersionKey = attribute.Key("tls.protocol.version")
+
+	// TLSResumedKey is the attribute Key conforming to the "tls.resumed"
+	// semantic conventions. It represents the boolean flag indicating if this
+	// TLS connection was resumed from an existing TLS negotiation.
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: True
+	TLSResumedKey = attribute.Key("tls.resumed")
+
+	// TLSServerCertificateKey is the attribute Key conforming to the
+	// "tls.server.certificate" semantic conventions. It represents the
+	// PEM-encoded stand-alone certificate offered by the server. This is
+	// usually mutually exclusive of `server.certificate_chain` since this
+	// value also exists in that list.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'MII...'
+	TLSServerCertificateKey = attribute.Key("tls.server.certificate")
+
+	// TLSServerCertificateChainKey is the attribute Key conforming to the
+	// "tls.server.certificate_chain" semantic conventions. It represents the
+	// array of PEM-encoded certificates that make up the certificate chain
+	// offered by the server. This is usually mutually exclusive of
+	// `server.certificate` since that value should be the first certificate in
+	// the chain.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'MII...', 'MI...'
+	TLSServerCertificateChainKey = attribute.Key("tls.server.certificate_chain")
+
+	// TLSServerHashMd5Key is the attribute Key conforming to the
+	// "tls.server.hash.md5" semantic conventions. It represents the
+	// certificate fingerprint using the MD5 digest of the DER-encoded version
+	// of the certificate offered by the server.
+	// For consistency with other hash values, this value should be formatted
+	// as an uppercase hash.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC'
+	TLSServerHashMd5Key = attribute.Key("tls.server.hash.md5")
+
+	// TLSServerHashSha1Key is the attribute Key conforming to the
+	// "tls.server.hash.sha1" semantic conventions. It represents the
+	// certificate fingerprint using the SHA1 digest of the DER-encoded
+	// version of the certificate offered by the server. For consistency with
+	// other hash values, this value should be formatted as an uppercase hash.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A'
+	TLSServerHashSha1Key = attribute.Key("tls.server.hash.sha1")
+
+	// TLSServerHashSha256Key is the attribute Key conforming to the
+	// "tls.server.hash.sha256" semantic conventions. It represents the
+	// certificate fingerprint using the SHA256 digest of the DER-encoded
+	// version of the certificate offered by the server. For consistency with
+	// other hash values, this value should be formatted as an uppercase hash.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples:
+	// '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0'
+	TLSServerHashSha256Key = attribute.Key("tls.server.hash.sha256")
+
+	// TLSServerIssuerKey is the attribute Key conforming to the
+	// "tls.server.issuer" semantic conventions. It represents the
+	// distinguished name of
+	// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6)
+	// of the issuer of the x.509 certificate presented by the server.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example,
+	// DC=com'
+	TLSServerIssuerKey = attribute.Key("tls.server.issuer")
+
+	// TLSServerJa3sKey is the attribute Key conforming to the
+	// "tls.server.ja3s" semantic conventions. It represents a hash that
+	// identifies servers based on how they perform an SSL/TLS handshake.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'd4e5b18d6b55c71272893221c96ba240'
+	TLSServerJa3sKey = attribute.Key("tls.server.ja3s")
+
+	// TLSServerNotAfterKey is the attribute Key conforming to the
+	// "tls.server.not_after" semantic conventions. It represents the date/time
+	// indicating when the server certificate is no longer considered valid.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '2021-01-01T00:00:00.000Z'
+	TLSServerNotAfterKey = attribute.Key("tls.server.not_after")
+
+	// TLSServerNotBeforeKey is the attribute Key conforming to the
+	// "tls.server.not_before" semantic conventions. It represents the
+	// date/time indicating when the server certificate is first considered
+	// valid.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '1970-01-01T00:00:00.000Z'
+	TLSServerNotBeforeKey = attribute.Key("tls.server.not_before")
+
+	// TLSServerSubjectKey is the attribute Key conforming to the
+	// "tls.server.subject" semantic conventions. It represents the
+	// distinguished name of the subject of the x.509 certificate presented by
+	// the server.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'CN=myserver, OU=Documentation Team, DC=example, DC=com'
+	TLSServerSubjectKey = attribute.Key("tls.server.subject")
+)
+
+var (
+	// ssl
+	TLSProtocolNameSsl = TLSProtocolNameKey.String("ssl")
+	// tls
+	TLSProtocolNameTLS = TLSProtocolNameKey.String("tls")
+)
+
+// TLSCipher returns an attribute KeyValue conforming to the "tls.cipher"
+// semantic conventions. It represents the string indicating the
+// [cipher](https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5) used
+// during the current connection.
+func TLSCipher(val string) attribute.KeyValue {
+	return TLSCipherKey.String(val)
+}
+
+// TLSClientCertificate returns an attribute KeyValue conforming to the
+// "tls.client.certificate" semantic conventions. It represents the PEM-encoded
+// stand-alone certificate offered by the client. This is usually
+// mutually exclusive of `client.certificate_chain` since this value also
+// exists in that list.
+func TLSClientCertificate(val string) attribute.KeyValue {
+	return TLSClientCertificateKey.String(val)
+}
+
+// TLSClientCertificateChain returns an attribute KeyValue conforming to the
+// "tls.client.certificate_chain" semantic conventions. It represents the array
+// of PEM-encoded certificates that make up the certificate chain offered by
+// the client. This is usually mutually exclusive of `client.certificate` since
+// that value should be the first certificate in the chain.
+func TLSClientCertificateChain(val ...string) attribute.KeyValue {
+	return TLSClientCertificateChainKey.StringSlice(val)
+}
+
+// TLSClientHashMd5 returns an attribute KeyValue conforming to the
+// "tls.client.hash.md5" semantic conventions. It represents the certificate
+// fingerprint using the MD5 digest of the DER-encoded version of the
+// certificate offered by the client. For consistency with other hash values,
+// this value should be formatted as an uppercase hash.
+func TLSClientHashMd5(val string) attribute.KeyValue {
+	return TLSClientHashMd5Key.String(val)
+}
+
+// TLSClientHashSha1 returns an attribute KeyValue conforming to the
+// "tls.client.hash.sha1" semantic conventions. It represents the certificate
+// fingerprint using the SHA1 digest of the DER-encoded version of the
+// certificate offered by the client. For consistency with other hash values,
+// this value should be formatted as an uppercase hash.
+func TLSClientHashSha1(val string) attribute.KeyValue {
+	return TLSClientHashSha1Key.String(val)
+}
+
+// TLSClientHashSha256 returns an attribute KeyValue conforming to the
+// "tls.client.hash.sha256" semantic conventions. It represents the certificate
+// fingerprint using the SHA256 digest of the DER-encoded version of the
+// certificate offered by the client. For consistency with other hash values,
+// this value should be formatted as an uppercase hash.
+func TLSClientHashSha256(val string) attribute.KeyValue {
+	return TLSClientHashSha256Key.String(val)
+}
+
+// TLSClientIssuer returns an attribute KeyValue conforming to the
+// "tls.client.issuer" semantic conventions. It represents the distinguished
+// name of
+// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of
+// the issuer of the x.509 certificate presented by the client.
+func TLSClientIssuer(val string) attribute.KeyValue {
+	return TLSClientIssuerKey.String(val)
+}
+
+// TLSClientJa3 returns an attribute KeyValue conforming to the
+// "tls.client.ja3" semantic conventions.
+// It represents a hash that identifies
+// clients based on how they perform an SSL/TLS handshake.
+func TLSClientJa3(val string) attribute.KeyValue {
+	return TLSClientJa3Key.String(val)
+}
+
+// TLSClientNotAfter returns an attribute KeyValue conforming to the
+// "tls.client.not_after" semantic conventions. It represents the date/time
+// indicating when the client certificate is no longer considered valid.
+func TLSClientNotAfter(val string) attribute.KeyValue {
+	return TLSClientNotAfterKey.String(val)
+}
+
+// TLSClientNotBefore returns an attribute KeyValue conforming to the
+// "tls.client.not_before" semantic conventions. It represents the date/time
+// indicating when the client certificate is first considered valid.
+func TLSClientNotBefore(val string) attribute.KeyValue {
+	return TLSClientNotBeforeKey.String(val)
+}
+
+// TLSClientServerName returns an attribute KeyValue conforming to the
+// "tls.client.server_name" semantic conventions. It represents the hostname
+// the client is attempting to connect to, also called the SNI (Server Name
+// Indication).
+func TLSClientServerName(val string) attribute.KeyValue {
+	return TLSClientServerNameKey.String(val)
+}
+
+// TLSClientSubject returns an attribute KeyValue conforming to the
+// "tls.client.subject" semantic conventions. It represents the distinguished
+// name of the subject of the x.509 certificate presented by the client.
+func TLSClientSubject(val string) attribute.KeyValue {
+	return TLSClientSubjectKey.String(val)
+}
+
+// TLSClientSupportedCiphers returns an attribute KeyValue conforming to the
+// "tls.client.supported_ciphers" semantic conventions. It represents the array
+// of ciphers offered by the client during the client hello.
+func TLSClientSupportedCiphers(val ...string) attribute.KeyValue {
+	return TLSClientSupportedCiphersKey.StringSlice(val)
+}
+
+// TLSCurve returns an attribute KeyValue conforming to the "tls.curve"
+// semantic conventions. It represents the string indicating the curve used for
+// the given cipher, when applicable.
+func TLSCurve(val string) attribute.KeyValue {
+	return TLSCurveKey.String(val)
+}
+
+// TLSEstablished returns an attribute KeyValue conforming to the
+// "tls.established" semantic conventions. It represents the boolean flag
+// indicating if the TLS negotiation was successful and transitioned to an
+// encrypted tunnel.
+func TLSEstablished(val bool) attribute.KeyValue {
+	return TLSEstablishedKey.Bool(val)
+}
+
+// TLSNextProtocol returns an attribute KeyValue conforming to the
+// "tls.next_protocol" semantic conventions. It represents the string
+// indicating the protocol being tunneled. Per the values in the [IANA
+// registry](https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids),
+// this string should be lower case.
+func TLSNextProtocol(val string) attribute.KeyValue {
+	return TLSNextProtocolKey.String(val)
+}
+
+// TLSProtocolVersion returns an attribute KeyValue conforming to the
+// "tls.protocol.version" semantic conventions. It represents the numeric part
+// of the version parsed from the original string of the negotiated [SSL/TLS
+// protocol
+// version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES).
+func TLSProtocolVersion(val string) attribute.KeyValue {
+	return TLSProtocolVersionKey.String(val)
+}
+
+// TLSResumed returns an attribute KeyValue conforming to the "tls.resumed"
+// semantic conventions.
+// It represents the boolean flag indicating if this TLS
+// connection was resumed from an existing TLS negotiation.
+func TLSResumed(val bool) attribute.KeyValue {
+	return TLSResumedKey.Bool(val)
+}
+
+// TLSServerCertificate returns an attribute KeyValue conforming to the
+// "tls.server.certificate" semantic conventions. It represents the PEM-encoded
+// stand-alone certificate offered by the server. This is usually
+// mutually exclusive of `server.certificate_chain` since this value also
+// exists in that list.
+func TLSServerCertificate(val string) attribute.KeyValue {
+	return TLSServerCertificateKey.String(val)
+}
+
+// TLSServerCertificateChain returns an attribute KeyValue conforming to the
+// "tls.server.certificate_chain" semantic conventions. It represents the array
+// of PEM-encoded certificates that make up the certificate chain offered by
+// the server. This is usually mutually exclusive of `server.certificate` since
+// that value should be the first certificate in the chain.
+func TLSServerCertificateChain(val ...string) attribute.KeyValue {
+	return TLSServerCertificateChainKey.StringSlice(val)
+}
+
+// TLSServerHashMd5 returns an attribute KeyValue conforming to the
+// "tls.server.hash.md5" semantic conventions. It represents the certificate
+// fingerprint using the MD5 digest of the DER-encoded version of the
+// certificate offered by the server. For consistency with other hash values,
+// this value should be formatted as an uppercase hash.
+func TLSServerHashMd5(val string) attribute.KeyValue {
+	return TLSServerHashMd5Key.String(val)
+}
+
+// TLSServerHashSha1 returns an attribute KeyValue conforming to the
+// "tls.server.hash.sha1" semantic conventions. It represents the certificate
+// fingerprint using the SHA1 digest of the DER-encoded version of the
+// certificate offered by the server. For consistency with other hash values,
+// this value should be formatted as an uppercase hash.
+func TLSServerHashSha1(val string) attribute.KeyValue {
+	return TLSServerHashSha1Key.String(val)
+}
+
+// TLSServerHashSha256 returns an attribute KeyValue conforming to the
+// "tls.server.hash.sha256" semantic conventions. It represents the certificate
+// fingerprint using the SHA256 digest of the DER-encoded version of the
+// certificate offered by the server. For consistency with other hash values,
+// this value should be formatted as an uppercase hash.
+func TLSServerHashSha256(val string) attribute.KeyValue {
+	return TLSServerHashSha256Key.String(val)
+}
+
+// TLSServerIssuer returns an attribute KeyValue conforming to the
+// "tls.server.issuer" semantic conventions. It represents the distinguished
+// name of
+// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of
+// the issuer of the x.509 certificate presented by the server.
+func TLSServerIssuer(val string) attribute.KeyValue {
+	return TLSServerIssuerKey.String(val)
+}
+
+// TLSServerJa3s returns an attribute KeyValue conforming to the
+// "tls.server.ja3s" semantic conventions. It represents a hash that identifies
+// servers based on how they perform an SSL/TLS handshake.
+func TLSServerJa3s(val string) attribute.KeyValue {
+	return TLSServerJa3sKey.String(val)
+}
+
+// TLSServerNotAfter returns an attribute KeyValue conforming to the
+// "tls.server.not_after" semantic conventions. It represents the date/time
+// indicating when the server certificate is no longer considered valid.
+func TLSServerNotAfter(val string) attribute.KeyValue {
+	return TLSServerNotAfterKey.String(val)
+}
+
+// TLSServerNotBefore returns an attribute KeyValue conforming to the
+// "tls.server.not_before" semantic conventions. It represents the date/time
+// indicating when the server certificate is first considered valid.
+func TLSServerNotBefore(val string) attribute.KeyValue {
+	return TLSServerNotBeforeKey.String(val)
+}
+
+// TLSServerSubject returns an attribute KeyValue conforming to the
+// "tls.server.subject" semantic conventions. It represents the distinguished
+// name of the subject of the x.509 certificate presented by the server.
+func TLSServerSubject(val string) attribute.KeyValue {
+	return TLSServerSubjectKey.String(val)
+}
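The key constants and helper functions above are two routes to the same `tls.*` attributes. A minimal sketch of how instrumentation might attach them to a span, assuming an already-configured global TracerProvider (the tracer name and attribute values are illustrative only):

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

func recordTLSInfo(ctx context.Context) {
	// Tracer name and span name are placeholders for illustration.
	_, span := otel.Tracer("example/tls").Start(ctx, "tls.handshake")
	defer span.End()

	span.SetAttributes(
		semconv.TLSEstablished(true),
		semconv.TLSProtocolNameTLS,        // enum member: tls.protocol.name=tls
		semconv.TLSProtocolVersion("1.3"), // helper: tls.protocol.version=1.3
		semconv.TLSCipher("TLS_AES_128_GCM_SHA256"),
		semconv.TLSClientServerName("opentelemetry.io"),
	)
}
```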
+
+// Attributes describing URL.
+const (
+	// URLFragmentKey is the attribute Key conforming to the "url.fragment"
+	// semantic conventions. It represents the [URI
+	// fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'SemConv'
+	URLFragmentKey = attribute.Key("url.fragment")
+
+	// URLFullKey is the attribute Key conforming to the "url.full" semantic
+	// conventions. It represents the absolute URL describing a network
+	// resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv',
+	// '//localhost'
+	// Note: For network calls, URL usually has
+	// `scheme://host[:port][path][?query][#fragment]` format, where the
+	// fragment is not transmitted over HTTP, but if it is known, it SHOULD be
+	// included nevertheless.
+	// `url.full` MUST NOT contain credentials passed via URL in form of
+	// `https://username:password@www.example.com/`. In such a case the
+	// username and password SHOULD be redacted and the attribute's value
+	// SHOULD be `https://REDACTED:REDACTED@www.example.com/`.
+	// `url.full` SHOULD capture the absolute URL when it is available (or can
+	// be reconstructed) and SHOULD NOT be validated or modified except for
+	// sanitizing purposes.
+	URLFullKey = attribute.Key("url.full")
+
+	// URLPathKey is the attribute Key conforming to the "url.path" semantic
+	// conventions. It represents the [URI
+	// path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '/search'
+	URLPathKey = attribute.Key("url.path")
+
+	// URLQueryKey is the attribute Key conforming to the "url.query" semantic
+	// conventions. It represents the [URI
+	// query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'q=OpenTelemetry'
+	// Note: Sensitive content provided in query string SHOULD be scrubbed when
+	// instrumentations can identify it.
+	URLQueryKey = attribute.Key("url.query")
+
+	// URLSchemeKey is the attribute Key conforming to the "url.scheme"
+	// semantic conventions. It represents the [URI
+	// scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component
+	// identifying the used protocol.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'https', 'ftp', 'telnet'
+	URLSchemeKey = attribute.Key("url.scheme")
+)
+
+// URLFragment returns an attribute KeyValue conforming to the
+// "url.fragment" semantic conventions. It represents the [URI
+// fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component.
+func URLFragment(val string) attribute.KeyValue {
+	return URLFragmentKey.String(val)
+}
+
+// URLFull returns an attribute KeyValue conforming to the "url.full"
+// semantic conventions. It represents the absolute URL describing a network
+// resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986).
+func URLFull(val string) attribute.KeyValue {
+	return URLFullKey.String(val)
+}
+
+// URLPath returns an attribute KeyValue conforming to the "url.path"
+// semantic conventions. It represents the [URI
+// path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component.
+func URLPath(val string) attribute.KeyValue {
+	return URLPathKey.String(val)
+}
+
+// URLQuery returns an attribute KeyValue conforming to the "url.query"
+// semantic conventions. It represents the [URI
+// query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component.
+func URLQuery(val string) attribute.KeyValue {
+	return URLQueryKey.String(val)
+}
+
+// URLScheme returns an attribute KeyValue conforming to the "url.scheme"
+// semantic conventions. It represents the [URI
+// scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component
+// identifying the used protocol.
+func URLScheme(val string) attribute.KeyValue {
+	return URLSchemeKey.String(val)
+}
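These helpers pair naturally with the standard library's `net/url`. A hedged sketch of decomposing a parsed URL into the stable `url.*` attributes; `urlAttributes` is a hypothetical helper, not part of this package:

```go
package main

import (
	"net/url"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

// urlAttributes shows how a parsed URL could be mapped onto the url.*
// attributes defined above. u.Redacted() masks any password component,
// in the spirit of the url.full sanitization note.
func urlAttributes(u *url.URL) []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.URLFull(u.Redacted()),
		semconv.URLScheme(u.Scheme),
		semconv.URLPath(u.Path),
		semconv.URLQuery(u.RawQuery),
		semconv.URLFragment(u.Fragment),
	}
}
```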
+
+// Describes user-agent attributes.
+const (
+	// UserAgentOriginalKey is the attribute Key conforming to the
+	// "user_agent.original" semantic conventions. It represents the value of
+	// the [HTTP
+	// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
+	// header sent by the client.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'CERN-LineMode/2.15 libwww/2.17b3', 'Mozilla/5.0 (iPhone; CPU
+	// iPhone OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko)
+	// Version/14.1.2 Mobile/15E148 Safari/604.1'
+	UserAgentOriginalKey = attribute.Key("user_agent.original")
+)
+
+// UserAgentOriginal returns an attribute KeyValue conforming to the
+// "user_agent.original" semantic conventions. It represents the value of the
+// [HTTP
+// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
+// header sent by the client.
+func UserAgentOriginal(val string) attribute.KeyValue {
+	return UserAgentOriginalKey.String(val)
+}
+
+// Session is defined as the period of time encompassing all activities
+// performed by the application and the actions executed by the end user.
+// Consequently, a Session is represented as a collection of Logs, Events, and
+// Spans emitted by the Client Application throughout the Session's duration.
+// Each Session is assigned a unique identifier, which is included as an
+// attribute in the Logs, Events, and Spans generated during the Session's
+// lifecycle.
+// When a session reaches end of life, typically due to user inactivity or
+// session timeout, a new session identifier will be assigned. The previous
+// session identifier may be provided by the instrumentation so that telemetry
+// backends can link the two sessions.
+const (
+	// SessionIDKey is the attribute Key conforming to the "session.id"
+	// semantic conventions. It represents a unique id to identify a session.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '00112233-4455-6677-8899-aabbccddeeff'
+	SessionIDKey = attribute.Key("session.id")
+
+	// SessionPreviousIDKey is the attribute Key conforming to the
+	// "session.previous_id" semantic conventions. It represents the previous
+	// `session.id` for this user, when known.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '00112233-4455-6677-8899-aabbccddeeff'
+	SessionPreviousIDKey = attribute.Key("session.previous_id")
+)
+
+// SessionID returns an attribute KeyValue conforming to the "session.id"
+// semantic conventions. It represents a unique id to identify a session.
+func SessionID(val string) attribute.KeyValue {
+	return SessionIDKey.String(val)
+}
+
+// SessionPreviousID returns an attribute KeyValue conforming to the
+// "session.previous_id" semantic conventions. It represents the previous
+// `session.id` for this user, when known.
+func SessionPreviousID(val string) attribute.KeyValue {
+	return SessionPreviousIDKey.String(val)
+}
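A hedged sketch of how instrumentation might record a session rotation using the helpers above; `startSessionSpan` and its arguments are hypothetical:

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

// startSessionSpan records both the new and, when known, the previous session
// identifier so telemetry backends can link the two sessions.
func startSessionSpan(ctx context.Context, newID, oldID string) {
	_, span := otel.Tracer("example/session").Start(ctx, "session.start")
	defer span.End()

	span.SetAttributes(semconv.SessionID(newID))
	if oldID != "" {
		span.SetAttributes(semconv.SessionPreviousID(oldID))
	}
}
```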
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/doc.go
new file mode 100644
index 0000000000..d27e8a8f8b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/doc.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package semconv implements OpenTelemetry semantic conventions.
+//
+// OpenTelemetry semantic conventions are agreed standardized naming
+// patterns for OpenTelemetry things. This package represents the v1.24.0
+// version of the OpenTelemetry semantic conventions.
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0"
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/event.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/event.go
new file mode 100644
index 0000000000..6c019aafc3
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/event.go
@@ -0,0 +1,200 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// This event represents an occurrence of a lifecycle transition on the iOS
+// platform.
+const (
+	// IosStateKey is the attribute Key conforming to the "ios.state" semantic
+	// conventions. It represents the state the application has transitioned
+	// into at the occurrence of the event.
+	//
+	// Type: Enum
+	// RequirementLevel: Required
+	// Stability: experimental
+	// Note: The iOS lifecycle states are defined in the [UIApplicationDelegate
+	// documentation](https://developer.apple.com/documentation/uikit/uiapplicationdelegate#1656902),
+	// from which the `OS terminology` column values are derived.
+	IosStateKey = attribute.Key("ios.state")
+)
+
+var (
+	// The app has become `active`. Associated with UIKit notification `applicationDidBecomeActive`
+	IosStateActive = IosStateKey.String("active")
+	// The app is now `inactive`. Associated with UIKit notification `applicationWillResignActive`
+	IosStateInactive = IosStateKey.String("inactive")
+	// The app is now in the background. This value is associated with UIKit notification `applicationDidEnterBackground`
+	IosStateBackground = IosStateKey.String("background")
+	// The app is now in the foreground. This value is associated with UIKit notification `applicationWillEnterForeground`
+	IosStateForeground = IosStateKey.String("foreground")
+	// The app is about to terminate. Associated with UIKit notification `applicationWillTerminate`
+	IosStateTerminate = IosStateKey.String("terminate")
+)
+
+// This event represents an occurrence of a lifecycle transition on the Android
+// platform.
+const (
+	// AndroidStateKey is the attribute Key conforming to the "android.state"
+	// semantic conventions. It represents the state the application has
+	// transitioned into at the occurrence of the event.
+	//
+	// Type: Enum
+	// RequirementLevel: Required
+	// Stability: experimental
+	// Note: The Android lifecycle states are defined in [Activity lifecycle
+	// callbacks](https://developer.android.com/guide/components/activities/activity-lifecycle#lc),
+	// from which the `OS identifiers` are derived.
+	AndroidStateKey = attribute.Key("android.state")
+)
+
+var (
+	// Any time before Activity.onResume() or, if the app has no Activity, Context.startService() has been called in the app for the first time
+	AndroidStateCreated = AndroidStateKey.String("created")
+	// Any time after Activity.onPause() or, if the app has no Activity, Context.stopService() has been called when the app was in the foreground state
+	AndroidStateBackground = AndroidStateKey.String("background")
+	// Any time after Activity.onResume() or, if the app has no Activity, Context.startService() has been called when the app was in either the created or background states
+	AndroidStateForeground = AndroidStateKey.String("foreground")
+)
+
+// This semantic convention defines the attributes used to represent a feature
+// flag evaluation as an event.
+const (
+	// FeatureFlagKeyKey is the attribute Key conforming to the
+	// "feature_flag.key" semantic conventions. It represents the unique
+	// identifier of the feature flag.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: experimental
+	// Examples: 'logo-color'
+	FeatureFlagKeyKey = attribute.Key("feature_flag.key")
+
+	// FeatureFlagProviderNameKey is the attribute Key conforming to the
+	// "feature_flag.provider_name" semantic conventions. It represents the
+	// name of the service provider that performs the flag evaluation.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: experimental
+	// Examples: 'Flag Manager'
+	FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name")
+
+	// FeatureFlagVariantKey is the attribute Key conforming to the
+	// "feature_flag.variant" semantic conventions. It SHOULD be a semantic
+	// identifier for a value. If one is unavailable, a stringified version of
+	// the value can be used.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: experimental
+	// Examples: 'red', 'true', 'on'
+	// Note: A semantic identifier, commonly referred to as a variant, provides
+	// a means
+	// for referring to a value without including the value itself. This can
+	// provide additional context for understanding the meaning behind a value.
+	// For example, the variant `red` may be used for the value `#c05543`.
+	//
+	// A stringified version of the value can be used in situations where a
+	// semantic identifier is unavailable.
+	// String representation of the value should be determined by the
+	// implementer.
+	FeatureFlagVariantKey = attribute.Key("feature_flag.variant")
+)
+
+// FeatureFlagKey returns an attribute KeyValue conforming to the
+// "feature_flag.key" semantic conventions. It represents the unique identifier
+// of the feature flag.
+func FeatureFlagKey(val string) attribute.KeyValue {
+	return FeatureFlagKeyKey.String(val)
+}
+
+// FeatureFlagProviderName returns an attribute KeyValue conforming to the
+// "feature_flag.provider_name" semantic conventions. It represents the name of
+// the service provider that performs the flag evaluation.
+func FeatureFlagProviderName(val string) attribute.KeyValue {
+	return FeatureFlagProviderNameKey.String(val)
+}
+
+// FeatureFlagVariant returns an attribute KeyValue conforming to the
+// "feature_flag.variant" semantic conventions. It SHOULD be a semantic
+// identifier for a value. If one is unavailable, a stringified version of the
+// value can be used.
+func FeatureFlagVariant(val string) attribute.KeyValue {
+	return FeatureFlagVariantKey.String(val)
+}
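A hedged sketch of emitting a feature flag evaluation as a span event with these helpers; `recordFlagEvaluation` is a hypothetical helper, and the event name `feature_flag` follows the convention this section describes:

```go
package main

import (
	"context"

	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
	"go.opentelemetry.io/otel/trace"
)

// recordFlagEvaluation attaches a feature flag evaluation event to the span
// already active in ctx. Key, variant, and provider values are illustrative.
func recordFlagEvaluation(ctx context.Context, key, variant string) {
	span := trace.SpanFromContext(ctx)
	span.AddEvent("feature_flag", trace.WithAttributes(
		semconv.FeatureFlagKey(key),                     // e.g. "logo-color"
		semconv.FeatureFlagVariant(variant),             // e.g. "red"
		semconv.FeatureFlagProviderName("Flag Manager"), // provider name is illustrative
	))
}
```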
+
+// RPC received/sent message.
+const (
+	// MessageCompressedSizeKey is the attribute Key conforming to the
+	// "message.compressed_size" semantic conventions. It represents the
+	// compressed size of the message in bytes.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	MessageCompressedSizeKey = attribute.Key("message.compressed_size")
+
+	// MessageIDKey is the attribute Key conforming to the "message.id"
+	// semantic conventions. It represents the message ID, which MUST be
+	// calculated as two different counters starting from `1`: one for sent
+	// messages and one for received messages.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Note: This way we guarantee that the values will be consistent between
+	// different implementations.
+	MessageIDKey = attribute.Key("message.id")
+
+	// MessageTypeKey is the attribute Key conforming to the "message.type"
+	// semantic conventions. It represents whether this is a received or sent
+	// message.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	MessageTypeKey = attribute.Key("message.type")
+
+	// MessageUncompressedSizeKey is the attribute Key conforming to the
+	// "message.uncompressed_size" semantic conventions. It represents the
+	// uncompressed size of the message in bytes.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size")
+)
+
+var (
+	// sent
+	MessageTypeSent = MessageTypeKey.String("SENT")
+	// received
+	MessageTypeReceived = MessageTypeKey.String("RECEIVED")
+)
+
+// MessageCompressedSize returns an attribute KeyValue conforming to the
+// "message.compressed_size" semantic conventions. It represents the compressed
+// size of the message in bytes.
+func MessageCompressedSize(val int) attribute.KeyValue {
+	return MessageCompressedSizeKey.Int(val)
+}
+
+// MessageID returns an attribute KeyValue conforming to the "message.id"
+// semantic conventions. It represents the message ID, which MUST be calculated
+// as two different counters starting from `1`: one for sent messages and one
+// for received messages.
+func MessageID(val int) attribute.KeyValue {
+	return MessageIDKey.Int(val)
+}
+
+// MessageUncompressedSize returns an attribute KeyValue conforming to the
+// "message.uncompressed_size" semantic conventions. It represents the
+// uncompressed size of the message in bytes.
+func MessageUncompressedSize(val int) attribute.KeyValue {
+	return MessageUncompressedSizeKey.Int(val)
+}
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/exception.go
new file mode 100644
index 0000000000..7235bb51d9
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/exception.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0"
+
+const (
+	// ExceptionEventName is the name of the Span event representing an exception.
+	ExceptionEventName = "exception"
+)
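ExceptionEventName matches the event name the SDK emits when a span records an error. A minimal sketch, assuming a configured global TracerProvider (tracer name and the error itself are illustrative):

```go
package main

import (
	"context"
	"errors"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/trace"
)

func doWork(ctx context.Context) {
	_, span := otel.Tracer("example/exception").Start(ctx, "work")
	defer span.End()

	if err := errors.New("boom"); err != nil {
		// RecordError emits an event named "exception" (ExceptionEventName)
		// carrying exception.type and exception.message attributes.
		span.RecordError(err, trace.WithStackTrace(true))
	}
}
```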
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/metric.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/metric.go
new file mode 100644
index 0000000000..a6b953f625
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/metric.go
@@ -0,0 +1,1071 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0"
+
+const (
+
+	// DBClientConnectionsUsage is the metric conforming to the
+	// "db.client.connections.usage" semantic conventions. It represents the number
+	// of connections that are currently in the state described by the `state`
+	// attribute.
+	// Instrument: updowncounter
+	// Unit: {connection}
+	// Stability: Experimental
+	DBClientConnectionsUsageName = "db.client.connections.usage"
+	DBClientConnectionsUsageUnit = "{connection}"
+	DBClientConnectionsUsageDescription = "The number of connections that are currently in state described by the `state` attribute"
+
+	// DBClientConnectionsIdleMax is the metric conforming to the
+	// "db.client.connections.idle.max" semantic conventions. It represents the
+	// maximum number of idle open connections allowed.
+	// Instrument: updowncounter
+	// Unit: {connection}
+	// Stability: Experimental
+	DBClientConnectionsIdleMaxName = "db.client.connections.idle.max"
+	DBClientConnectionsIdleMaxUnit = "{connection}"
+	DBClientConnectionsIdleMaxDescription = "The maximum number of idle open connections allowed"
+
+	// DBClientConnectionsIdleMin is the metric conforming to the
+	// "db.client.connections.idle.min" semantic conventions. It represents the
+	// minimum number of idle open connections allowed.
+	// Instrument: updowncounter
+	// Unit: {connection}
+	// Stability: Experimental
+	DBClientConnectionsIdleMinName = "db.client.connections.idle.min"
+	DBClientConnectionsIdleMinUnit = "{connection}"
+	DBClientConnectionsIdleMinDescription = "The minimum number of idle open connections allowed"
+
+	// DBClientConnectionsMax is the metric conforming to the
+	// "db.client.connections.max" semantic conventions. It represents the maximum
+	// number of open connections allowed.
+	// Instrument: updowncounter
+	// Unit: {connection}
+	// Stability: Experimental
+	DBClientConnectionsMaxName = "db.client.connections.max"
+	DBClientConnectionsMaxUnit = "{connection}"
+	DBClientConnectionsMaxDescription = "The maximum number of open connections allowed"
+
+	// DBClientConnectionsPendingRequests is the metric conforming to the
+	// "db.client.connections.pending_requests" semantic conventions. It represents
+	// the number of pending requests for an open connection, cumulative for the
+	// entire pool.
+	// Instrument: updowncounter
+	// Unit: {request}
+	// Stability: Experimental
+	DBClientConnectionsPendingRequestsName = "db.client.connections.pending_requests"
+	DBClientConnectionsPendingRequestsUnit = "{request}"
+	DBClientConnectionsPendingRequestsDescription = "The number of pending requests for an open connection, cumulative for the entire pool"
+
+	// DBClientConnectionsTimeouts is the metric conforming to the
+	// "db.client.connections.timeouts" semantic conventions. It represents the
+	// number of connection timeouts that have occurred trying to obtain a
+	// connection from the pool.
+	// Instrument: counter
+	// Unit: {timeout}
+	// Stability: Experimental
+	DBClientConnectionsTimeoutsName = "db.client.connections.timeouts"
+	DBClientConnectionsTimeoutsUnit = "{timeout}"
+	DBClientConnectionsTimeoutsDescription = "The number of connection timeouts that have occurred trying to obtain a connection from the pool"
+
+	// DBClientConnectionsCreateTime is the metric conforming to the
+	// "db.client.connections.create_time" semantic conventions. It represents the
+	// time it took to create a new connection.
+	// Instrument: histogram
+	// Unit: ms
+	// Stability: Experimental
+	DBClientConnectionsCreateTimeName = "db.client.connections.create_time"
+	DBClientConnectionsCreateTimeUnit = "ms"
+	DBClientConnectionsCreateTimeDescription = "The time it took to create a new connection"
+
+	// DBClientConnectionsWaitTime is the metric conforming to the
+	// "db.client.connections.wait_time" semantic conventions. It represents the
+	// time it took to obtain an open connection from the pool.
+	// Instrument: histogram
+	// Unit: ms
+	// Stability: Experimental
+	DBClientConnectionsWaitTimeName = "db.client.connections.wait_time"
+	DBClientConnectionsWaitTimeUnit = "ms"
+	DBClientConnectionsWaitTimeDescription = "The time it took to obtain an open connection from the pool"
+
+	// DBClientConnectionsUseTime is the metric conforming to the
+	// "db.client.connections.use_time" semantic conventions. It represents the
+	// time between borrowing a connection and returning it to the pool.
+	// Instrument: histogram
+	// Unit: ms
+	// Stability: Experimental
+	DBClientConnectionsUseTimeName = "db.client.connections.use_time"
+	DBClientConnectionsUseTimeUnit = "ms"
+	DBClientConnectionsUseTimeDescription = "The time between borrowing a connection and returning it to the pool"
+
+	// AspnetcoreRoutingMatchAttempts is the metric conforming to the
+	// "aspnetcore.routing.match_attempts" semantic conventions. It represents the
+	// number of requests that were attempted to be matched to an endpoint.
+	// Instrument: counter
+	// Unit: {match_attempt}
+	// Stability: Experimental
+	AspnetcoreRoutingMatchAttemptsName = "aspnetcore.routing.match_attempts"
+	AspnetcoreRoutingMatchAttemptsUnit = "{match_attempt}"
+	AspnetcoreRoutingMatchAttemptsDescription = "Number of requests that were attempted to be matched to an endpoint."
+
+	// AspnetcoreDiagnosticsExceptions is the metric conforming to the
+	// "aspnetcore.diagnostics.exceptions" semantic conventions. It represents the
+	// number of exceptions caught by exception handling middleware.
+	// Instrument: counter
+	// Unit: {exception}
+	// Stability: Experimental
+	AspnetcoreDiagnosticsExceptionsName = "aspnetcore.diagnostics.exceptions"
+	AspnetcoreDiagnosticsExceptionsUnit = "{exception}"
+	AspnetcoreDiagnosticsExceptionsDescription = "Number of exceptions caught by exception handling middleware."
+
+	// AspnetcoreRateLimitingActiveRequestLeases is the metric conforming to the
+	// "aspnetcore.rate_limiting.active_request_leases" semantic conventions. It
+	// represents the number of requests that are currently active on the server
+	// that hold a rate limiting lease.
+	// Instrument: updowncounter
+	// Unit: {request}
+	// Stability: Experimental
+	AspnetcoreRateLimitingActiveRequestLeasesName = "aspnetcore.rate_limiting.active_request_leases"
+	AspnetcoreRateLimitingActiveRequestLeasesUnit = "{request}"
+	AspnetcoreRateLimitingActiveRequestLeasesDescription = "Number of requests that are currently active on the server that hold a rate limiting lease."
+
+	// AspnetcoreRateLimitingRequestLeaseDuration is the metric conforming to the
+	// "aspnetcore.rate_limiting.request_lease.duration" semantic conventions. It
+	// represents the duration of rate limiting lease held by requests on the
+	// server.
+	// Instrument: histogram
+	// Unit: s
+	// Stability: Experimental
+	AspnetcoreRateLimitingRequestLeaseDurationName = "aspnetcore.rate_limiting.request_lease.duration"
+	AspnetcoreRateLimitingRequestLeaseDurationUnit = "s"
+	AspnetcoreRateLimitingRequestLeaseDurationDescription = "The duration of rate limiting lease held by requests on the server."
+
+	// AspnetcoreRateLimitingRequestTimeInQueue is the metric conforming to the
+	// "aspnetcore.rate_limiting.request.time_in_queue" semantic conventions. It
+	// represents the time the request spent in a queue waiting to acquire a rate
+	// limiting lease.
+	// Instrument: histogram
+	// Unit: s
+	// Stability: Experimental
+	AspnetcoreRateLimitingRequestTimeInQueueName = "aspnetcore.rate_limiting.request.time_in_queue"
+	AspnetcoreRateLimitingRequestTimeInQueueUnit = "s"
+	AspnetcoreRateLimitingRequestTimeInQueueDescription = "The time the request spent in a queue waiting to acquire a rate limiting lease."
+
+	// AspnetcoreRateLimitingQueuedRequests is the metric conforming to the
+	// "aspnetcore.rate_limiting.queued_requests" semantic conventions. It
+	// represents the number of requests that are currently queued, waiting to
+	// acquire a rate limiting lease.
+	// Instrument: updowncounter
+	// Unit: {request}
+	// Stability: Experimental
+	AspnetcoreRateLimitingQueuedRequestsName = "aspnetcore.rate_limiting.queued_requests"
+	AspnetcoreRateLimitingQueuedRequestsUnit = "{request}"
+	AspnetcoreRateLimitingQueuedRequestsDescription = "Number of requests that are currently queued, waiting to acquire a rate limiting lease."
+
+	// AspnetcoreRateLimitingRequests is the metric conforming to the
+	// "aspnetcore.rate_limiting.requests" semantic conventions. It represents the
+	// number of requests that tried to acquire a rate limiting lease.
+	// Instrument: counter
+	// Unit: {request}
+	// Stability: Experimental
+	AspnetcoreRateLimitingRequestsName = "aspnetcore.rate_limiting.requests"
+	AspnetcoreRateLimitingRequestsUnit = "{request}"
+	AspnetcoreRateLimitingRequestsDescription = "Number of requests that tried to acquire a rate limiting lease."
+
+	// DNSLookupDuration is the metric conforming to the "dns.lookup.duration"
+	// semantic conventions. It represents the time taken to perform a DNS lookup.
+	// Instrument: histogram
+	// Unit: s
+	// Stability: Experimental
+	DNSLookupDurationName = "dns.lookup.duration"
+	DNSLookupDurationUnit = "s"
+	DNSLookupDurationDescription = "Measures the time taken to perform a DNS lookup."
+
+	// HTTPClientOpenConnections is the metric conforming to the
+	// "http.client.open_connections" semantic conventions. It represents the
+	// number of outbound HTTP connections that are currently active or idle on the
+	// client.
+	// Instrument: updowncounter
+	// Unit: {connection}
+	// Stability: Experimental
+	HTTPClientOpenConnectionsName = "http.client.open_connections"
+	HTTPClientOpenConnectionsUnit = "{connection}"
+	HTTPClientOpenConnectionsDescription = "Number of outbound HTTP connections that are currently active or idle on the client."
+
+	// HTTPClientConnectionDuration is the metric conforming to the
+	// "http.client.connection.duration" semantic conventions. It represents the
+	// duration of the successfully established outbound HTTP connections.
+	// Instrument: histogram
+	// Unit: s
+	// Stability: Experimental
+	HTTPClientConnectionDurationName = "http.client.connection.duration"
+	HTTPClientConnectionDurationUnit = "s"
+	HTTPClientConnectionDurationDescription = "The duration of the successfully established outbound HTTP connections."
+
+	// HTTPClientActiveRequests is the metric conforming to the
+	// "http.client.active_requests" semantic conventions. It represents the number
+	// of active HTTP requests.
+	// Instrument: updowncounter
+	// Unit: {request}
+	// Stability: Experimental
+	HTTPClientActiveRequestsName = "http.client.active_requests"
+	HTTPClientActiveRequestsUnit = "{request}"
+	HTTPClientActiveRequestsDescription = "Number of active HTTP requests."
+
+	// HTTPClientRequestTimeInQueue is the metric conforming to the
+	// "http.client.request.time_in_queue" semantic conventions. It represents the
+	// amount of time requests spent on a queue waiting for an available
+	// connection.
+	// Instrument: histogram
+	// Unit: s
+	// Stability: Experimental
+	HTTPClientRequestTimeInQueueName = "http.client.request.time_in_queue"
+	HTTPClientRequestTimeInQueueUnit = "s"
+	HTTPClientRequestTimeInQueueDescription = "The amount of time requests spent on a queue waiting for an available connection."
+
+	// KestrelActiveConnections is the metric conforming to the
+	// "kestrel.active_connections" semantic conventions. It represents the number
+	// of connections that are currently active on the server.
+	// Instrument: updowncounter
+	// Unit: {connection}
+	// Stability: Experimental
+	KestrelActiveConnectionsName = "kestrel.active_connections"
+	KestrelActiveConnectionsUnit = "{connection}"
+	KestrelActiveConnectionsDescription = "Number of connections that are currently active on the server."
+
+	// KestrelConnectionDuration is the metric conforming to the
+	// "kestrel.connection.duration" semantic conventions. It represents the
+	// duration of connections on the server.
+	// Instrument: histogram
+	// Unit: s
+	// Stability: Experimental
+	KestrelConnectionDurationName = "kestrel.connection.duration"
+	KestrelConnectionDurationUnit = "s"
+	KestrelConnectionDurationDescription = "The duration of connections on the server."
+
+	// KestrelRejectedConnections is the metric conforming to the
+	// "kestrel.rejected_connections" semantic conventions. It represents the
+	// number of connections rejected by the server.
+	// Instrument: counter
+	// Unit: {connection}
+	// Stability: Experimental
+	KestrelRejectedConnectionsName = "kestrel.rejected_connections"
+	KestrelRejectedConnectionsUnit = "{connection}"
+	KestrelRejectedConnectionsDescription = "Number of connections rejected by the server."
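Each metric in this file is a Name/Unit/Description triplet intended to be passed straight to an instrument constructor, so the instrument stays aligned with the convention. A hedged sketch using the `http.client.active_requests` constants defined just above, assuming a configured global MeterProvider (the meter name is illustrative):

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

func trackRequest(ctx context.Context) error {
	meter := otel.Meter("example/http")

	// Name, unit, and description all come from the generated constants.
	active, err := meter.Int64UpDownCounter(
		semconv.HTTPClientActiveRequestsName,
		metric.WithUnit(semconv.HTTPClientActiveRequestsUnit),
		metric.WithDescription(semconv.HTTPClientActiveRequestsDescription),
	)
	if err != nil {
		return err
	}

	active.Add(ctx, 1)        // request started
	defer active.Add(ctx, -1) // request finished
	return nil
}
```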
+
+	// KestrelQueuedConnections is the metric conforming to the
+	// "kestrel.queued_connections" semantic conventions. It represents the number
+	// of connections that are currently queued and are waiting to start.
+	// Instrument: updowncounter
+	// Unit: {connection}
+	// Stability: Experimental
+	KestrelQueuedConnectionsName = "kestrel.queued_connections"
+	KestrelQueuedConnectionsUnit = "{connection}"
+	KestrelQueuedConnectionsDescription = "Number of connections that are currently queued and are waiting to start."
+
+	// KestrelQueuedRequests is the metric conforming to the
+	// "kestrel.queued_requests" semantic conventions. It represents the number of
+	// HTTP requests on multiplexed connections (HTTP/2 and HTTP/3) that are
+	// currently queued and are waiting to start.
+	// Instrument: updowncounter
+	// Unit: {request}
+	// Stability: Experimental
+	KestrelQueuedRequestsName = "kestrel.queued_requests"
+	KestrelQueuedRequestsUnit = "{request}"
+	KestrelQueuedRequestsDescription = "Number of HTTP requests on multiplexed connections (HTTP/2 and HTTP/3) that are currently queued and are waiting to start."
+
+	// KestrelUpgradedConnections is the metric conforming to the
+	// "kestrel.upgraded_connections" semantic conventions. It represents the
+	// number of connections that are currently upgraded (WebSockets).
+	// Instrument: updowncounter
+	// Unit: {connection}
+	// Stability: Experimental
+	KestrelUpgradedConnectionsName = "kestrel.upgraded_connections"
+	KestrelUpgradedConnectionsUnit = "{connection}"
+	KestrelUpgradedConnectionsDescription = "Number of connections that are currently upgraded (WebSockets)."
+
+	// KestrelTLSHandshakeDuration is the metric conforming to the
+	// "kestrel.tls_handshake.duration" semantic conventions. It represents the
+	// duration of TLS handshakes on the server.
+	// Instrument: histogram
+	// Unit: s
+	// Stability: Experimental
+	KestrelTLSHandshakeDurationName = "kestrel.tls_handshake.duration"
+	KestrelTLSHandshakeDurationUnit = "s"
+	KestrelTLSHandshakeDurationDescription = "The duration of TLS handshakes on the server."
+
+	// KestrelActiveTLSHandshakes is the metric conforming to the
+	// "kestrel.active_tls_handshakes" semantic conventions. It represents the
+	// number of TLS handshakes that are currently in progress on the server.
+	// Instrument: updowncounter
+	// Unit: {handshake}
+	// Stability: Experimental
+	KestrelActiveTLSHandshakesName = "kestrel.active_tls_handshakes"
+	KestrelActiveTLSHandshakesUnit = "{handshake}"
+	KestrelActiveTLSHandshakesDescription = "Number of TLS handshakes that are currently in progress on the server."
+
+	// SignalrServerConnectionDuration is the metric conforming to the
+	// "signalr.server.connection.duration" semantic conventions. It represents the
+	// duration of connections on the server.
+	// Instrument: histogram
+	// Unit: s
+	// Stability: Experimental
+	SignalrServerConnectionDurationName = "signalr.server.connection.duration"
+	SignalrServerConnectionDurationUnit = "s"
+	SignalrServerConnectionDurationDescription = "The duration of connections on the server."
+
+	// SignalrServerActiveConnections is the metric conforming to the
+	// "signalr.server.active_connections" semantic conventions. It represents the
+	// number of connections that are currently active on the server.
+	// Instrument: updowncounter
+	// Unit: {connection}
+	// Stability: Experimental
+	SignalrServerActiveConnectionsName = "signalr.server.active_connections"
+	SignalrServerActiveConnectionsUnit = "{connection}"
+	SignalrServerActiveConnectionsDescription = "Number of connections that are currently active on the server."
+
+	// FaaSInvokeDuration is the metric conforming to the "faas.invoke_duration"
+	// semantic conventions. It represents the duration of the function's logic
+	// execution.
+	// Instrument: histogram
+	// Unit: s
+	// Stability: Experimental
+	FaaSInvokeDurationName = "faas.invoke_duration"
+	FaaSInvokeDurationUnit = "s"
+	FaaSInvokeDurationDescription = "Measures the duration of the function's logic execution"
+
+	// FaaSInitDuration is the metric conforming to the "faas.init_duration"
+	// semantic conventions. It represents the duration of the function's
+	// initialization, such as a cold start.
+	// Instrument: histogram
+	// Unit: s
+	// Stability: Experimental
+	FaaSInitDurationName = "faas.init_duration"
+	FaaSInitDurationUnit = "s"
+	FaaSInitDurationDescription = "Measures the duration of the function's initialization, such as a cold start"
+
+	// FaaSColdstarts is the metric conforming to the "faas.coldstarts" semantic
+	// conventions. It represents the number of invocation cold starts.
+	// Instrument: counter
+	// Unit: {coldstart}
+	// Stability: Experimental
+	FaaSColdstartsName = "faas.coldstarts"
+	FaaSColdstartsUnit = "{coldstart}"
+	FaaSColdstartsDescription = "Number of invocation cold starts"
+
+	// FaaSErrors is the metric conforming to the "faas.errors" semantic
+	// conventions. It represents the number of invocation errors.
+	// Instrument: counter
+	// Unit: {error}
+	// Stability: Experimental
+	FaaSErrorsName = "faas.errors"
+	FaaSErrorsUnit = "{error}"
+	FaaSErrorsDescription = "Number of invocation errors"
+
+	// FaaSInvocations is the metric conforming to the "faas.invocations" semantic
+	// conventions. It represents the number of successful invocations.
+	// Instrument: counter
+	// Unit: {invocation}
+	// Stability: Experimental
+	FaaSInvocationsName = "faas.invocations"
+	FaaSInvocationsUnit = "{invocation}"
+	FaaSInvocationsDescription = "Number of successful invocations"
+
+	// FaaSTimeouts is the metric conforming to the "faas.timeouts" semantic
+	// conventions. It represents the number of invocation timeouts.
+	// Instrument: counter
+	// Unit: {timeout}
+	// Stability: Experimental
+	FaaSTimeoutsName = "faas.timeouts"
+	FaaSTimeoutsUnit = "{timeout}"
+	FaaSTimeoutsDescription = "Number of invocation timeouts"
+
+	// FaaSMemUsage is the metric conforming to the "faas.mem_usage" semantic
+	// conventions. It represents the distribution of max memory usage per
+	// invocation.
+	// Instrument: histogram
+	// Unit: By
+	// Stability: Experimental
+	FaaSMemUsageName = "faas.mem_usage"
+	FaaSMemUsageUnit = "By"
+	FaaSMemUsageDescription = "Distribution of max memory usage per invocation"
+
+	// FaaSCPUUsage is the metric conforming to the "faas.cpu_usage" semantic
+	// conventions. It represents the distribution of CPU usage per invocation.
+	// Instrument: histogram
+	// Unit: s
+	// Stability: Experimental
+	FaaSCPUUsageName = "faas.cpu_usage"
+	FaaSCPUUsageUnit = "s"
+	FaaSCPUUsageDescription = "Distribution of CPU usage per invocation"
+
+	// FaaSNetIo is the metric conforming to the "faas.net_io" semantic
+	// conventions. It represents the distribution of net I/O usage per invocation.
+	// Instrument: histogram
+	// Unit: By
+	// Stability: Experimental
+	FaaSNetIoName = "faas.net_io"
+	FaaSNetIoUnit = "By"
+	FaaSNetIoDescription = "Distribution of net I/O usage per invocation"
+
+	// HTTPServerRequestDuration is the metric conforming to the
+	// "http.server.request.duration" semantic conventions. It represents the
+	// duration of HTTP server requests.
+	// Instrument: histogram
+	// Unit: s
+	// Stability: Stable
+	HTTPServerRequestDurationName = "http.server.request.duration"
+	HTTPServerRequestDurationUnit = "s"
+	HTTPServerRequestDurationDescription = "Duration of HTTP server requests."
+
+	// HTTPServerActiveRequests is the metric conforming to the
+	// "http.server.active_requests" semantic conventions. It represents the number
+	// of active HTTP server requests.
+	// Instrument: updowncounter
+	// Unit: {request}
+	// Stability: Experimental
+	HTTPServerActiveRequestsName = "http.server.active_requests"
+	HTTPServerActiveRequestsUnit = "{request}"
+	HTTPServerActiveRequestsDescription = "Number of active HTTP server requests."
+
+	// HTTPServerRequestBodySize is the metric conforming to the
+	// "http.server.request.body.size" semantic conventions. It represents the size
+	// of HTTP server request bodies.
+	// Instrument: histogram
+	// Unit: By
+	// Stability: Experimental
+	HTTPServerRequestBodySizeName = "http.server.request.body.size"
+	HTTPServerRequestBodySizeUnit = "By"
+	HTTPServerRequestBodySizeDescription = "Size of HTTP server request bodies."
+
+	// HTTPServerResponseBodySize is the metric conforming to the
+	// "http.server.response.body.size" semantic conventions. It represents the
+	// size of HTTP server response bodies.
+	// Instrument: histogram
+	// Unit: By
+	// Stability: Experimental
+	HTTPServerResponseBodySizeName = "http.server.response.body.size"
+	HTTPServerResponseBodySizeUnit = "By"
+	HTTPServerResponseBodySizeDescription = "Size of HTTP server response bodies."
+
+	// HTTPClientRequestDuration is the metric conforming to the
+	// "http.client.request.duration" semantic conventions. It represents the
+	// duration of HTTP client requests.
+	// Instrument: histogram
+	// Unit: s
+	// Stability: Stable
+	HTTPClientRequestDurationName = "http.client.request.duration"
+	HTTPClientRequestDurationUnit = "s"
+	HTTPClientRequestDurationDescription = "Duration of HTTP client requests."
+
+	// HTTPClientRequestBodySize is the metric conforming to the
+	// "http.client.request.body.size" semantic conventions. It represents the size
+	// of HTTP client request bodies.
+	// Instrument: histogram
+	// Unit: By
+	// Stability: Experimental
+	HTTPClientRequestBodySizeName = "http.client.request.body.size"
+	HTTPClientRequestBodySizeUnit = "By"
+	HTTPClientRequestBodySizeDescription = "Size of HTTP client request bodies."
+
+	// HTTPClientResponseBodySize is the metric conforming to the
+	// "http.client.response.body.size" semantic conventions. It represents the
+	// size of HTTP client response bodies.
+	// Instrument: histogram
+	// Unit: By
+	// Stability: Experimental
+	HTTPClientResponseBodySizeName = "http.client.response.body.size"
+	HTTPClientResponseBodySizeUnit = "By"
+	HTTPClientResponseBodySizeDescription = "Size of HTTP client response bodies."
+
+	// JvmMemoryInit is the metric conforming to the "jvm.memory.init" semantic
+	// conventions. It represents the measure of initial memory requested.
+ // Instrument: updowncounter + // Unit: By + // Stability: Experimental + JvmMemoryInitName = "jvm.memory.init" + JvmMemoryInitUnit = "By" + JvmMemoryInitDescription = "Measure of initial memory requested." + + // JvmSystemCPUUtilization is the metric conforming to the + // "jvm.system.cpu.utilization" semantic conventions. It represents the recent + // CPU utilization for the whole system as reported by the JVM. + // Instrument: gauge + // Unit: 1 + // Stability: Experimental + JvmSystemCPUUtilizationName = "jvm.system.cpu.utilization" + JvmSystemCPUUtilizationUnit = "1" + JvmSystemCPUUtilizationDescription = "Recent CPU utilization for the whole system as reported by the JVM." + + // JvmSystemCPULoad1m is the metric conforming to the "jvm.system.cpu.load_1m" + // semantic conventions. It represents the average CPU load of the whole system + // for the last minute as reported by the JVM. + // Instrument: gauge + // Unit: {run_queue_item} + // Stability: Experimental + JvmSystemCPULoad1mName = "jvm.system.cpu.load_1m" + JvmSystemCPULoad1mUnit = "{run_queue_item}" + JvmSystemCPULoad1mDescription = "Average CPU load of the whole system for the last minute as reported by the JVM." + + // JvmBufferMemoryUsage is the metric conforming to the + // "jvm.buffer.memory.usage" semantic conventions. It represents the measure of + // memory used by buffers. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + JvmBufferMemoryUsageName = "jvm.buffer.memory.usage" + JvmBufferMemoryUsageUnit = "By" + JvmBufferMemoryUsageDescription = "Measure of memory used by buffers." + + // JvmBufferMemoryLimit is the metric conforming to the + // "jvm.buffer.memory.limit" semantic conventions. It represents the measure of + // total memory capacity of buffers. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + JvmBufferMemoryLimitName = "jvm.buffer.memory.limit" + JvmBufferMemoryLimitUnit = "By" + JvmBufferMemoryLimitDescription = "Measure of total memory capacity of buffers." + + // JvmBufferCount is the metric conforming to the "jvm.buffer.count" semantic + // conventions. It represents the number of buffers in the pool. + // Instrument: updowncounter + // Unit: {buffer} + // Stability: Experimental + JvmBufferCountName = "jvm.buffer.count" + JvmBufferCountUnit = "{buffer}" + JvmBufferCountDescription = "Number of buffers in the pool." + + // JvmMemoryUsed is the metric conforming to the "jvm.memory.used" semantic + // conventions. It represents the measure of memory used. + // Instrument: updowncounter + // Unit: By + // Stability: Stable + JvmMemoryUsedName = "jvm.memory.used" + JvmMemoryUsedUnit = "By" + JvmMemoryUsedDescription = "Measure of memory used." + + // JvmMemoryCommitted is the metric conforming to the "jvm.memory.committed" + // semantic conventions. It represents the measure of memory committed. + // Instrument: updowncounter + // Unit: By + // Stability: Stable + JvmMemoryCommittedName = "jvm.memory.committed" + JvmMemoryCommittedUnit = "By" + JvmMemoryCommittedDescription = "Measure of memory committed." + + // JvmMemoryLimit is the metric conforming to the "jvm.memory.limit" semantic + // conventions. It represents the measure of max obtainable memory. + // Instrument: updowncounter + // Unit: By + // Stability: Stable + JvmMemoryLimitName = "jvm.memory.limit" + JvmMemoryLimitUnit = "By" + JvmMemoryLimitDescription = "Measure of max obtainable memory." 
+
+ // JvmMemoryUsedAfterLastGc is the metric conforming to the
+ // "jvm.memory.used_after_last_gc" semantic conventions. It represents the
+ // measure of memory used, as measured after the most recent garbage collection
+ // event on this pool.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Stable
+ JvmMemoryUsedAfterLastGcName = "jvm.memory.used_after_last_gc"
+ JvmMemoryUsedAfterLastGcUnit = "By"
+ JvmMemoryUsedAfterLastGcDescription = "Measure of memory used, as measured after the most recent garbage collection event on this pool."
+
+ // JvmGcDuration is the metric conforming to the "jvm.gc.duration" semantic
+ // conventions. It represents the duration of JVM garbage collection actions.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Stable
+ JvmGcDurationName = "jvm.gc.duration"
+ JvmGcDurationUnit = "s"
+ JvmGcDurationDescription = "Duration of JVM garbage collection actions."
+
+ // JvmThreadCount is the metric conforming to the "jvm.thread.count" semantic
+ // conventions. It represents the number of executing platform threads.
+ // Instrument: updowncounter
+ // Unit: {thread}
+ // Stability: Stable
+ JvmThreadCountName = "jvm.thread.count"
+ JvmThreadCountUnit = "{thread}"
+ JvmThreadCountDescription = "Number of executing platform threads."
+
+ // JvmClassLoaded is the metric conforming to the "jvm.class.loaded" semantic
+ // conventions. It represents the number of classes loaded since JVM start.
+ // Instrument: counter
+ // Unit: {class}
+ // Stability: Stable
+ JvmClassLoadedName = "jvm.class.loaded"
+ JvmClassLoadedUnit = "{class}"
+ JvmClassLoadedDescription = "Number of classes loaded since JVM start."
+
+ // JvmClassUnloaded is the metric conforming to the "jvm.class.unloaded"
+ // semantic conventions. It represents the number of classes unloaded since JVM
+ // start.
+ // Instrument: counter
+ // Unit: {class}
+ // Stability: Stable
+ JvmClassUnloadedName = "jvm.class.unloaded"
+ JvmClassUnloadedUnit = "{class}"
+ JvmClassUnloadedDescription = "Number of classes unloaded since JVM start."
+
+ // JvmClassCount is the metric conforming to the "jvm.class.count" semantic
+ // conventions. It represents the number of classes currently loaded.
+ // Instrument: updowncounter
+ // Unit: {class}
+ // Stability: Stable
+ JvmClassCountName = "jvm.class.count"
+ JvmClassCountUnit = "{class}"
+ JvmClassCountDescription = "Number of classes currently loaded."
+
+ // JvmCPUCount is the metric conforming to the "jvm.cpu.count" semantic
+ // conventions. It represents the number of processors available to the Java
+ // virtual machine.
+ // Instrument: updowncounter
+ // Unit: {cpu}
+ // Stability: Stable
+ JvmCPUCountName = "jvm.cpu.count"
+ JvmCPUCountUnit = "{cpu}"
+ JvmCPUCountDescription = "Number of processors available to the Java virtual machine."
+
+ // JvmCPUTime is the metric conforming to the "jvm.cpu.time" semantic
+ // conventions. It represents the CPU time used by the process as reported by
+ // the JVM.
+ // Instrument: counter
+ // Unit: s
+ // Stability: Stable
+ JvmCPUTimeName = "jvm.cpu.time"
+ JvmCPUTimeUnit = "s"
+ JvmCPUTimeDescription = "CPU time used by the process as reported by the JVM."
+
+ // JvmCPURecentUtilization is the metric conforming to the
+ // "jvm.cpu.recent_utilization" semantic conventions. It represents the recent
+ // CPU utilization for the process as reported by the JVM.
+ // Instrument: gauge
+ // Unit: 1
+ // Stability: Stable
+ JvmCPURecentUtilizationName = "jvm.cpu.recent_utilization"
+ JvmCPURecentUtilizationUnit = "1"
+ JvmCPURecentUtilizationDescription = "Recent CPU utilization for the process as reported by the JVM."
+
+ // MessagingPublishDuration is the metric conforming to the
+ // "messaging.publish.duration" semantic conventions. It measures the
+ // duration of publish operation.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ MessagingPublishDurationName = "messaging.publish.duration"
+ MessagingPublishDurationUnit = "s"
+ MessagingPublishDurationDescription = "Measures the duration of publish operation."
+
+ // MessagingReceiveDuration is the metric conforming to the
+ // "messaging.receive.duration" semantic conventions. It measures the
+ // duration of receive operation.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ MessagingReceiveDurationName = "messaging.receive.duration"
+ MessagingReceiveDurationUnit = "s"
+ MessagingReceiveDurationDescription = "Measures the duration of receive operation."
+
+ // MessagingDeliverDuration is the metric conforming to the
+ // "messaging.deliver.duration" semantic conventions. It measures the
+ // duration of deliver operation.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ MessagingDeliverDurationName = "messaging.deliver.duration"
+ MessagingDeliverDurationUnit = "s"
+ MessagingDeliverDurationDescription = "Measures the duration of deliver operation."
+
+ // MessagingPublishMessages is the metric conforming to the
+ // "messaging.publish.messages" semantic conventions. It measures the
+ // number of published messages.
+ // Instrument: counter
+ // Unit: {message}
+ // Stability: Experimental
+ MessagingPublishMessagesName = "messaging.publish.messages"
+ MessagingPublishMessagesUnit = "{message}"
+ MessagingPublishMessagesDescription = "Measures the number of published messages."
+
+ // MessagingReceiveMessages is the metric conforming to the
+ // "messaging.receive.messages" semantic conventions. It measures the
+ // number of received messages.
+ // Instrument: counter
+ // Unit: {message}
+ // Stability: Experimental
+ MessagingReceiveMessagesName = "messaging.receive.messages"
+ MessagingReceiveMessagesUnit = "{message}"
+ MessagingReceiveMessagesDescription = "Measures the number of received messages."
+
+ // MessagingDeliverMessages is the metric conforming to the
+ // "messaging.deliver.messages" semantic conventions. It measures the
+ // number of delivered messages.
+ // Instrument: counter
+ // Unit: {message}
+ // Stability: Experimental
+ MessagingDeliverMessagesName = "messaging.deliver.messages"
+ MessagingDeliverMessagesUnit = "{message}"
+ MessagingDeliverMessagesDescription = "Measures the number of delivered messages."
+
+ // RPCServerDuration is the metric conforming to the "rpc.server.duration"
+ // semantic conventions. It measures the duration of inbound
+ // RPC.
+ // Instrument: histogram
+ // Unit: ms
+ // Stability: Experimental
+ RPCServerDurationName = "rpc.server.duration"
+ RPCServerDurationUnit = "ms"
+ RPCServerDurationDescription = "Measures the duration of inbound RPC."
+
+ // RPCServerRequestSize is the metric conforming to the
+ // "rpc.server.request.size" semantic conventions. It measures
+ // the size of RPC request messages (uncompressed).
+ // Instrument: histogram
+ // Unit: By
+ // Stability: Experimental
+ RPCServerRequestSizeName = "rpc.server.request.size"
+ RPCServerRequestSizeUnit = "By"
+ RPCServerRequestSizeDescription = "Measures the size of RPC request messages (uncompressed)."
+
+ // RPCServerResponseSize is the metric conforming to the
+ // "rpc.server.response.size" semantic conventions. It measures
+ // the size of RPC response messages (uncompressed).
+ // Instrument: histogram
+ // Unit: By
+ // Stability: Experimental
+ RPCServerResponseSizeName = "rpc.server.response.size"
+ RPCServerResponseSizeUnit = "By"
+ RPCServerResponseSizeDescription = "Measures the size of RPC response messages (uncompressed)."
+
+ // RPCServerRequestsPerRPC is the metric conforming to the
+ // "rpc.server.requests_per_rpc" semantic conventions. It measures the
+ // number of messages received per RPC.
+ // Instrument: histogram
+ // Unit: {count}
+ // Stability: Experimental
+ RPCServerRequestsPerRPCName = "rpc.server.requests_per_rpc"
+ RPCServerRequestsPerRPCUnit = "{count}"
+ RPCServerRequestsPerRPCDescription = "Measures the number of messages received per RPC."
+
+ // RPCServerResponsesPerRPC is the metric conforming to the
+ // "rpc.server.responses_per_rpc" semantic conventions. It measures the
+ // number of messages sent per RPC.
+ // Instrument: histogram
+ // Unit: {count}
+ // Stability: Experimental
+ RPCServerResponsesPerRPCName = "rpc.server.responses_per_rpc"
+ RPCServerResponsesPerRPCUnit = "{count}"
+ RPCServerResponsesPerRPCDescription = "Measures the number of messages sent per RPC."
+
+ // RPCClientDuration is the metric conforming to the "rpc.client.duration"
+ // semantic conventions. It measures the duration of outbound
+ // RPC.
+ // Instrument: histogram
+ // Unit: ms
+ // Stability: Experimental
+ RPCClientDurationName = "rpc.client.duration"
+ RPCClientDurationUnit = "ms"
+ RPCClientDurationDescription = "Measures the duration of outbound RPC."
+
+ // RPCClientRequestSize is the metric conforming to the
+ // "rpc.client.request.size" semantic conventions. It measures
+ // the size of RPC request messages (uncompressed).
+ // Instrument: histogram
+ // Unit: By
+ // Stability: Experimental
+ RPCClientRequestSizeName = "rpc.client.request.size"
+ RPCClientRequestSizeUnit = "By"
+ RPCClientRequestSizeDescription = "Measures the size of RPC request messages (uncompressed)."
+
+ // RPCClientResponseSize is the metric conforming to the
+ // "rpc.client.response.size" semantic conventions. It measures
+ // the size of RPC response messages (uncompressed).
+ // Instrument: histogram
+ // Unit: By
+ // Stability: Experimental
+ RPCClientResponseSizeName = "rpc.client.response.size"
+ RPCClientResponseSizeUnit = "By"
+ RPCClientResponseSizeDescription = "Measures the size of RPC response messages (uncompressed)."
+
+ // RPCClientRequestsPerRPC is the metric conforming to the
+ // "rpc.client.requests_per_rpc" semantic conventions. It measures the
+ // number of messages received per RPC.
+ // Instrument: histogram
+ // Unit: {count}
+ // Stability: Experimental
+ RPCClientRequestsPerRPCName = "rpc.client.requests_per_rpc"
+ RPCClientRequestsPerRPCUnit = "{count}"
+ RPCClientRequestsPerRPCDescription = "Measures the number of messages received per RPC."
+
+ // RPCClientResponsesPerRPC is the metric conforming to the
+ // "rpc.client.responses_per_rpc" semantic conventions.
It measures the
+ // number of messages sent per RPC.
+ // Instrument: histogram
+ // Unit: {count}
+ // Stability: Experimental
+ RPCClientResponsesPerRPCName = "rpc.client.responses_per_rpc"
+ RPCClientResponsesPerRPCUnit = "{count}"
+ RPCClientResponsesPerRPCDescription = "Measures the number of messages sent per RPC."
+
+ // SystemCPUTime is the metric conforming to the "system.cpu.time" semantic
+ // conventions. It represents the seconds each logical CPU spent on each mode.
+ // Instrument: counter
+ // Unit: s
+ // Stability: Experimental
+ SystemCPUTimeName = "system.cpu.time"
+ SystemCPUTimeUnit = "s"
+ SystemCPUTimeDescription = "Seconds each logical CPU spent on each mode"
+
+ // SystemCPUUtilization is the metric conforming to the
+ // "system.cpu.utilization" semantic conventions. It represents the difference
+ // in system.cpu.time since the last measurement, divided by the elapsed time
+ // and number of logical CPUs.
+ // Instrument: gauge
+ // Unit: 1
+ // Stability: Experimental
+ SystemCPUUtilizationName = "system.cpu.utilization"
+ SystemCPUUtilizationUnit = "1"
+ SystemCPUUtilizationDescription = "Difference in system.cpu.time since the last measurement, divided by the elapsed time and number of logical CPUs"
+
+ // SystemCPUFrequency is the metric conforming to the "system.cpu.frequency"
+ // semantic conventions. It reports the current frequency of the
+ // CPU in Hz.
+ // Instrument: gauge
+ // Unit: {Hz}
+ // Stability: Experimental
+ SystemCPUFrequencyName = "system.cpu.frequency"
+ SystemCPUFrequencyUnit = "{Hz}"
+ SystemCPUFrequencyDescription = "Reports the current frequency of the CPU in Hz"
+
+ // SystemCPUPhysicalCount is the metric conforming to the
+ // "system.cpu.physical.count" semantic conventions. It reports
+ // the number of actual physical processor cores on the hardware.
+ // Instrument: updowncounter
+ // Unit: {cpu}
+ // Stability: Experimental
+ SystemCPUPhysicalCountName = "system.cpu.physical.count"
+ SystemCPUPhysicalCountUnit = "{cpu}"
+ SystemCPUPhysicalCountDescription = "Reports the number of actual physical processor cores on the hardware"
+
+ // SystemCPULogicalCount is the metric conforming to the
+ // "system.cpu.logical.count" semantic conventions. It reports
+ // the number of logical (virtual) processor cores created by the operating
+ // system to manage multitasking.
+ // Instrument: updowncounter
+ // Unit: {cpu}
+ // Stability: Experimental
+ SystemCPULogicalCountName = "system.cpu.logical.count"
+ SystemCPULogicalCountUnit = "{cpu}"
+ SystemCPULogicalCountDescription = "Reports the number of logical (virtual) processor cores created by the operating system to manage multitasking"
+
+ // SystemMemoryUsage is the metric conforming to the "system.memory.usage"
+ // semantic conventions. It reports memory in use by state.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Experimental
+ SystemMemoryUsageName = "system.memory.usage"
+ SystemMemoryUsageUnit = "By"
+ SystemMemoryUsageDescription = "Reports memory in use by state."
+
+ // SystemMemoryLimit is the metric conforming to the "system.memory.limit"
+ // semantic conventions. It represents the total memory available in the
+ // system.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Experimental
+ SystemMemoryLimitName = "system.memory.limit"
+ SystemMemoryLimitUnit = "By"
+ SystemMemoryLimitDescription = "Total memory available in the system."
+
+ // SystemMemoryUtilization is the metric conforming to the
+ // "system.memory.utilization" semantic conventions.
+ // Instrument: gauge
+ // Unit: 1
+ // Stability: Experimental
+ // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+ SystemMemoryUtilizationName = "system.memory.utilization"
+ SystemMemoryUtilizationUnit = "1"
+
+ // SystemPagingUsage is the metric conforming to the "system.paging.usage"
+ // semantic conventions. It represents the Unix swap or Windows pagefile usage.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Experimental
+ SystemPagingUsageName = "system.paging.usage"
+ SystemPagingUsageUnit = "By"
+ SystemPagingUsageDescription = "Unix swap or windows pagefile usage"
+
+ // SystemPagingUtilization is the metric conforming to the
+ // "system.paging.utilization" semantic conventions.
+ // Instrument: gauge
+ // Unit: 1
+ // Stability: Experimental
+ // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+ SystemPagingUtilizationName = "system.paging.utilization"
+ SystemPagingUtilizationUnit = "1"
+
+ // SystemPagingFaults is the metric conforming to the "system.paging.faults"
+ // semantic conventions.
+ // Instrument: counter
+ // Unit: {fault}
+ // Stability: Experimental
+ // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+ SystemPagingFaultsName = "system.paging.faults"
+ SystemPagingFaultsUnit = "{fault}"
+
+ // SystemPagingOperations is the metric conforming to the
+ // "system.paging.operations" semantic conventions.
+ // Instrument: counter
+ // Unit: {operation}
+ // Stability: Experimental
+ // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+ SystemPagingOperationsName = "system.paging.operations"
+ SystemPagingOperationsUnit = "{operation}"
+
+ // SystemDiskIo is the metric conforming to the "system.disk.io" semantic
+ // conventions.
+ // Instrument: counter
+ // Unit: By
+ // Stability: Experimental
+ // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+ SystemDiskIoName = "system.disk.io"
+ SystemDiskIoUnit = "By"
+
+ // SystemDiskOperations is the metric conforming to the
+ // "system.disk.operations" semantic conventions.
+ // Instrument: counter
+ // Unit: {operation}
+ // Stability: Experimental
+ // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+ SystemDiskOperationsName = "system.disk.operations"
+ SystemDiskOperationsUnit = "{operation}"
+
+ // SystemDiskIoTime is the metric conforming to the "system.disk.io_time"
+ // semantic conventions. It represents the time the disk spent activated.
+ // Instrument: counter
+ // Unit: s
+ // Stability: Experimental
+ SystemDiskIoTimeName = "system.disk.io_time"
+ SystemDiskIoTimeUnit = "s"
+ SystemDiskIoTimeDescription = "Time disk spent activated"
+
+ // SystemDiskOperationTime is the metric conforming to the
+ // "system.disk.operation_time" semantic conventions. It represents the sum of
+ // the time each operation took to complete.
+ // Instrument: counter + // Unit: s + // Stability: Experimental + SystemDiskOperationTimeName = "system.disk.operation_time" + SystemDiskOperationTimeUnit = "s" + SystemDiskOperationTimeDescription = "Sum of the time each operation took to complete" + + // SystemDiskMerged is the metric conforming to the "system.disk.merged" + // semantic conventions. + // Instrument: counter + // Unit: {operation} + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemDiskMergedName = "system.disk.merged" + SystemDiskMergedUnit = "{operation}" + + // SystemFilesystemUsage is the metric conforming to the + // "system.filesystem.usage" semantic conventions. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemFilesystemUsageName = "system.filesystem.usage" + SystemFilesystemUsageUnit = "By" + + // SystemFilesystemUtilization is the metric conforming to the + // "system.filesystem.utilization" semantic conventions. + // Instrument: gauge + // Unit: 1 + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemFilesystemUtilizationName = "system.filesystem.utilization" + SystemFilesystemUtilizationUnit = "1" + + // SystemNetworkDropped is the metric conforming to the + // "system.network.dropped" semantic conventions. It represents the count of + // packets that are dropped or discarded even though there was no error. + // Instrument: counter + // Unit: {packet} + // Stability: Experimental + SystemNetworkDroppedName = "system.network.dropped" + SystemNetworkDroppedUnit = "{packet}" + SystemNetworkDroppedDescription = "Count of packets that are dropped or discarded even though there was no error" + + // SystemNetworkPackets is the metric conforming to the + // "system.network.packets" semantic conventions. + // Instrument: counter + // Unit: {packet} + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemNetworkPacketsName = "system.network.packets" + SystemNetworkPacketsUnit = "{packet}" + + // SystemNetworkErrors is the metric conforming to the "system.network.errors" + // semantic conventions. It represents the count of network errors detected. + // Instrument: counter + // Unit: {error} + // Stability: Experimental + SystemNetworkErrorsName = "system.network.errors" + SystemNetworkErrorsUnit = "{error}" + SystemNetworkErrorsDescription = "Count of network errors detected" + + // SystemNetworkIo is the metric conforming to the "system.network.io" semantic + // conventions. + // Instrument: counter + // Unit: By + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemNetworkIoName = "system.network.io" + SystemNetworkIoUnit = "By" + + // SystemNetworkConnections is the metric conforming to the + // "system.network.connections" semantic conventions. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. 
+ SystemNetworkConnectionsName = "system.network.connections"
+ SystemNetworkConnectionsUnit = "{connection}"
+
+ // SystemProcessesCount is the metric conforming to the
+ // "system.processes.count" semantic conventions. It represents the total
+ // number of processes in each state.
+ // Instrument: updowncounter
+ // Unit: {process}
+ // Stability: Experimental
+ SystemProcessesCountName = "system.processes.count"
+ SystemProcessesCountUnit = "{process}"
+ SystemProcessesCountDescription = "Total number of processes in each state"
+
+ // SystemProcessesCreated is the metric conforming to the
+ // "system.processes.created" semantic conventions. It represents the total
+ // number of processes created over uptime of the host.
+ // Instrument: counter
+ // Unit: {process}
+ // Stability: Experimental
+ SystemProcessesCreatedName = "system.processes.created"
+ SystemProcessesCreatedUnit = "{process}"
+ SystemProcessesCreatedDescription = "Total number of processes created over uptime of the host"
+
+ // SystemLinuxMemoryAvailable is the metric conforming to the
+ // "system.linux.memory.available" semantic conventions. It represents an
+ // estimate of how much memory is available for starting new applications,
+ // without causing swapping.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Experimental
+ SystemLinuxMemoryAvailableName = "system.linux.memory.available"
+ SystemLinuxMemoryAvailableUnit = "By"
+ SystemLinuxMemoryAvailableDescription = "An estimate of how much memory is available for starting new applications, without causing swapping"
+)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/resource.go
new file mode 100644
index 0000000000..d66bbe9c23
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/resource.go
@@ -0,0 +1,2545 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// A cloud environment (e.g. GCP, Azure, AWS).
+const (
+ // CloudAccountIDKey is the attribute Key conforming to the
+ // "cloud.account.id" semantic conventions. It represents the cloud account
+ // ID the resource is assigned to.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '111111111111', 'opentelemetry'
+ CloudAccountIDKey = attribute.Key("cloud.account.id")
+
+ // CloudAvailabilityZoneKey is the attribute Key conforming to the
+ // "cloud.availability_zone" semantic conventions. It represents the
+ // availability zone where the resource is running. Cloud regions often
+ // have multiple, isolated locations known as zones to increase
+ // availability.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'us-east-1c'
+ // Note: Availability zones are called "zones" on Alibaba Cloud and Google
+ // Cloud.
+ CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone")
+
+ // CloudPlatformKey is the attribute Key conforming to the "cloud.platform"
+ // semantic conventions. It represents the cloud platform in use.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Note: The prefix of the service SHOULD match the one specified in
+ // `cloud.provider`.
+ CloudPlatformKey = attribute.Key("cloud.platform")
+
+ // CloudProviderKey is the attribute Key conforming to the "cloud.provider"
+ // semantic conventions. It represents the name of the cloud provider.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ CloudProviderKey = attribute.Key("cloud.provider")
+
+ // CloudRegionKey is the attribute Key conforming to the "cloud.region"
+ // semantic conventions. It represents the geographical region the resource
+ // is running in.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'us-central1', 'us-east-1'
+ // Note: Refer to your provider's docs to see the available regions, for
+ // example [Alibaba Cloud
+ // regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS
+ // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/),
+ // [Azure
+ // regions](https://azure.microsoft.com/global-infrastructure/geographies/),
+ // [Google Cloud regions](https://cloud.google.com/about/locations), or
+ // [Tencent Cloud
+ // regions](https://www.tencentcloud.com/document/product/213/6091).
+ CloudRegionKey = attribute.Key("cloud.region")
+
+ // CloudResourceIDKey is the attribute Key conforming to the
+ // "cloud.resource_id" semantic conventions. It represents the cloud
+ // provider-specific native identifier of the monitored cloud resource
+ // (e.g. an
+ // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+ // on AWS, a [fully qualified resource
+ // ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id)
+ // on Azure, a [full resource
+ // name](https://cloud.google.com/apis/design/resource_names#full_resource_name)
+ // on GCP)
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function',
+ // '//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID',
+ // '/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/'
+ // Note: On some cloud providers, it may not be possible to determine the
+ // full ID at startup,
+ // so it may be necessary to set `cloud.resource_id` as a span attribute
+ // instead.
+ //
+ // The exact value to use for `cloud.resource_id` depends on the cloud
+ // provider.
+ // The following well-known definitions MUST be used if you set this
+ // attribute and they apply:
+ //
+ // * **AWS Lambda:** The function
+ // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html).
+ // Take care not to use the "invoked ARN" directly but replace any
+ // [alias
+ // suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html)
+ // with the resolved function version, as the same runtime instance may
+ // be invokable with
+ // multiple different aliases.
+ // * **GCP:** The [URI of the
+ // resource](https://cloud.google.com/iam/docs/full-resource-names)
+ // * **Azure:** The [Fully Qualified Resource
+ // ID](https://docs.microsoft.com/rest/api/resources/resources/get-by-id)
+ // of the invoked function,
+ // *not* the function app, having the form
+ // `/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/`.
+ // This means that a span attribute MUST be used, as an Azure function
+ // app can host multiple functions that would usually share
+ // a TracerProvider.
+ CloudResourceIDKey = attribute.Key("cloud.resource_id") +) + +var ( + // Alibaba Cloud Elastic Compute Service + CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs") + // Alibaba Cloud Function Compute + CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc") + // Red Hat OpenShift on Alibaba Cloud + CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift") + // AWS Elastic Compute Cloud + CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") + // AWS Elastic Container Service + CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") + // AWS Elastic Kubernetes Service + CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") + // AWS Lambda + CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") + // AWS Elastic Beanstalk + CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") + // AWS App Runner + CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner") + // Red Hat OpenShift on AWS (ROSA) + CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift") + // Azure Virtual Machines + CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm") + // Azure Container Instances + CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances") + // Azure Kubernetes Service + CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks") + // Azure Functions + CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions") + // Azure App Service + CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service") + // Azure Red Hat OpenShift + CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift") + // Google Bare Metal Solution (BMS) + CloudPlatformGCPBareMetalSolution = CloudPlatformKey.String("gcp_bare_metal_solution") + // Google Cloud Compute Engine (GCE) + CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") + // Google Cloud Run + CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run") + // Google Cloud Kubernetes Engine (GKE) + CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") + // Google Cloud Functions (GCF) + CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions") + // Google Cloud App Engine (GAE) + CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") + // Red Hat OpenShift on Google Cloud + CloudPlatformGCPOpenshift = CloudPlatformKey.String("gcp_openshift") + // Red Hat OpenShift on IBM Cloud + CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift") + // Tencent Cloud Cloud Virtual Machine (CVM) + CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm") + // Tencent Cloud Elastic Kubernetes Service (EKS) + CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks") + // Tencent Cloud Serverless Cloud Function (SCF) + CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf") +) + +var ( + // Alibaba Cloud + CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud") + // Amazon Web Services + CloudProviderAWS = CloudProviderKey.String("aws") + // Microsoft Azure + CloudProviderAzure = CloudProviderKey.String("azure") + // Google Cloud Platform + CloudProviderGCP = CloudProviderKey.String("gcp") + // Heroku Platform as a Service + CloudProviderHeroku = CloudProviderKey.String("heroku") + // IBM Cloud + CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud") + // Tencent Cloud + 
CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud")
+)
+
+// CloudAccountID returns an attribute KeyValue conforming to the
+// "cloud.account.id" semantic conventions. It represents the cloud account ID
+// the resource is assigned to.
+func CloudAccountID(val string) attribute.KeyValue {
+ return CloudAccountIDKey.String(val)
+}
+
+// CloudAvailabilityZone returns an attribute KeyValue conforming to the
+// "cloud.availability_zone" semantic conventions. It represents the
+// availability zone where the resource is running. Cloud regions often have
+// multiple, isolated locations known as zones to increase availability.
+func CloudAvailabilityZone(val string) attribute.KeyValue {
+ return CloudAvailabilityZoneKey.String(val)
+}
+
+// CloudRegion returns an attribute KeyValue conforming to the
+// "cloud.region" semantic conventions. It represents the geographical region
+// the resource is running in.
+func CloudRegion(val string) attribute.KeyValue {
+ return CloudRegionKey.String(val)
+}
+
+// CloudResourceID returns an attribute KeyValue conforming to the
+// "cloud.resource_id" semantic conventions. It represents the cloud
+// provider-specific native identifier of the monitored cloud resource (e.g. an
+// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+// on AWS, a [fully qualified resource
+// ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id) on
+// Azure, a [full resource
+// name](https://cloud.google.com/apis/design/resource_names#full_resource_name)
+// on GCP)
+func CloudResourceID(val string) attribute.KeyValue {
+ return CloudResourceIDKey.String(val)
+}
+
+// A container instance.
+const (
+ // ContainerCommandKey is the attribute Key conforming to the
+ // "container.command" semantic conventions. It represents the command used
+ // to run the container (i.e. the command name).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'otelcontribcol'
+ // Note: If using embedded credentials or sensitive data, it is recommended
+ // to remove them to prevent potential leakage.
+ ContainerCommandKey = attribute.Key("container.command")
+
+ // ContainerCommandArgsKey is the attribute Key conforming to the
+ // "container.command_args" semantic conventions. It represents all the
+ // command arguments (including the command/executable itself) run by the
+ // container. [2]
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'otelcontribcol, --config, config.yaml'
+ ContainerCommandArgsKey = attribute.Key("container.command_args")
+
+ // ContainerCommandLineKey is the attribute Key conforming to the
+ // "container.command_line" semantic conventions. It represents the full
+ // command run by the container as a single string representing the full
+ // command. [2]
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'otelcontribcol --config config.yaml'
+ ContainerCommandLineKey = attribute.Key("container.command_line")
+
+ // ContainerIDKey is the attribute Key conforming to the "container.id"
+ // semantic conventions. It represents the container ID. Usually a UUID, as
+ // for example used to [identify Docker
+ // containers](https://docs.docker.com/engine/reference/run/#container-identification).
+ // The UUID might be abbreviated.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'a3bf90e006b2'
+ ContainerIDKey = attribute.Key("container.id")
+
+ // ContainerImageIDKey is the attribute Key conforming to the
+ // "container.image.id" semantic conventions. It represents the
+ // runtime-specific image identifier. Usually a hash algorithm followed by a
+ // UUID.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples:
+ // 'sha256:19c92d0a00d1b66d897bceaa7319bee0dd38a10a851c60bcec9474aa3f01e50f'
+ // Note: Docker defines a sha256 of the image id; `container.image.id`
+ // corresponds to the `Image` field from the Docker container inspect
+ // [API](https://docs.docker.com/engine/api/v1.43/#tag/Container/operation/ContainerInspect)
+ // endpoint.
+ // K8S defines a link to the container registry repository with digest
+ // `"imageID": "registry.azurecr.io
+ // /namespace/service/dockerfile@sha256:bdeabd40c3a8a492eaf9e8e44d0ebbb84bac7ee25ac0cf8a7159d25f62555625"`.
+ // The ID is assigned by the container runtime and can vary in different
+ // environments. Consider using `oci.manifest.digest` if it is important to
+ // identify the same image in different environments/runtimes.
+ ContainerImageIDKey = attribute.Key("container.image.id")
+
+ // ContainerImageNameKey is the attribute Key conforming to the
+ // "container.image.name" semantic conventions. It represents the name of
+ // the image the container was built on.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'gcr.io/opentelemetry/operator'
+ ContainerImageNameKey = attribute.Key("container.image.name")
+
+ // ContainerImageRepoDigestsKey is the attribute Key conforming to the
+ // "container.image.repo_digests" semantic conventions. It represents the
+ // repo digests of the container image as provided by the container
+ // runtime.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples:
+ // 'example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb',
+ // 'internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578'
+ // Note:
+ // [Docker](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect)
+ // and
+ // [CRI](https://github.com/kubernetes/cri-api/blob/c75ef5b473bbe2d0a4fc92f82235efd665ea8e9f/pkg/apis/runtime/v1/api.proto#L1237-L1238)
+ // report those under the `RepoDigests` field.
+ ContainerImageRepoDigestsKey = attribute.Key("container.image.repo_digests")
+
+ // ContainerImageTagsKey is the attribute Key conforming to the
+ // "container.image.tags" semantic conventions. It represents the container
+ // image tags. An example can be found in [Docker Image
+ // Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect).
+ // Should be only the `<tag>` section of the full name for example from
+ // `registry.example.com/my-org/my-image:<tag>`.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'v1.27.1', '3.5.7-0'
+ ContainerImageTagsKey = attribute.Key("container.image.tags")
+
+ // ContainerNameKey is the attribute Key conforming to the "container.name"
+ // semantic conventions. It represents the container name used by container
+ // runtime.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry-autoconf'
+ ContainerNameKey = attribute.Key("container.name")
+
+ // ContainerRuntimeKey is the attribute Key conforming to the
+ // "container.runtime" semantic conventions. It represents the container
+ // runtime managing this container.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'docker', 'containerd', 'rkt'
+ ContainerRuntimeKey = attribute.Key("container.runtime")
+)
+
+// ContainerCommand returns an attribute KeyValue conforming to the
+// "container.command" semantic conventions. It represents the command used to
+// run the container (i.e. the command name).
+func ContainerCommand(val string) attribute.KeyValue {
+ return ContainerCommandKey.String(val)
+}
+
+// ContainerCommandArgs returns an attribute KeyValue conforming to the
+// "container.command_args" semantic conventions. It represents all the
+// command arguments (including the command/executable itself) run by the
+// container. [2]
+func ContainerCommandArgs(val ...string) attribute.KeyValue {
+ return ContainerCommandArgsKey.StringSlice(val)
+}
+
+// ContainerCommandLine returns an attribute KeyValue conforming to the
+// "container.command_line" semantic conventions. It represents the full
+// command run by the container as a single string representing the full
+// command. [2]
+func ContainerCommandLine(val string) attribute.KeyValue {
+ return ContainerCommandLineKey.String(val)
+}
+
+// ContainerID returns an attribute KeyValue conforming to the
+// "container.id" semantic conventions. It represents the container ID. Usually
+// a UUID, as for example used to [identify Docker
+// containers](https://docs.docker.com/engine/reference/run/#container-identification).
+// The UUID might be abbreviated.
+func ContainerID(val string) attribute.KeyValue {
+ return ContainerIDKey.String(val)
+}
+
+// ContainerImageID returns an attribute KeyValue conforming to the
+// "container.image.id" semantic conventions. It represents the
+// runtime-specific image identifier. Usually a hash algorithm followed by a
+// UUID.
+func ContainerImageID(val string) attribute.KeyValue {
+ return ContainerImageIDKey.String(val)
+}
+
+// ContainerImageName returns an attribute KeyValue conforming to the
+// "container.image.name" semantic conventions. It represents the name of the
+// image the container was built on.
+func ContainerImageName(val string) attribute.KeyValue {
+ return ContainerImageNameKey.String(val)
+}
+
+// ContainerImageRepoDigests returns an attribute KeyValue conforming to the
+// "container.image.repo_digests" semantic conventions. It represents the repo
+// digests of the container image as provided by the container runtime.
+func ContainerImageRepoDigests(val ...string) attribute.KeyValue {
+ return ContainerImageRepoDigestsKey.StringSlice(val)
+}
+
+// ContainerImageTags returns an attribute KeyValue conforming to the
+// "container.image.tags" semantic conventions. It represents the container
+// image tags. An example can be found in [Docker Image
+// Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect).
+// Should be only the `<tag>` section of the full name for example from
+// `registry.example.com/my-org/my-image:<tag>`.
+func ContainerImageTags(val ...string) attribute.KeyValue { + return ContainerImageTagsKey.StringSlice(val) +} + +// ContainerName returns an attribute KeyValue conforming to the +// "container.name" semantic conventions. It represents the container name used +// by container runtime. +func ContainerName(val string) attribute.KeyValue { + return ContainerNameKey.String(val) +} + +// ContainerRuntime returns an attribute KeyValue conforming to the +// "container.runtime" semantic conventions. It represents the container +// runtime managing this container. +func ContainerRuntime(val string) attribute.KeyValue { + return ContainerRuntimeKey.String(val) +} + +// Describes device attributes. +const ( + // DeviceIDKey is the attribute Key conforming to the "device.id" semantic + // conventions. It represents a unique identifier representing the device + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' + // Note: The device identifier MUST only be defined using the values + // outlined below. This value is not an advertising identifier and MUST NOT + // be used as such. On iOS (Swift or Objective-C), this value MUST be equal + // to the [vendor + // identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor). + // On Android (Java or Kotlin), this value MUST be equal to the Firebase + // Installation ID or a globally unique UUID which is persisted across + // sessions in your application. More information can be found + // [here](https://developer.android.com/training/articles/user-data-ids) on + // best practices and exact implementation details. Caution should be taken + // when storing personal data or anything which can identify a user. GDPR + // and data protection laws may apply, ensure you do your own due + // diligence. + DeviceIDKey = attribute.Key("device.id") + + // DeviceManufacturerKey is the attribute Key conforming to the + // "device.manufacturer" semantic conventions. It represents the name of + // the device manufacturer + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Apple', 'Samsung' + // Note: The Android OS provides this field via + // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER). + // iOS apps SHOULD hardcode the value `Apple`. + DeviceManufacturerKey = attribute.Key("device.manufacturer") + + // DeviceModelIdentifierKey is the attribute Key conforming to the + // "device.model.identifier" semantic conventions. It represents the model + // identifier for the device + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'iPhone3,4', 'SM-G920F' + // Note: It's recommended this value represents a machine-readable version + // of the model identifier rather than the market or consumer-friendly name + // of the device. + DeviceModelIdentifierKey = attribute.Key("device.model.identifier") + + // DeviceModelNameKey is the attribute Key conforming to the + // "device.model.name" semantic conventions. It represents the marketing + // name for the device model + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' + // Note: It's recommended this value represents a human-readable version of + // the device model rather than a machine-readable alternative. 
+ DeviceModelNameKey = attribute.Key("device.model.name") +) + +// DeviceID returns an attribute KeyValue conforming to the "device.id" +// semantic conventions. It represents a unique identifier representing the +// device +func DeviceID(val string) attribute.KeyValue { + return DeviceIDKey.String(val) +} + +// DeviceManufacturer returns an attribute KeyValue conforming to the +// "device.manufacturer" semantic conventions. It represents the name of the +// device manufacturer +func DeviceManufacturer(val string) attribute.KeyValue { + return DeviceManufacturerKey.String(val) +} + +// DeviceModelIdentifier returns an attribute KeyValue conforming to the +// "device.model.identifier" semantic conventions. It represents the model +// identifier for the device +func DeviceModelIdentifier(val string) attribute.KeyValue { + return DeviceModelIdentifierKey.String(val) +} + +// DeviceModelName returns an attribute KeyValue conforming to the +// "device.model.name" semantic conventions. It represents the marketing name +// for the device model +func DeviceModelName(val string) attribute.KeyValue { + return DeviceModelNameKey.String(val) +} + +// A host is defined as a computing instance. For example, physical servers, +// virtual machines, switches or disk array. +const ( + // HostArchKey is the attribute Key conforming to the "host.arch" semantic + // conventions. It represents the CPU architecture the host system is + // running on. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + HostArchKey = attribute.Key("host.arch") + + // HostCPUCacheL2SizeKey is the attribute Key conforming to the + // "host.cpu.cache.l2.size" semantic conventions. It represents the amount + // of level 2 memory cache available to the processor (in Bytes). + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 12288000 + HostCPUCacheL2SizeKey = attribute.Key("host.cpu.cache.l2.size") + + // HostCPUFamilyKey is the attribute Key conforming to the + // "host.cpu.family" semantic conventions. It represents the family or + // generation of the CPU. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '6', 'PA-RISC 1.1e' + HostCPUFamilyKey = attribute.Key("host.cpu.family") + + // HostCPUModelIDKey is the attribute Key conforming to the + // "host.cpu.model.id" semantic conventions. It represents the model + // identifier. It provides more granular information about the CPU, + // distinguishing it from other CPUs within the same family. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '6', '9000/778/B180L' + HostCPUModelIDKey = attribute.Key("host.cpu.model.id") + + // HostCPUModelNameKey is the attribute Key conforming to the + // "host.cpu.model.name" semantic conventions. It represents the model + // designation of the processor. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz' + HostCPUModelNameKey = attribute.Key("host.cpu.model.name") + + // HostCPUSteppingKey is the attribute Key conforming to the + // "host.cpu.stepping" semantic conventions. It represents the stepping or + // core revisions. 
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1
+ HostCPUSteppingKey = attribute.Key("host.cpu.stepping")
+
+ // HostCPUVendorIDKey is the attribute Key conforming to the
+ // "host.cpu.vendor.id" semantic conventions. It represents the processor
+ // manufacturer identifier. A maximum 12-character string.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'GenuineIntel'
+ // Note: [CPUID](https://wiki.osdev.org/CPUID) command returns the vendor
+ // ID string in EBX, EDX and ECX registers. Writing these to memory in this
+ // order results in a 12-character string.
+ HostCPUVendorIDKey = attribute.Key("host.cpu.vendor.id")
+
+ // HostIDKey is the attribute Key conforming to the "host.id" semantic
+ // conventions. It represents the unique host ID. For Cloud, this must be
+ // the instance_id assigned by the cloud provider. For non-containerized
+ // systems, this should be the `machine-id`. See the table below for the
+ // sources to use to determine the `machine-id` based on operating system.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'fdbf79e8af94cb7f9e8df36789187052'
+ HostIDKey = attribute.Key("host.id")
+
+ // HostImageIDKey is the attribute Key conforming to the "host.image.id"
+ // semantic conventions. It represents the VM image ID or host OS image ID.
+ // For Cloud, this value is from the provider.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'ami-07b06b442921831e5'
+ HostImageIDKey = attribute.Key("host.image.id")
+
+ // HostImageNameKey is the attribute Key conforming to the
+ // "host.image.name" semantic conventions. It represents the name of the VM
+ // image or OS install the host was instantiated from.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905'
+ HostImageNameKey = attribute.Key("host.image.name")
+
+ // HostImageVersionKey is the attribute Key conforming to the
+ // "host.image.version" semantic conventions. It represents the version
+ // string of the VM image or host OS as defined in [Version
+ // Attributes](/docs/resource/README.md#version-attributes).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '0.1'
+ HostImageVersionKey = attribute.Key("host.image.version")
+
+ // HostIPKey is the attribute Key conforming to the "host.ip" semantic
+ // conventions. It represents the available IP addresses of the host,
+ // excluding loopback interfaces.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '192.168.1.140', 'fe80::abc2:4a28:737a:609e'
+ // Note: IPv4 Addresses MUST be specified in dotted-quad notation. IPv6
+ // addresses MUST be specified in the [RFC
+ // 5952](https://www.rfc-editor.org/rfc/rfc5952.html) format.
+ HostIPKey = attribute.Key("host.ip")
+
+ // HostMacKey is the attribute Key conforming to the "host.mac" semantic
+ // conventions. It represents the available MAC addresses of the host,
+ // excluding loopback interfaces.
+ // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'AC-DE-48-23-45-67', 'AC-DE-48-23-45-67-01-9F' + // Note: MAC Addresses MUST be represented in [IEEE RA hexadecimal + // form](https://standards.ieee.org/wp-content/uploads/import/documents/tutorials/eui.pdf): + // as hyphen-separated octets in uppercase hexadecimal form from most to + // least significant. + HostMacKey = attribute.Key("host.mac") + + // HostNameKey is the attribute Key conforming to the "host.name" semantic + // conventions. It represents the name of the host. On Unix systems, it may + // contain what the hostname command returns, or the fully qualified + // hostname, or another name specified by the user. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry-test' + HostNameKey = attribute.Key("host.name") + + // HostTypeKey is the attribute Key conforming to the "host.type" semantic + // conventions. It represents the type of host. For Cloud, this must be the + // machine type. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'n1-standard-1' + HostTypeKey = attribute.Key("host.type") +) + +var ( + // AMD64 + HostArchAMD64 = HostArchKey.String("amd64") + // ARM32 + HostArchARM32 = HostArchKey.String("arm32") + // ARM64 + HostArchARM64 = HostArchKey.String("arm64") + // Itanium + HostArchIA64 = HostArchKey.String("ia64") + // 32-bit PowerPC + HostArchPPC32 = HostArchKey.String("ppc32") + // 64-bit PowerPC + HostArchPPC64 = HostArchKey.String("ppc64") + // IBM z/Architecture + HostArchS390x = HostArchKey.String("s390x") + // 32-bit x86 + HostArchX86 = HostArchKey.String("x86") +) + +// HostCPUCacheL2Size returns an attribute KeyValue conforming to the +// "host.cpu.cache.l2.size" semantic conventions. It represents the amount of +// level 2 memory cache available to the processor (in Bytes). +func HostCPUCacheL2Size(val int) attribute.KeyValue { + return HostCPUCacheL2SizeKey.Int(val) +} + +// HostCPUFamily returns an attribute KeyValue conforming to the +// "host.cpu.family" semantic conventions. It represents the family or +// generation of the CPU. +func HostCPUFamily(val string) attribute.KeyValue { + return HostCPUFamilyKey.String(val) +} + +// HostCPUModelID returns an attribute KeyValue conforming to the +// "host.cpu.model.id" semantic conventions. It represents the model +// identifier. It provides more granular information about the CPU, +// distinguishing it from other CPUs within the same family. +func HostCPUModelID(val string) attribute.KeyValue { + return HostCPUModelIDKey.String(val) +} + +// HostCPUModelName returns an attribute KeyValue conforming to the +// "host.cpu.model.name" semantic conventions. It represents the model +// designation of the processor. +func HostCPUModelName(val string) attribute.KeyValue { + return HostCPUModelNameKey.String(val) +} + +// HostCPUStepping returns an attribute KeyValue conforming to the +// "host.cpu.stepping" semantic conventions. It represents the stepping or core +// revisions. +func HostCPUStepping(val int) attribute.KeyValue { + return HostCPUSteppingKey.Int(val) +} + +// HostCPUVendorID returns an attribute KeyValue conforming to the +// "host.cpu.vendor.id" semantic conventions. It represents the processor +// manufacturer identifier. A maximum 12-character string. 
+func HostCPUVendorID(val string) attribute.KeyValue {
+ return HostCPUVendorIDKey.String(val)
+}
+
+// HostID returns an attribute KeyValue conforming to the "host.id" semantic
+// conventions. It represents the unique host ID. For Cloud, this must be the
+// instance_id assigned by the cloud provider. For non-containerized systems,
+// this should be the `machine-id`. See the table below for the sources to use
+// to determine the `machine-id` based on operating system.
+func HostID(val string) attribute.KeyValue {
+ return HostIDKey.String(val)
+}
+
+// HostImageID returns an attribute KeyValue conforming to the
+// "host.image.id" semantic conventions. It represents the VM image ID or host
+// OS image ID. For Cloud, this value is from the provider.
+func HostImageID(val string) attribute.KeyValue {
+ return HostImageIDKey.String(val)
+}
+
+// HostImageName returns an attribute KeyValue conforming to the
+// "host.image.name" semantic conventions. It represents the name of the VM
+// image or OS install the host was instantiated from.
+func HostImageName(val string) attribute.KeyValue {
+ return HostImageNameKey.String(val)
+}
+
+// HostImageVersion returns an attribute KeyValue conforming to the
+// "host.image.version" semantic conventions. It represents the version string
+// of the VM image or host OS as defined in [Version
+// Attributes](/docs/resource/README.md#version-attributes).
+func HostImageVersion(val string) attribute.KeyValue {
+ return HostImageVersionKey.String(val)
+}
+
+// HostIP returns an attribute KeyValue conforming to the "host.ip" semantic
+// conventions. It represents the available IP addresses of the host, excluding
+// loopback interfaces.
+func HostIP(val ...string) attribute.KeyValue {
+ return HostIPKey.StringSlice(val)
+}
+
+// HostMac returns an attribute KeyValue conforming to the "host.mac"
+// semantic conventions. It represents the available MAC addresses of the host,
+// excluding loopback interfaces.
+func HostMac(val ...string) attribute.KeyValue {
+ return HostMacKey.StringSlice(val)
+}
+
+// HostName returns an attribute KeyValue conforming to the "host.name"
+// semantic conventions. It represents the name of the host. On Unix systems,
+// it may contain what the hostname command returns, or the fully qualified
+// hostname, or another name specified by the user.
+func HostName(val string) attribute.KeyValue {
+ return HostNameKey.String(val)
+}
+
+// HostType returns an attribute KeyValue conforming to the "host.type"
+// semantic conventions. It represents the type of host. For Cloud, this must
+// be the machine type.
+func HostType(val string) attribute.KeyValue {
+ return HostTypeKey.String(val)
+}
+
+// Kubernetes resource attributes.
+const (
+ // K8SClusterNameKey is the attribute Key conforming to the
+ // "k8s.cluster.name" semantic conventions. It represents the name of the
+ // cluster.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry-cluster'
+ K8SClusterNameKey = attribute.Key("k8s.cluster.name")
+
+ // K8SClusterUIDKey is the attribute Key conforming to the
+ // "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for
+ // the cluster, set to the UID of the `kube-system` namespace.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '218fc5a9-a5f1-4b54-aa05-46717d0ab26d'
+ // Note: K8S doesn't have support for obtaining a cluster ID. If this is
+ // ever added, we will recommend collecting the `k8s.cluster.uid` through
+ // the official APIs. In the meantime, we are able to use the `uid` of the
+ // `kube-system` namespace as a proxy for cluster ID. Read on for the
+ // rationale.
+ //
+ // Every object created in a K8S cluster is assigned a distinct UID. The
+ // `kube-system` namespace is used by Kubernetes itself and will exist
+ // for the lifetime of the cluster. Using the `uid` of the `kube-system`
+ // namespace is a reasonable proxy for the K8S ClusterID as it will only
+ // change if the cluster is rebuilt. Furthermore, Kubernetes UIDs are
+ // UUIDs as standardized by
+ // [ISO/IEC 9834-8 and ITU-T
+ // X.667](https://www.itu.int/ITU-T/studygroups/com17/oid.html),
+ // which states:
+ //
+ // > If generated according to one of the mechanisms defined in Rec.
+ // ITU-T X.667 | ISO/IEC 9834-8, a UUID is either guaranteed to be
+ // different from all other UUIDs generated before 3603 A.D., or is
+ // extremely likely to be different (depending on the mechanism chosen).
+ //
+ // Therefore, UIDs between clusters should be extremely unlikely to
+ // conflict.
+ K8SClusterUIDKey = attribute.Key("k8s.cluster.uid")
+
+ // K8SContainerNameKey is the attribute Key conforming to the
+ // "k8s.container.name" semantic conventions. It represents the name of the
+ // Container from the Pod specification; it must be unique within a Pod. The
+ // container runtime usually uses a different globally unique name
+ // (`container.name`).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'redis'
+ K8SContainerNameKey = attribute.Key("k8s.container.name")
+
+ // K8SContainerRestartCountKey is the attribute Key conforming to the
+ // "k8s.container.restart_count" semantic conventions. It represents the
+ // number of times the container was restarted. This attribute can be used
+ // to identify a particular container (running or stopped) within a
+ // container spec.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 0, 2
+ K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count")
+
+ // K8SCronJobNameKey is the attribute Key conforming to the
+ // "k8s.cronjob.name" semantic conventions. It represents the name of the
+ // CronJob.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry'
+ K8SCronJobNameKey = attribute.Key("k8s.cronjob.name")
+
+ // K8SCronJobUIDKey is the attribute Key conforming to the
+ // "k8s.cronjob.uid" semantic conventions. It represents the UID of the
+ // CronJob.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid")
+
+ // K8SDaemonSetNameKey is the attribute Key conforming to the
+ // "k8s.daemonset.name" semantic conventions. It represents the name of the
+ // DaemonSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry'
+ K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name")
+
+ // K8SDaemonSetUIDKey is the attribute Key conforming to the
+ // "k8s.daemonset.uid" semantic conventions. It represents the UID of the
+ // DaemonSet.
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") + + // K8SDeploymentNameKey is the attribute Key conforming to the + // "k8s.deployment.name" semantic conventions. It represents the name of + // the Deployment. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") + + // K8SDeploymentUIDKey is the attribute Key conforming to the + // "k8s.deployment.uid" semantic conventions. It represents the UID of the + // Deployment. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") + + // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name" + // semantic conventions. It represents the name of the Job. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SJobNameKey = attribute.Key("k8s.job.name") + + // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid" + // semantic conventions. It represents the UID of the Job. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SJobUIDKey = attribute.Key("k8s.job.uid") + + // K8SNamespaceNameKey is the attribute Key conforming to the + // "k8s.namespace.name" semantic conventions. It represents the name of the + // namespace that the pod is running in. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'default' + K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") + + // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name" + // semantic conventions. It represents the name of the Node. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'node-1' + K8SNodeNameKey = attribute.Key("k8s.node.name") + + // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid" + // semantic conventions. It represents the UID of the Node. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2' + K8SNodeUIDKey = attribute.Key("k8s.node.uid") + + // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name" + // semantic conventions. It represents the name of the Pod. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry-pod-autoconf' + K8SPodNameKey = attribute.Key("k8s.pod.name") + + // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid" + // semantic conventions. It represents the UID of the Pod. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SPodUIDKey = attribute.Key("k8s.pod.uid") + + // K8SReplicaSetNameKey is the attribute Key conforming to the + // "k8s.replicaset.name" semantic conventions. It represents the name of + // the ReplicaSet. 
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry'
+ K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name")
+
+ // K8SReplicaSetUIDKey is the attribute Key conforming to the
+ // "k8s.replicaset.uid" semantic conventions. It represents the UID of the
+ // ReplicaSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid")
+
+ // K8SStatefulSetNameKey is the attribute Key conforming to the
+ // "k8s.statefulset.name" semantic conventions. It represents the name of
+ // the StatefulSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry'
+ K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name")
+
+ // K8SStatefulSetUIDKey is the attribute Key conforming to the
+ // "k8s.statefulset.uid" semantic conventions. It represents the UID of the
+ // StatefulSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid")
+)
+
+// K8SClusterName returns an attribute KeyValue conforming to the
+// "k8s.cluster.name" semantic conventions. It represents the name of the
+// cluster.
+func K8SClusterName(val string) attribute.KeyValue {
+ return K8SClusterNameKey.String(val)
+}
+
+// K8SClusterUID returns an attribute KeyValue conforming to the
+// "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for the
+// cluster, set to the UID of the `kube-system` namespace.
+func K8SClusterUID(val string) attribute.KeyValue {
+ return K8SClusterUIDKey.String(val)
+}
+
+// K8SContainerName returns an attribute KeyValue conforming to the
+// "k8s.container.name" semantic conventions. It represents the name of the
+// Container from the Pod specification; it must be unique within a Pod. The
+// container runtime usually uses a different globally unique name
+// (`container.name`).
+func K8SContainerName(val string) attribute.KeyValue {
+ return K8SContainerNameKey.String(val)
+}
+
+// K8SContainerRestartCount returns an attribute KeyValue conforming to the
+// "k8s.container.restart_count" semantic conventions. It represents the number
+// of times the container was restarted. This attribute can be used to identify
+// a particular container (running or stopped) within a container spec.
+func K8SContainerRestartCount(val int) attribute.KeyValue {
+ return K8SContainerRestartCountKey.Int(val)
+}
+
+// K8SCronJobName returns an attribute KeyValue conforming to the
+// "k8s.cronjob.name" semantic conventions. It represents the name of the
+// CronJob.
+func K8SCronJobName(val string) attribute.KeyValue {
+ return K8SCronJobNameKey.String(val)
+}
+
+// K8SCronJobUID returns an attribute KeyValue conforming to the
+// "k8s.cronjob.uid" semantic conventions. It represents the UID of the
+// CronJob.
+func K8SCronJobUID(val string) attribute.KeyValue {
+ return K8SCronJobUIDKey.String(val)
+}
+
+// K8SDaemonSetName returns an attribute KeyValue conforming to the
+// "k8s.daemonset.name" semantic conventions. It represents the name of the
+// DaemonSet.
+func K8SDaemonSetName(val string) attribute.KeyValue {
+ return K8SDaemonSetNameKey.String(val)
+}
+
+// K8SDaemonSetUID returns an attribute KeyValue conforming to the
+// "k8s.daemonset.uid" semantic conventions. It represents the UID of the
+// DaemonSet.
+func K8SDaemonSetUID(val string) attribute.KeyValue { + return K8SDaemonSetUIDKey.String(val) +} + +// K8SDeploymentName returns an attribute KeyValue conforming to the +// "k8s.deployment.name" semantic conventions. It represents the name of the +// Deployment. +func K8SDeploymentName(val string) attribute.KeyValue { + return K8SDeploymentNameKey.String(val) +} + +// K8SDeploymentUID returns an attribute KeyValue conforming to the +// "k8s.deployment.uid" semantic conventions. It represents the UID of the +// Deployment. +func K8SDeploymentUID(val string) attribute.KeyValue { + return K8SDeploymentUIDKey.String(val) +} + +// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name" +// semantic conventions. It represents the name of the Job. +func K8SJobName(val string) attribute.KeyValue { + return K8SJobNameKey.String(val) +} + +// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid" +// semantic conventions. It represents the UID of the Job. +func K8SJobUID(val string) attribute.KeyValue { + return K8SJobUIDKey.String(val) +} + +// K8SNamespaceName returns an attribute KeyValue conforming to the +// "k8s.namespace.name" semantic conventions. It represents the name of the +// namespace that the pod is running in. +func K8SNamespaceName(val string) attribute.KeyValue { + return K8SNamespaceNameKey.String(val) +} + +// K8SNodeName returns an attribute KeyValue conforming to the +// "k8s.node.name" semantic conventions. It represents the name of the Node. +func K8SNodeName(val string) attribute.KeyValue { + return K8SNodeNameKey.String(val) +} + +// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid" +// semantic conventions. It represents the UID of the Node. +func K8SNodeUID(val string) attribute.KeyValue { + return K8SNodeUIDKey.String(val) +} + +// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name" +// semantic conventions. It represents the name of the Pod. +func K8SPodName(val string) attribute.KeyValue { + return K8SPodNameKey.String(val) +} + +// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid" +// semantic conventions. It represents the UID of the Pod. +func K8SPodUID(val string) attribute.KeyValue { + return K8SPodUIDKey.String(val) +} + +// K8SReplicaSetName returns an attribute KeyValue conforming to the +// "k8s.replicaset.name" semantic conventions. It represents the name of the +// ReplicaSet. +func K8SReplicaSetName(val string) attribute.KeyValue { + return K8SReplicaSetNameKey.String(val) +} + +// K8SReplicaSetUID returns an attribute KeyValue conforming to the +// "k8s.replicaset.uid" semantic conventions. It represents the UID of the +// ReplicaSet. +func K8SReplicaSetUID(val string) attribute.KeyValue { + return K8SReplicaSetUIDKey.String(val) +} + +// K8SStatefulSetName returns an attribute KeyValue conforming to the +// "k8s.statefulset.name" semantic conventions. It represents the name of the +// StatefulSet. +func K8SStatefulSetName(val string) attribute.KeyValue { + return K8SStatefulSetNameKey.String(val) +} + +// K8SStatefulSetUID returns an attribute KeyValue conforming to the +// "k8s.statefulset.uid" semantic conventions. It represents the UID of the +// StatefulSet. +func K8SStatefulSetUID(val string) attribute.KeyValue { + return K8SStatefulSetUIDKey.String(val) +} + +// An OCI image manifest. +const ( + // OciManifestDigestKey is the attribute Key conforming to the + // "oci.manifest.digest" semantic conventions. 
It represents the digest of
+ // the OCI image manifest. For container images specifically, this is the
+ // digest by which the container image is known.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples:
+ // 'sha256:e4ca62c0d62f3e886e684806dfe9d4e0cda60d54986898173c1083856cfda0f4'
+ // Note: Follows [OCI Image Manifest
+ // Specification](https://github.com/opencontainers/image-spec/blob/main/manifest.md),
+ // and specifically the [Digest
+ // property](https://github.com/opencontainers/image-spec/blob/main/descriptor.md#digests).
+ // An example can be found in [Example Image
+ // Manifest](https://docs.docker.com/registry/spec/manifest-v2-2/#example-image-manifest).
+ OciManifestDigestKey = attribute.Key("oci.manifest.digest")
+)
+
+// OciManifestDigest returns an attribute KeyValue conforming to the
+// "oci.manifest.digest" semantic conventions. It represents the digest of the
+// OCI image manifest. For container images specifically, this is the digest by
+// which the container image is known.
+func OciManifestDigest(val string) attribute.KeyValue {
+ return OciManifestDigestKey.String(val)
+}
+
+// The operating system (OS) on which the process represented by this resource
+// is running.
+const (
+ // OSBuildIDKey is the attribute Key conforming to the "os.build_id"
+ // semantic conventions. It represents the unique identifier for a
+ // particular build or compilation of the operating system.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'TQ3C.230805.001.B2', '20E247', '22621'
+ OSBuildIDKey = attribute.Key("os.build_id")
+
+ // OSDescriptionKey is the attribute Key conforming to the "os.description"
+ // semantic conventions. It represents the human readable (not intended to
+ // be parsed) OS version information, as reported by, e.g., the `ver` or
+ // `lsb_release -a` commands.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1
+ // LTS'
+ OSDescriptionKey = attribute.Key("os.description")
+
+ // OSNameKey is the attribute Key conforming to the "os.name" semantic
+ // conventions. It represents the human readable operating system name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'iOS', 'Android', 'Ubuntu'
+ OSNameKey = attribute.Key("os.name")
+
+ // OSTypeKey is the attribute Key conforming to the "os.type" semantic
+ // conventions. It represents the operating system type.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ OSTypeKey = attribute.Key("os.type")
+
+ // OSVersionKey is the attribute Key conforming to the "os.version"
+ // semantic conventions. It represents the version string of the operating
+ // system as defined in [Version
+ // Attributes](/docs/resource/README.md#version-attributes).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '14.2.1', '18.04.1'
+ OSVersionKey = attribute.Key("os.version")
+)
+
+var (
+ // Microsoft Windows
+ OSTypeWindows = OSTypeKey.String("windows")
+ // Linux
+ OSTypeLinux = OSTypeKey.String("linux")
+ // Apple Darwin
+ OSTypeDarwin = OSTypeKey.String("darwin")
+ // FreeBSD
+ OSTypeFreeBSD = OSTypeKey.String("freebsd")
+ // NetBSD
+ OSTypeNetBSD = OSTypeKey.String("netbsd")
+ // OpenBSD
+ OSTypeOpenBSD = OSTypeKey.String("openbsd")
+ // DragonFly BSD
+ OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd")
+ // HP-UX (Hewlett Packard Unix)
+ OSTypeHPUX = OSTypeKey.String("hpux")
+ // AIX (Advanced Interactive eXecutive)
+ OSTypeAIX = OSTypeKey.String("aix")
+ // SunOS, Oracle Solaris
+ OSTypeSolaris = OSTypeKey.String("solaris")
+ // IBM z/OS
+ OSTypeZOS = OSTypeKey.String("z_os")
+)
+
+// OSBuildID returns an attribute KeyValue conforming to the "os.build_id"
+// semantic conventions. It represents the unique identifier for a particular
+// build or compilation of the operating system.
+func OSBuildID(val string) attribute.KeyValue {
+ return OSBuildIDKey.String(val)
+}
+
+// OSDescription returns an attribute KeyValue conforming to the
+// "os.description" semantic conventions. It represents the human readable (not
+// intended to be parsed) OS version information, as reported by, e.g., the
+// `ver` or `lsb_release -a` commands.
+func OSDescription(val string) attribute.KeyValue {
+ return OSDescriptionKey.String(val)
+}
+
+// OSName returns an attribute KeyValue conforming to the "os.name" semantic
+// conventions. It represents the human readable operating system name.
+func OSName(val string) attribute.KeyValue {
+ return OSNameKey.String(val)
+}
+
+// OSVersion returns an attribute KeyValue conforming to the "os.version"
+// semantic conventions. It represents the version string of the operating
+// system as defined in [Version
+// Attributes](/docs/resource/README.md#version-attributes).
+func OSVersion(val string) attribute.KeyValue {
+ return OSVersionKey.String(val)
+}
+
+// An operating system process.
+const (
+ // ProcessCommandKey is the attribute Key conforming to the
+ // "process.command" semantic conventions. It represents the command used
+ // to launch the process (i.e. the command name). On Linux based systems,
+ // can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can
+ // be set to the first parameter extracted from `GetCommandLineW`.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'cmd/otelcol'
+ ProcessCommandKey = attribute.Key("process.command")
+
+ // ProcessCommandArgsKey is the attribute Key conforming to the
+ // "process.command_args" semantic conventions. It represents all the
+ // command arguments (including the command/executable itself) as received
+ // by the process. On Linux-based systems (and some other Unixoid systems
+ // supporting procfs), can be set according to the list of null-delimited
+ // strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
+ // this would be the full argv vector passed to `main`.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'cmd/otecol', '--config=config.yaml'
+ ProcessCommandArgsKey = attribute.Key("process.command_args")
+
+ // ProcessCommandLineKey is the attribute Key conforming to the
+ // "process.command_line" semantic conventions.
It represents the full
+ // command used to launch the process as a single string. On Windows, can
+ // be set to the result of `GetCommandLineW`. Do not set this if you have
+ // to assemble it just for monitoring; use `process.command_args` instead.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"'
+ ProcessCommandLineKey = attribute.Key("process.command_line")
+
+ // ProcessExecutableNameKey is the attribute Key conforming to the
+ // "process.executable.name" semantic conventions. It represents the name
+ // of the process executable. On Linux based systems, can be set to the
+ // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name
+ // of `GetProcessImageFileNameW`.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'otelcol'
+ ProcessExecutableNameKey = attribute.Key("process.executable.name")
+
+ // ProcessExecutablePathKey is the attribute Key conforming to the
+ // "process.executable.path" semantic conventions. It represents the full
+ // path to the process executable. On Linux based systems, can be set to
+ // the target of `proc/[pid]/exe`. On Windows, can be set to the result of
+ // `GetProcessImageFileNameW`.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '/usr/bin/cmd/otelcol'
+ ProcessExecutablePathKey = attribute.Key("process.executable.path")
+
+ // ProcessOwnerKey is the attribute Key conforming to the "process.owner"
+ // semantic conventions. It represents the username of the user that owns
+ // the process.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'root'
+ ProcessOwnerKey = attribute.Key("process.owner")
+
+ // ProcessParentPIDKey is the attribute Key conforming to the
+ // "process.parent_pid" semantic conventions. It represents the parent
+ // Process identifier (PPID).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 111
+ ProcessParentPIDKey = attribute.Key("process.parent_pid")
+
+ // ProcessPIDKey is the attribute Key conforming to the "process.pid"
+ // semantic conventions. It represents the process identifier (PID).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1234
+ ProcessPIDKey = attribute.Key("process.pid")
+
+ // ProcessRuntimeDescriptionKey is the attribute Key conforming to the
+ // "process.runtime.description" semantic conventions. It represents an
+ // additional description about the runtime of the process, for example a
+ // specific vendor customization of the runtime environment.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0'
+ ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description")
+
+ // ProcessRuntimeNameKey is the attribute Key conforming to the
+ // "process.runtime.name" semantic conventions. It represents the name of
+ // the runtime of this process. For compiled native binaries, this SHOULD
+ // be the name of the compiler.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'OpenJDK Runtime Environment'
+ ProcessRuntimeNameKey = attribute.Key("process.runtime.name")
+
+ // ProcessRuntimeVersionKey is the attribute Key conforming to the
+ // "process.runtime.version" semantic conventions. It represents the
+ // version of the runtime of this process, as returned by the runtime
+ // without modification.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '14.0.2'
+ ProcessRuntimeVersionKey = attribute.Key("process.runtime.version")
+)
+
+// ProcessCommand returns an attribute KeyValue conforming to the
+// "process.command" semantic conventions. It represents the command used to
+// launch the process (i.e. the command name). On Linux based systems, can be
+// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to
+// the first parameter extracted from `GetCommandLineW`.
+func ProcessCommand(val string) attribute.KeyValue {
+ return ProcessCommandKey.String(val)
+}
+
+// ProcessCommandArgs returns an attribute KeyValue conforming to the
+// "process.command_args" semantic conventions. It represents all the
+// command arguments (including the command/executable itself) as received by
+// the process. On Linux-based systems (and some other Unixoid systems
+// supporting procfs), can be set according to the list of null-delimited
+// strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
+// this would be the full argv vector passed to `main`.
+func ProcessCommandArgs(val ...string) attribute.KeyValue {
+ return ProcessCommandArgsKey.StringSlice(val)
+}
+
+// ProcessCommandLine returns an attribute KeyValue conforming to the
+// "process.command_line" semantic conventions. It represents the full command
+// used to launch the process as a single string. On Windows, can be set to
+// the result of `GetCommandLineW`. Do not set this if you have to assemble it
+// just for monitoring; use `process.command_args` instead.
+func ProcessCommandLine(val string) attribute.KeyValue {
+ return ProcessCommandLineKey.String(val)
+}
+
+// ProcessExecutableName returns an attribute KeyValue conforming to the
+// "process.executable.name" semantic conventions. It represents the name of
+// the process executable. On Linux based systems, can be set to the `Name` in
+// `proc/[pid]/status`. On Windows, can be set to the base name of
+// `GetProcessImageFileNameW`.
+func ProcessExecutableName(val string) attribute.KeyValue {
+ return ProcessExecutableNameKey.String(val)
+}
+
+// ProcessExecutablePath returns an attribute KeyValue conforming to the
+// "process.executable.path" semantic conventions. It represents the full path
+// to the process executable. On Linux based systems, can be set to the target
+// of `proc/[pid]/exe`. On Windows, can be set to the result of
+// `GetProcessImageFileNameW`.
+func ProcessExecutablePath(val string) attribute.KeyValue {
+ return ProcessExecutablePathKey.String(val)
+}
+
+// ProcessOwner returns an attribute KeyValue conforming to the
+// "process.owner" semantic conventions. It represents the username of the user
+// that owns the process.
+func ProcessOwner(val string) attribute.KeyValue {
+ return ProcessOwnerKey.String(val)
+}
+
+// ProcessParentPID returns an attribute KeyValue conforming to the
+// "process.parent_pid" semantic conventions. It represents the parent Process
+// identifier (PPID).
+func ProcessParentPID(val int) attribute.KeyValue {
+ return ProcessParentPIDKey.Int(val)
+}
+
+// ProcessPID returns an attribute KeyValue conforming to the "process.pid"
+// semantic conventions. It represents the process identifier (PID).
+func ProcessPID(val int) attribute.KeyValue {
+ return ProcessPIDKey.Int(val)
+}
+
+// ProcessRuntimeDescription returns an attribute KeyValue conforming to the
+// "process.runtime.description" semantic conventions. It represents an
+// additional description about the runtime of the process, for example a
+// specific vendor customization of the runtime environment.
+func ProcessRuntimeDescription(val string) attribute.KeyValue {
+ return ProcessRuntimeDescriptionKey.String(val)
+}
+
+// ProcessRuntimeName returns an attribute KeyValue conforming to the
+// "process.runtime.name" semantic conventions. It represents the name of the
+// runtime of this process. For compiled native binaries, this SHOULD be the
+// name of the compiler.
+func ProcessRuntimeName(val string) attribute.KeyValue {
+ return ProcessRuntimeNameKey.String(val)
+}
+
+// ProcessRuntimeVersion returns an attribute KeyValue conforming to the
+// "process.runtime.version" semantic conventions. It represents the version of
+// the runtime of this process, as returned by the runtime without
+// modification.
+func ProcessRuntimeVersion(val string) attribute.KeyValue {
+ return ProcessRuntimeVersionKey.String(val)
+}
+
+// The Android platform on which the Android application is running.
+const (
+ // AndroidOSAPILevelKey is the attribute Key conforming to the
+ // "android.os.api_level" semantic conventions. It uniquely identifies
+ // the framework API revision offered by a version (`os.version`) of the
+ // Android operating system. More information can be found
+ // [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '33', '32'
+ AndroidOSAPILevelKey = attribute.Key("android.os.api_level")
+)
+
+// AndroidOSAPILevel returns an attribute KeyValue conforming to the
+// "android.os.api_level" semantic conventions. It uniquely identifies the
+// framework API revision offered by a version (`os.version`) of the Android
+// operating system. More information can be found
+// [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels).
+func AndroidOSAPILevel(val string) attribute.KeyValue {
+ return AndroidOSAPILevelKey.String(val)
+}
+
+// The web browser in which the application represented by the resource is
+// running. The `browser.*` attributes MUST be used only for resources that
+// represent applications running in a web browser (regardless of whether
+// running on a mobile or desktop device).
+const (
+ // BrowserBrandsKey is the attribute Key conforming to the "browser.brands"
+ // semantic conventions. It represents the array of brand name and version
+ // separated by a space
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99'
+ // Note: This value is intended to be taken from the [UA client hints
+ // API](https://wicg.github.io/ua-client-hints/#interface)
+ // (`navigator.userAgentData.brands`).
+ BrowserBrandsKey = attribute.Key("browser.brands")
+
+ // BrowserLanguageKey is the attribute Key conforming to the
+ // "browser.language" semantic conventions.
It represents the preferred + // language of the user using the browser + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'en', 'en-US', 'fr', 'fr-FR' + // Note: This value is intended to be taken from the Navigator API + // `navigator.language`. + BrowserLanguageKey = attribute.Key("browser.language") + + // BrowserMobileKey is the attribute Key conforming to the "browser.mobile" + // semantic conventions. It represents a boolean that is true if the + // browser is running on a mobile device + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.mobile`). If unavailable, this attribute + // SHOULD be left unset. + BrowserMobileKey = attribute.Key("browser.mobile") + + // BrowserPlatformKey is the attribute Key conforming to the + // "browser.platform" semantic conventions. It represents the platform on + // which the browser is running + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Windows', 'macOS', 'Android' + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.platform`). If unavailable, the legacy + // `navigator.platform` API SHOULD NOT be used instead and this attribute + // SHOULD be left unset in order for the values to be consistent. + // The list of possible values is defined in the [W3C User-Agent Client + // Hints + // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform). + // Note that some (but not all) of these values can overlap with values in + // the [`os.type` and `os.name` attributes](./os.md). However, for + // consistency, the values in the `browser.platform` attribute should + // capture the exact value that the user agent provides. + BrowserPlatformKey = attribute.Key("browser.platform") +) + +// BrowserBrands returns an attribute KeyValue conforming to the +// "browser.brands" semantic conventions. It represents the array of brand name +// and version separated by a space +func BrowserBrands(val ...string) attribute.KeyValue { + return BrowserBrandsKey.StringSlice(val) +} + +// BrowserLanguage returns an attribute KeyValue conforming to the +// "browser.language" semantic conventions. It represents the preferred +// language of the user using the browser +func BrowserLanguage(val string) attribute.KeyValue { + return BrowserLanguageKey.String(val) +} + +// BrowserMobile returns an attribute KeyValue conforming to the +// "browser.mobile" semantic conventions. It represents a boolean that is true +// if the browser is running on a mobile device +func BrowserMobile(val bool) attribute.KeyValue { + return BrowserMobileKey.Bool(val) +} + +// BrowserPlatform returns an attribute KeyValue conforming to the +// "browser.platform" semantic conventions. It represents the platform on which +// the browser is running +func BrowserPlatform(val string) attribute.KeyValue { + return BrowserPlatformKey.String(val) +} + +// Resources used by AWS Elastic Container Service (ECS). +const ( + // AWSECSClusterARNKey is the attribute Key conforming to the + // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an + // [ECS + // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") + + // AWSECSContainerARNKey is the attribute Key conforming to the + // "aws.ecs.container.arn" semantic conventions. It represents the Amazon + // Resource Name (ARN) of an [ECS container + // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' + AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") + + // AWSECSLaunchtypeKey is the attribute Key conforming to the + // "aws.ecs.launchtype" semantic conventions. It represents the [launch + // type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html) + // for an ECS task. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") + + // AWSECSTaskARNKey is the attribute Key conforming to the + // "aws.ecs.task.arn" semantic conventions. It represents the ARN of an + // [ECS task + // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b' + AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") + + // AWSECSTaskFamilyKey is the attribute Key conforming to the + // "aws.ecs.task.family" semantic conventions. It represents the task + // definition family this task definition is a member of. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry-family' + AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") + + // AWSECSTaskRevisionKey is the attribute Key conforming to the + // "aws.ecs.task.revision" semantic conventions. It represents the revision + // for this task definition. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '8', '26' + AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") +) + +var ( + // ec2 + AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") + // fargate + AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") +) + +// AWSECSClusterARN returns an attribute KeyValue conforming to the +// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS +// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). +func AWSECSClusterARN(val string) attribute.KeyValue { + return AWSECSClusterARNKey.String(val) +} + +// AWSECSContainerARN returns an attribute KeyValue conforming to the +// "aws.ecs.container.arn" semantic conventions. It represents the Amazon +// Resource Name (ARN) of an [ECS container +// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). +func AWSECSContainerARN(val string) attribute.KeyValue { + return AWSECSContainerARNKey.String(val) +} + +// AWSECSTaskARN returns an attribute KeyValue conforming to the +// "aws.ecs.task.arn" semantic conventions. It represents the ARN of an [ECS +// task +// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html). 
+func AWSECSTaskARN(val string) attribute.KeyValue { + return AWSECSTaskARNKey.String(val) +} + +// AWSECSTaskFamily returns an attribute KeyValue conforming to the +// "aws.ecs.task.family" semantic conventions. It represents the task +// definition family this task definition is a member of. +func AWSECSTaskFamily(val string) attribute.KeyValue { + return AWSECSTaskFamilyKey.String(val) +} + +// AWSECSTaskRevision returns an attribute KeyValue conforming to the +// "aws.ecs.task.revision" semantic conventions. It represents the revision for +// this task definition. +func AWSECSTaskRevision(val string) attribute.KeyValue { + return AWSECSTaskRevisionKey.String(val) +} + +// Resources used by AWS Elastic Kubernetes Service (EKS). +const ( + // AWSEKSClusterARNKey is the attribute Key conforming to the + // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an + // EKS cluster. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") +) + +// AWSEKSClusterARN returns an attribute KeyValue conforming to the +// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS +// cluster. +func AWSEKSClusterARN(val string) attribute.KeyValue { + return AWSEKSClusterARNKey.String(val) +} + +// Resources specific to Amazon Web Services. +const ( + // AWSLogGroupARNsKey is the attribute Key conforming to the + // "aws.log.group.arns" semantic conventions. It represents the Amazon + // Resource Name(s) (ARN) of the AWS log group(s). + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' + // Note: See the [log group ARN format + // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). + AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") + + // AWSLogGroupNamesKey is the attribute Key conforming to the + // "aws.log.group.names" semantic conventions. It represents the name(s) of + // the AWS log group(s) an application is writing to. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/aws/lambda/my-function', 'opentelemetry-service' + // Note: Multiple log groups must be supported for cases like + // multi-container applications, where a single application has sidecar + // containers, and each write to their own log group. + AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") + + // AWSLogStreamARNsKey is the attribute Key conforming to the + // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of + // the AWS log stream(s). + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + // Note: See the [log stream ARN format + // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). + // One log group can contain several log streams, so these ARNs necessarily + // identify both a log group and a log stream. + AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") + + // AWSLogStreamNamesKey is the attribute Key conforming to the + // "aws.log.stream.names" semantic conventions. 
It represents the name(s) + // of the AWS log stream(s) an application is writing to. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") +) + +// AWSLogGroupARNs returns an attribute KeyValue conforming to the +// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource +// Name(s) (ARN) of the AWS log group(s). +func AWSLogGroupARNs(val ...string) attribute.KeyValue { + return AWSLogGroupARNsKey.StringSlice(val) +} + +// AWSLogGroupNames returns an attribute KeyValue conforming to the +// "aws.log.group.names" semantic conventions. It represents the name(s) of the +// AWS log group(s) an application is writing to. +func AWSLogGroupNames(val ...string) attribute.KeyValue { + return AWSLogGroupNamesKey.StringSlice(val) +} + +// AWSLogStreamARNs returns an attribute KeyValue conforming to the +// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the +// AWS log stream(s). +func AWSLogStreamARNs(val ...string) attribute.KeyValue { + return AWSLogStreamARNsKey.StringSlice(val) +} + +// AWSLogStreamNames returns an attribute KeyValue conforming to the +// "aws.log.stream.names" semantic conventions. It represents the name(s) of +// the AWS log stream(s) an application is writing to. +func AWSLogStreamNames(val ...string) attribute.KeyValue { + return AWSLogStreamNamesKey.StringSlice(val) +} + +// Resource used by Google Cloud Run. +const ( + // GCPCloudRunJobExecutionKey is the attribute Key conforming to the + // "gcp.cloud_run.job.execution" semantic conventions. It represents the + // name of the Cloud Run + // [execution](https://cloud.google.com/run/docs/managing/job-executions) + // being run for the Job, as set by the + // [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) + // environment variable. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'job-name-xxxx', 'sample-job-mdw84' + GCPCloudRunJobExecutionKey = attribute.Key("gcp.cloud_run.job.execution") + + // GCPCloudRunJobTaskIndexKey is the attribute Key conforming to the + // "gcp.cloud_run.job.task_index" semantic conventions. It represents the + // index for a task within an execution as provided by the + // [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) + // environment variable. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 0, 1 + GCPCloudRunJobTaskIndexKey = attribute.Key("gcp.cloud_run.job.task_index") +) + +// GCPCloudRunJobExecution returns an attribute KeyValue conforming to the +// "gcp.cloud_run.job.execution" semantic conventions. It represents the name +// of the Cloud Run +// [execution](https://cloud.google.com/run/docs/managing/job-executions) being +// run for the Job, as set by the +// [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) +// environment variable. +func GCPCloudRunJobExecution(val string) attribute.KeyValue { + return GCPCloudRunJobExecutionKey.String(val) +} + +// GCPCloudRunJobTaskIndex returns an attribute KeyValue conforming to the +// "gcp.cloud_run.job.task_index" semantic conventions. 
It represents the index +// for a task within an execution as provided by the +// [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) +// environment variable. +func GCPCloudRunJobTaskIndex(val int) attribute.KeyValue { + return GCPCloudRunJobTaskIndexKey.Int(val) +} + +// Resources used by Google Compute Engine (GCE). +const ( + // GCPGceInstanceHostnameKey is the attribute Key conforming to the + // "gcp.gce.instance.hostname" semantic conventions. It represents the + // hostname of a GCE instance. This is the full value of the default or + // [custom + // hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'my-host1234.example.com', + // 'sample-vm.us-west1-b.c.my-project.internal' + GCPGceInstanceHostnameKey = attribute.Key("gcp.gce.instance.hostname") + + // GCPGceInstanceNameKey is the attribute Key conforming to the + // "gcp.gce.instance.name" semantic conventions. It represents the instance + // name of a GCE instance. This is the value provided by `host.name`, the + // visible name of the instance in the Cloud Console UI, and the prefix for + // the default hostname of the instance as defined by the [default internal + // DNS + // name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'instance-1', 'my-vm-name' + GCPGceInstanceNameKey = attribute.Key("gcp.gce.instance.name") +) + +// GCPGceInstanceHostname returns an attribute KeyValue conforming to the +// "gcp.gce.instance.hostname" semantic conventions. It represents the hostname +// of a GCE instance. This is the full value of the default or [custom +// hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm). +func GCPGceInstanceHostname(val string) attribute.KeyValue { + return GCPGceInstanceHostnameKey.String(val) +} + +// GCPGceInstanceName returns an attribute KeyValue conforming to the +// "gcp.gce.instance.name" semantic conventions. It represents the instance +// name of a GCE instance. This is the value provided by `host.name`, the +// visible name of the instance in the Cloud Console UI, and the prefix for the +// default hostname of the instance as defined by the [default internal DNS +// name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names). +func GCPGceInstanceName(val string) attribute.KeyValue { + return GCPGceInstanceNameKey.String(val) +} + +// Heroku dyno metadata +const ( + // HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id" + // semantic conventions. It represents the unique identifier for the + // application + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2daa2797-e42b-4624-9322-ec3f968df4da' + HerokuAppIDKey = attribute.Key("heroku.app.id") + + // HerokuReleaseCommitKey is the attribute Key conforming to the + // "heroku.release.commit" semantic conventions. It represents the commit + // hash for the current release + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'e6134959463efd8966b20e75b913cafe3f5ec' + HerokuReleaseCommitKey = attribute.Key("heroku.release.commit") + + // HerokuReleaseCreationTimestampKey is the attribute Key conforming to the + // "heroku.release.creation_timestamp" semantic conventions. 
It represents + // the time and date the release was created + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2022-10-23T18:00:42Z' + HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp") +) + +// HerokuAppID returns an attribute KeyValue conforming to the +// "heroku.app.id" semantic conventions. It represents the unique identifier +// for the application +func HerokuAppID(val string) attribute.KeyValue { + return HerokuAppIDKey.String(val) +} + +// HerokuReleaseCommit returns an attribute KeyValue conforming to the +// "heroku.release.commit" semantic conventions. It represents the commit hash +// for the current release +func HerokuReleaseCommit(val string) attribute.KeyValue { + return HerokuReleaseCommitKey.String(val) +} + +// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming +// to the "heroku.release.creation_timestamp" semantic conventions. It +// represents the time and date the release was created +func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue { + return HerokuReleaseCreationTimestampKey.String(val) +} + +// The software deployment. +const ( + // DeploymentEnvironmentKey is the attribute Key conforming to the + // "deployment.environment" semantic conventions. It represents the name of + // the [deployment + // environment](https://wikipedia.org/wiki/Deployment_environment) (aka + // deployment tier). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'staging', 'production' + // Note: `deployment.environment` does not affect the uniqueness + // constraints defined through + // the `service.namespace`, `service.name` and `service.instance.id` + // resource attributes. + // This implies that resources carrying the following attribute + // combinations MUST be + // considered to be identifying the same service: + // + // * `service.name=frontend`, `deployment.environment=production` + // * `service.name=frontend`, `deployment.environment=staging`. + DeploymentEnvironmentKey = attribute.Key("deployment.environment") +) + +// DeploymentEnvironment returns an attribute KeyValue conforming to the +// "deployment.environment" semantic conventions. It represents the name of the +// [deployment environment](https://wikipedia.org/wiki/Deployment_environment) +// (aka deployment tier). +func DeploymentEnvironment(val string) attribute.KeyValue { + return DeploymentEnvironmentKey.String(val) +} + +// A serverless instance. +const ( + // FaaSInstanceKey is the attribute Key conforming to the "faas.instance" + // semantic conventions. It represents the execution environment ID as a + // string, that will be potentially reused for other invocations to the + // same function/function version. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de' + // Note: * **AWS Lambda:** Use the (full) log stream name. + FaaSInstanceKey = attribute.Key("faas.instance") + + // FaaSMaxMemoryKey is the attribute Key conforming to the + // "faas.max_memory" semantic conventions. It represents the amount of + // memory available to the serverless function converted to Bytes. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 134217728 + // Note: It's recommended to set this attribute since e.g. too little + // memory can easily stop a Java AWS Lambda function from working + // correctly. 
On AWS Lambda, the environment variable
+ // `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information (which must
+ // be multiplied by 1,048,576).
+ FaaSMaxMemoryKey = attribute.Key("faas.max_memory")
+
+ // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic
+ // conventions. It represents the name of the single function that this
+ // runtime instance executes.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: experimental
+ // Examples: 'my-function', 'myazurefunctionapp/some-function-name'
+ // Note: This is the name of the function as configured/deployed on the
+ // FaaS
+ // platform and is usually different from the name of the callback
+ // function (which may be stored in the
+ // [`code.namespace`/`code.function`](/docs/general/attributes.md#source-code-attributes)
+ // span attributes).
+ //
+ // For some cloud providers, the above definition is ambiguous. The
+ // following
+ // definition of function name MUST be used for this attribute
+ // (and consequently the span name) for the listed cloud
+ // providers/products:
+ //
+ // * **Azure:** The full name `<FUNCAPP>/<FUNC>`, i.e., function app name
+ // followed by a forward slash followed by the function name (this form
+ // can also be seen in the resource JSON for the function).
+ // This means that a span attribute MUST be used, as an Azure function
+ // app can host multiple functions that would usually share
+ // a TracerProvider (see also the `cloud.resource_id` attribute).
+ FaaSNameKey = attribute.Key("faas.name")
+
+ // FaaSVersionKey is the attribute Key conforming to the "faas.version"
+ // semantic conventions. It represents the immutable version of the
+ // function being executed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '26', 'pinkfroid-00002'
+ // Note: Depending on the cloud provider and platform, use:
+ //
+ // * **AWS Lambda:** The [function
+ // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html)
+ // (an integer represented as a decimal string).
+ // * **Google Cloud Run (Services):** The
+ // [revision](https://cloud.google.com/run/docs/managing/revisions)
+ // (i.e., the function name plus the revision suffix).
+ // * **Google Cloud Functions:** The value of the
+ // [`K_REVISION` environment
+ // variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically).
+ // * **Azure Functions:** Not applicable. Do not set this attribute.
+ FaaSVersionKey = attribute.Key("faas.version")
+)
+
+// FaaSInstance returns an attribute KeyValue conforming to the
+// "faas.instance" semantic conventions. It represents the execution
+// environment ID as a string, that will be potentially reused for other
+// invocations to the same function/function version.
+func FaaSInstance(val string) attribute.KeyValue {
+ return FaaSInstanceKey.String(val)
+}
+
+// FaaSMaxMemory returns an attribute KeyValue conforming to the
+// "faas.max_memory" semantic conventions. It represents the amount of memory
+// available to the serverless function converted to Bytes.
+func FaaSMaxMemory(val int) attribute.KeyValue {
+ return FaaSMaxMemoryKey.Int(val)
+}
+
+// FaaSName returns an attribute KeyValue conforming to the "faas.name"
+// semantic conventions. It represents the name of the single function that
+// this runtime instance executes.
+func FaaSName(val string) attribute.KeyValue { + return FaaSNameKey.String(val) +} + +// FaaSVersion returns an attribute KeyValue conforming to the +// "faas.version" semantic conventions. It represents the immutable version of +// the function being executed. +func FaaSVersion(val string) attribute.KeyValue { + return FaaSVersionKey.String(val) +} + +// A service instance. +const ( + // ServiceNameKey is the attribute Key conforming to the "service.name" + // semantic conventions. It represents the logical name of the service. + // + // Type: string + // RequirementLevel: Required + // Stability: experimental + // Examples: 'shoppingcart' + // Note: MUST be the same for all instances of horizontally scaled + // services. If the value was not specified, SDKs MUST fallback to + // `unknown_service:` concatenated with + // [`process.executable.name`](process.md#process), e.g. + // `unknown_service:bash`. If `process.executable.name` is not available, + // the value MUST be set to `unknown_service`. + ServiceNameKey = attribute.Key("service.name") + + // ServiceVersionKey is the attribute Key conforming to the + // "service.version" semantic conventions. It represents the version string + // of the service API or implementation. The format is not defined by these + // conventions. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2.0.0', 'a01dbef8a' + ServiceVersionKey = attribute.Key("service.version") +) + +// ServiceName returns an attribute KeyValue conforming to the +// "service.name" semantic conventions. It represents the logical name of the +// service. +func ServiceName(val string) attribute.KeyValue { + return ServiceNameKey.String(val) +} + +// ServiceVersion returns an attribute KeyValue conforming to the +// "service.version" semantic conventions. It represents the version string of +// the service API or implementation. The format is not defined by these +// conventions. +func ServiceVersion(val string) attribute.KeyValue { + return ServiceVersionKey.String(val) +} + +// A service instance. +const ( + // ServiceInstanceIDKey is the attribute Key conforming to the + // "service.instance.id" semantic conventions. It represents the string ID + // of the service instance. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'my-k8s-pod-deployment-1', + // '627cc493-f310-47de-96bd-71410b7dec09' + // Note: MUST be unique for each instance of the same + // `service.namespace,service.name` pair (in other words + // `service.namespace,service.name,service.instance.id` triplet MUST be + // globally unique). The ID helps to distinguish instances of the same + // service that exist at the same time (e.g. instances of a horizontally + // scaled service). It is preferable for the ID to be persistent and stay + // the same for the lifetime of the service instance, however it is + // acceptable that the ID is ephemeral and changes during important + // lifetime events for the service (e.g. service restarts). If the service + // has no inherent unique ID that can be used as the value of this + // attribute it is recommended to generate a random Version 1 or Version 4 + // RFC 4122 UUID (services aiming for reproducible UUIDs may also use + // Version 5, see RFC 4122 for more recommendations). + ServiceInstanceIDKey = attribute.Key("service.instance.id") + + // ServiceNamespaceKey is the attribute Key conforming to the + // "service.namespace" semantic conventions. 
It represents a namespace for + // `service.name`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Shop' + // Note: A string value having a meaning that helps to distinguish a group + // of services, for example the team name that owns a group of services. + // `service.name` is expected to be unique within the same namespace. If + // `service.namespace` is not specified in the Resource then `service.name` + // is expected to be unique for all services that have no explicit + // namespace defined (so the empty/unspecified namespace is simply one more + // valid namespace). Zero-length namespace string is assumed equal to + // unspecified namespace. + ServiceNamespaceKey = attribute.Key("service.namespace") +) + +// ServiceInstanceID returns an attribute KeyValue conforming to the +// "service.instance.id" semantic conventions. It represents the string ID of +// the service instance. +func ServiceInstanceID(val string) attribute.KeyValue { + return ServiceInstanceIDKey.String(val) +} + +// ServiceNamespace returns an attribute KeyValue conforming to the +// "service.namespace" semantic conventions. It represents a namespace for +// `service.name`. +func ServiceNamespace(val string) attribute.KeyValue { + return ServiceNamespaceKey.String(val) +} + +// The telemetry SDK used to capture data recorded by the instrumentation +// libraries. +const ( + // TelemetrySDKLanguageKey is the attribute Key conforming to the + // "telemetry.sdk.language" semantic conventions. It represents the + // language of the telemetry SDK. + // + // Type: Enum + // RequirementLevel: Required + // Stability: experimental + TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") + + // TelemetrySDKNameKey is the attribute Key conforming to the + // "telemetry.sdk.name" semantic conventions. It represents the name of the + // telemetry SDK as defined above. + // + // Type: string + // RequirementLevel: Required + // Stability: experimental + // Examples: 'opentelemetry' + // Note: The OpenTelemetry SDK MUST set the `telemetry.sdk.name` attribute + // to `opentelemetry`. + // If another SDK, like a fork or a vendor-provided implementation, is + // used, this SDK MUST set the + // `telemetry.sdk.name` attribute to the fully-qualified class or module + // name of this SDK's main entry point + // or another suitable identifier depending on the language. + // The identifier `opentelemetry` is reserved and MUST NOT be used in this + // case. + // All custom identifiers SHOULD be stable across different versions of an + // implementation. + TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") + + // TelemetrySDKVersionKey is the attribute Key conforming to the + // "telemetry.sdk.version" semantic conventions. It represents the version + // string of the telemetry SDK. 
+ // + // Type: string + // RequirementLevel: Required + // Stability: experimental + // Examples: '1.2.3' + TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") +) + +var ( + // cpp + TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") + // dotnet + TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") + // erlang + TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") + // go + TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") + // java + TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") + // nodejs + TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") + // php + TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") + // python + TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") + // ruby + TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") + // rust + TelemetrySDKLanguageRust = TelemetrySDKLanguageKey.String("rust") + // swift + TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") + // webjs + TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs") +) + +// TelemetrySDKName returns an attribute KeyValue conforming to the +// "telemetry.sdk.name" semantic conventions. It represents the name of the +// telemetry SDK as defined above. +func TelemetrySDKName(val string) attribute.KeyValue { + return TelemetrySDKNameKey.String(val) +} + +// TelemetrySDKVersion returns an attribute KeyValue conforming to the +// "telemetry.sdk.version" semantic conventions. It represents the version +// string of the telemetry SDK. +func TelemetrySDKVersion(val string) attribute.KeyValue { + return TelemetrySDKVersionKey.String(val) +} + +// The telemetry SDK used to capture data recorded by the instrumentation +// libraries. +const ( + // TelemetryDistroNameKey is the attribute Key conforming to the + // "telemetry.distro.name" semantic conventions. It represents the name of + // the auto instrumentation agent or distribution, if used. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'parts-unlimited-java' + // Note: Official auto instrumentation agents and distributions SHOULD set + // the `telemetry.distro.name` attribute to + // a string starting with `opentelemetry-`, e.g. + // `opentelemetry-java-instrumentation`. + TelemetryDistroNameKey = attribute.Key("telemetry.distro.name") + + // TelemetryDistroVersionKey is the attribute Key conforming to the + // "telemetry.distro.version" semantic conventions. It represents the + // version string of the auto instrumentation agent or distribution, if + // used. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '1.2.3' + TelemetryDistroVersionKey = attribute.Key("telemetry.distro.version") +) + +// TelemetryDistroName returns an attribute KeyValue conforming to the +// "telemetry.distro.name" semantic conventions. It represents the name of the +// auto instrumentation agent or distribution, if used. +func TelemetryDistroName(val string) attribute.KeyValue { + return TelemetryDistroNameKey.String(val) +} + +// TelemetryDistroVersion returns an attribute KeyValue conforming to the +// "telemetry.distro.version" semantic conventions. It represents the version +// string of the auto instrumentation agent or distribution, if used. 
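+//
+// A minimal usage sketch (illustrative only; the distro name and version
+// reuse the example values documented above):
+//
+//	attrs := []attribute.KeyValue{
+//		semconv.TelemetryDistroName("parts-unlimited-java"),
+//		semconv.TelemetryDistroVersion("1.2.3"),
+//	}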
+func TelemetryDistroVersion(val string) attribute.KeyValue { + return TelemetryDistroVersionKey.String(val) +} + +// Resource describing the packaged software running the application code. Web +// engines are typically executed using process.runtime. +const ( + // WebEngineDescriptionKey is the attribute Key conforming to the + // "webengine.description" semantic conventions. It represents the + // additional description of the web engine (e.g. detailed version and + // edition information). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - + // 2.2.2.Final' + WebEngineDescriptionKey = attribute.Key("webengine.description") + + // WebEngineNameKey is the attribute Key conforming to the "webengine.name" + // semantic conventions. It represents the name of the web engine. + // + // Type: string + // RequirementLevel: Required + // Stability: experimental + // Examples: 'WildFly' + WebEngineNameKey = attribute.Key("webengine.name") + + // WebEngineVersionKey is the attribute Key conforming to the + // "webengine.version" semantic conventions. It represents the version of + // the web engine. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '21.0.0' + WebEngineVersionKey = attribute.Key("webengine.version") +) + +// WebEngineDescription returns an attribute KeyValue conforming to the +// "webengine.description" semantic conventions. It represents the additional +// description of the web engine (e.g. detailed version and edition +// information). +func WebEngineDescription(val string) attribute.KeyValue { + return WebEngineDescriptionKey.String(val) +} + +// WebEngineName returns an attribute KeyValue conforming to the +// "webengine.name" semantic conventions. It represents the name of the web +// engine. +func WebEngineName(val string) attribute.KeyValue { + return WebEngineNameKey.String(val) +} + +// WebEngineVersion returns an attribute KeyValue conforming to the +// "webengine.version" semantic conventions. It represents the version of the +// web engine. +func WebEngineVersion(val string) attribute.KeyValue { + return WebEngineVersionKey.String(val) +} + +// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's +// concepts. +const ( + // OTelScopeNameKey is the attribute Key conforming to the + // "otel.scope.name" semantic conventions. It represents the name of the + // instrumentation scope - (`InstrumentationScope.Name` in OTLP). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'io.opentelemetry.contrib.mongodb' + OTelScopeNameKey = attribute.Key("otel.scope.name") + + // OTelScopeVersionKey is the attribute Key conforming to the + // "otel.scope.version" semantic conventions. It represents the version of + // the instrumentation scope - (`InstrumentationScope.Version` in OTLP). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '1.0.0' + OTelScopeVersionKey = attribute.Key("otel.scope.version") +) + +// OTelScopeName returns an attribute KeyValue conforming to the +// "otel.scope.name" semantic conventions. It represents the name of the +// instrumentation scope - (`InstrumentationScope.Name` in OTLP). +func OTelScopeName(val string) attribute.KeyValue { + return OTelScopeNameKey.String(val) +} + +// OTelScopeVersion returns an attribute KeyValue conforming to the +// "otel.scope.version" semantic conventions. 
It represents the version of the
+// instrumentation scope - (`InstrumentationScope.Version` in OTLP).
+func OTelScopeVersion(val string) attribute.KeyValue {
+	return OTelScopeVersionKey.String(val)
+}
+
+// Span attributes used by non-OTLP exporters to represent OpenTelemetry
+// Scope's concepts.
+const (
+	// OTelLibraryNameKey is the attribute Key conforming to the
+	// "otel.library.name" semantic conventions.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Examples: 'io.opentelemetry.contrib.mongodb'
+	// Deprecated: use the `otel.scope.name` attribute.
+	OTelLibraryNameKey = attribute.Key("otel.library.name")
+
+	// OTelLibraryVersionKey is the attribute Key conforming to the
+	// "otel.library.version" semantic conventions.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Examples: '1.0.0'
+	// Deprecated: use the `otel.scope.version` attribute.
+	OTelLibraryVersionKey = attribute.Key("otel.library.version")
+)
+
+// OTelLibraryName returns an attribute KeyValue conforming to the
+// "otel.library.name" semantic conventions.
+//
+// Deprecated: use the `otel.scope.name` attribute.
+func OTelLibraryName(val string) attribute.KeyValue {
+	return OTelLibraryNameKey.String(val)
+}
+
+// OTelLibraryVersion returns an attribute KeyValue conforming to the
+// "otel.library.version" semantic conventions.
+//
+// Deprecated: use the `otel.scope.version` attribute.
+func OTelLibraryVersion(val string) attribute.KeyValue {
+	return OTelLibraryVersionKey.String(val)
+}
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/schema.go
new file mode 100644
index 0000000000..fe80b1731d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/schema.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0"
+
+// SchemaURL is the schema URL that matches the version of the semantic conventions
+// that this package defines. Semconv packages starting from v1.4.0 must declare
+// non-empty schema URL in the form https://opentelemetry.io/schemas/<version>
+const SchemaURL = "https://opentelemetry.io/schemas/1.24.0"
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/trace.go
new file mode 100644
index 0000000000..c1718234e5
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/trace.go
@@ -0,0 +1,1323 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// Operations that access some remote service.
+const (
+	// PeerServiceKey is the attribute Key conforming to the "peer.service"
+	// semantic conventions. It represents the
+	// [`service.name`](/docs/resource/README.md#service) of the remote
+	// service. SHOULD be equal to the actual `service.name` resource attribute
+	// of the remote service if any.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'AuthTokenCache'
+	PeerServiceKey = attribute.Key("peer.service")
+)
+
+// PeerService returns an attribute KeyValue conforming to the
+// "peer.service" semantic conventions.
It represents the +// [`service.name`](/docs/resource/README.md#service) of the remote service. +// SHOULD be equal to the actual `service.name` resource attribute of the +// remote service if any. +func PeerService(val string) attribute.KeyValue { + return PeerServiceKey.String(val) +} + +// These attributes may be used for any operation with an authenticated and/or +// authorized enduser. +const ( + // EnduserIDKey is the attribute Key conforming to the "enduser.id" + // semantic conventions. It represents the username or client_id extracted + // from the access token or + // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header + // in the inbound request from outside the system. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'username' + EnduserIDKey = attribute.Key("enduser.id") + + // EnduserRoleKey is the attribute Key conforming to the "enduser.role" + // semantic conventions. It represents the actual/assumed role the client + // is making the request under extracted from token or application security + // context. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'admin' + EnduserRoleKey = attribute.Key("enduser.role") + + // EnduserScopeKey is the attribute Key conforming to the "enduser.scope" + // semantic conventions. It represents the scopes or granted authorities + // the client currently possesses extracted from token or application + // security context. The value would come from the scope associated with an + // [OAuth 2.0 Access + // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute + // value in a [SAML 2.0 + // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'read:message, write:files' + EnduserScopeKey = attribute.Key("enduser.scope") +) + +// EnduserID returns an attribute KeyValue conforming to the "enduser.id" +// semantic conventions. It represents the username or client_id extracted from +// the access token or +// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in +// the inbound request from outside the system. +func EnduserID(val string) attribute.KeyValue { + return EnduserIDKey.String(val) +} + +// EnduserRole returns an attribute KeyValue conforming to the +// "enduser.role" semantic conventions. It represents the actual/assumed role +// the client is making the request under extracted from token or application +// security context. +func EnduserRole(val string) attribute.KeyValue { + return EnduserRoleKey.String(val) +} + +// EnduserScope returns an attribute KeyValue conforming to the +// "enduser.scope" semantic conventions. It represents the scopes or granted +// authorities the client currently possesses extracted from token or +// application security context. The value would come from the scope associated +// with an [OAuth 2.0 Access +// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute +// value in a [SAML 2.0 +// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). +func EnduserScope(val string) attribute.KeyValue { + return EnduserScopeKey.String(val) +} + +// These attributes allow to report this unit of code and therefore to provide +// more context about the span. +const ( + // CodeColumnKey is the attribute Key conforming to the "code.column" + // semantic conventions. 
It represents the column number in `code.filepath` + // best representing the operation. It SHOULD point within the code unit + // named in `code.function`. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 16 + CodeColumnKey = attribute.Key("code.column") + + // CodeFilepathKey is the attribute Key conforming to the "code.filepath" + // semantic conventions. It represents the source code file name that + // identifies the code unit as uniquely as possible (preferably an absolute + // file path). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/usr/local/MyApplication/content_root/app/index.php' + CodeFilepathKey = attribute.Key("code.filepath") + + // CodeFunctionKey is the attribute Key conforming to the "code.function" + // semantic conventions. It represents the method or function name, or + // equivalent (usually rightmost part of the code unit's name). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'serveRequest' + CodeFunctionKey = attribute.Key("code.function") + + // CodeLineNumberKey is the attribute Key conforming to the "code.lineno" + // semantic conventions. It represents the line number in `code.filepath` + // best representing the operation. It SHOULD point within the code unit + // named in `code.function`. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 42 + CodeLineNumberKey = attribute.Key("code.lineno") + + // CodeNamespaceKey is the attribute Key conforming to the "code.namespace" + // semantic conventions. It represents the "namespace" within which + // `code.function` is defined. Usually the qualified class or module name, + // such that `code.namespace` + some separator + `code.function` form a + // unique identifier for the code unit. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'com.example.MyHTTPService' + CodeNamespaceKey = attribute.Key("code.namespace") + + // CodeStacktraceKey is the attribute Key conforming to the + // "code.stacktrace" semantic conventions. It represents a stacktrace as a + // string in the natural representation for the language runtime. The + // representation is to be determined and documented by each language SIG. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'at + // com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' + // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' + // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' + CodeStacktraceKey = attribute.Key("code.stacktrace") +) + +// CodeColumn returns an attribute KeyValue conforming to the "code.column" +// semantic conventions. It represents the column number in `code.filepath` +// best representing the operation. It SHOULD point within the code unit named +// in `code.function`. +func CodeColumn(val int) attribute.KeyValue { + return CodeColumnKey.Int(val) +} + +// CodeFilepath returns an attribute KeyValue conforming to the +// "code.filepath" semantic conventions. It represents the source code file +// name that identifies the code unit as uniquely as possible (preferably an +// absolute file path). +func CodeFilepath(val string) attribute.KeyValue { + return CodeFilepathKey.String(val) +} + +// CodeFunction returns an attribute KeyValue conforming to the +// "code.function" semantic conventions. 
It represents the method or function +// name, or equivalent (usually rightmost part of the code unit's name). +func CodeFunction(val string) attribute.KeyValue { + return CodeFunctionKey.String(val) +} + +// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno" +// semantic conventions. It represents the line number in `code.filepath` best +// representing the operation. It SHOULD point within the code unit named in +// `code.function`. +func CodeLineNumber(val int) attribute.KeyValue { + return CodeLineNumberKey.Int(val) +} + +// CodeNamespace returns an attribute KeyValue conforming to the +// "code.namespace" semantic conventions. It represents the "namespace" within +// which `code.function` is defined. Usually the qualified class or module +// name, such that `code.namespace` + some separator + `code.function` form a +// unique identifier for the code unit. +func CodeNamespace(val string) attribute.KeyValue { + return CodeNamespaceKey.String(val) +} + +// CodeStacktrace returns an attribute KeyValue conforming to the +// "code.stacktrace" semantic conventions. It represents a stacktrace as a +// string in the natural representation for the language runtime. The +// representation is to be determined and documented by each language SIG. +func CodeStacktrace(val string) attribute.KeyValue { + return CodeStacktraceKey.String(val) +} + +// These attributes may be used for any operation to store information about a +// thread that started a span. +const ( + // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic + // conventions. It represents the current "managed" thread ID (as opposed + // to OS thread ID). + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 42 + ThreadIDKey = attribute.Key("thread.id") + + // ThreadNameKey is the attribute Key conforming to the "thread.name" + // semantic conventions. It represents the current thread name. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'main' + ThreadNameKey = attribute.Key("thread.name") +) + +// ThreadID returns an attribute KeyValue conforming to the "thread.id" +// semantic conventions. It represents the current "managed" thread ID (as +// opposed to OS thread ID). +func ThreadID(val int) attribute.KeyValue { + return ThreadIDKey.Int(val) +} + +// ThreadName returns an attribute KeyValue conforming to the "thread.name" +// semantic conventions. It represents the current thread name. +func ThreadName(val string) attribute.KeyValue { + return ThreadNameKey.String(val) +} + +// Span attributes used by AWS Lambda (in addition to general `faas` +// attributes). +const ( + // AWSLambdaInvokedARNKey is the attribute Key conforming to the + // "aws.lambda.invoked_arn" semantic conventions. It represents the full + // invoked ARN as provided on the `Context` passed to the function + // (`Lambda-Runtime-Invoked-Function-ARN` header on the + // `/runtime/invocation/next` applicable). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias' + // Note: This may be different from `cloud.resource_id` if an alias is + // involved. + AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn") +) + +// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the +// "aws.lambda.invoked_arn" semantic conventions. 
It represents the full +// invoked ARN as provided on the `Context` passed to the function +// (`Lambda-Runtime-Invoked-Function-ARN` header on the +// `/runtime/invocation/next` applicable). +func AWSLambdaInvokedARN(val string) attribute.KeyValue { + return AWSLambdaInvokedARNKey.String(val) +} + +// Attributes for CloudEvents. CloudEvents is a specification on how to define +// event data in a standard way. These attributes can be attached to spans when +// performing operations with CloudEvents, regardless of the protocol being +// used. +const ( + // CloudeventsEventIDKey is the attribute Key conforming to the + // "cloudevents.event_id" semantic conventions. It represents the + // [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) + // uniquely identifies the event. + // + // Type: string + // RequirementLevel: Required + // Stability: experimental + // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001' + CloudeventsEventIDKey = attribute.Key("cloudevents.event_id") + + // CloudeventsEventSourceKey is the attribute Key conforming to the + // "cloudevents.event_source" semantic conventions. It represents the + // [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) + // identifies the context in which an event happened. + // + // Type: string + // RequirementLevel: Required + // Stability: experimental + // Examples: 'https://github.com/cloudevents', + // '/cloudevents/spec/pull/123', 'my-service' + CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source") + + // CloudeventsEventSpecVersionKey is the attribute Key conforming to the + // "cloudevents.event_spec_version" semantic conventions. It represents the + // [version of the CloudEvents + // specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) + // which the event uses. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '1.0' + CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version") + + // CloudeventsEventSubjectKey is the attribute Key conforming to the + // "cloudevents.event_subject" semantic conventions. It represents the + // [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) + // of the event in the context of the event producer (identified by + // source). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'mynewfile.jpg' + CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject") + + // CloudeventsEventTypeKey is the attribute Key conforming to the + // "cloudevents.event_type" semantic conventions. It represents the + // [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) + // contains a value describing the type of event related to the originating + // occurrence. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'com.github.pull_request.opened', + // 'com.example.object.deleted.v2' + CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type") +) + +// CloudeventsEventID returns an attribute KeyValue conforming to the +// "cloudevents.event_id" semantic conventions. It represents the +// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) +// uniquely identifies the event. 
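+//
+// A minimal usage sketch (illustrative only; assumes an active trace.Span
+// named span, and reuses the example values documented above):
+//
+//	span.SetAttributes(
+//		semconv.CloudeventsEventID("123e4567-e89b-12d3-a456-426614174000"),
+//		semconv.CloudeventsEventSource("my-service"),
+//	)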
+func CloudeventsEventID(val string) attribute.KeyValue { + return CloudeventsEventIDKey.String(val) +} + +// CloudeventsEventSource returns an attribute KeyValue conforming to the +// "cloudevents.event_source" semantic conventions. It represents the +// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) +// identifies the context in which an event happened. +func CloudeventsEventSource(val string) attribute.KeyValue { + return CloudeventsEventSourceKey.String(val) +} + +// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to +// the "cloudevents.event_spec_version" semantic conventions. It represents the +// [version of the CloudEvents +// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) +// which the event uses. +func CloudeventsEventSpecVersion(val string) attribute.KeyValue { + return CloudeventsEventSpecVersionKey.String(val) +} + +// CloudeventsEventSubject returns an attribute KeyValue conforming to the +// "cloudevents.event_subject" semantic conventions. It represents the +// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) +// of the event in the context of the event producer (identified by source). +func CloudeventsEventSubject(val string) attribute.KeyValue { + return CloudeventsEventSubjectKey.String(val) +} + +// CloudeventsEventType returns an attribute KeyValue conforming to the +// "cloudevents.event_type" semantic conventions. It represents the +// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) +// contains a value describing the type of event related to the originating +// occurrence. +func CloudeventsEventType(val string) attribute.KeyValue { + return CloudeventsEventTypeKey.String(val) +} + +// Semantic conventions for the OpenTracing Shim +const ( + // OpentracingRefTypeKey is the attribute Key conforming to the + // "opentracing.ref_type" semantic conventions. It represents the + // parent-child Reference type + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Note: The causal relationship between a child Span and a parent Span. + OpentracingRefTypeKey = attribute.Key("opentracing.ref_type") +) + +var ( + // The parent Span depends on the child Span in some capacity + OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of") + // The parent Span doesn't depend in any way on the result of the child Span + OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from") +) + +// Span attributes used by non-OTLP exporters to represent OpenTelemetry Span's +// concepts. +const ( + // OTelStatusCodeKey is the attribute Key conforming to the + // "otel.status_code" semantic conventions. It represents the name of the + // code, either "OK" or "ERROR". MUST NOT be set if the status code is + // UNSET. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + OTelStatusCodeKey = attribute.Key("otel.status_code") + + // OTelStatusDescriptionKey is the attribute Key conforming to the + // "otel.status_description" semantic conventions. It represents the + // description of the Status if it has a value, otherwise not set. 
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'resource not found'
+	OTelStatusDescriptionKey = attribute.Key("otel.status_description")
+)
+
+var (
+	// The operation has been validated by an Application developer or Operator to have completed successfully
+	OTelStatusCodeOk = OTelStatusCodeKey.String("OK")
+	// The operation contains an error
+	OTelStatusCodeError = OTelStatusCodeKey.String("ERROR")
+)
+
+// OTelStatusDescription returns an attribute KeyValue conforming to the
+// "otel.status_description" semantic conventions. It represents the
+// description of the Status if it has a value, otherwise not set.
+func OTelStatusDescription(val string) attribute.KeyValue {
+	return OTelStatusDescriptionKey.String(val)
+}
+
+// This semantic convention describes an instance of a function that runs
+// without provisioning or managing of servers (also known as serverless
+// functions or Function as a Service (FaaS)) with spans.
+const (
+	// FaaSInvocationIDKey is the attribute Key conforming to the
+	// "faas.invocation_id" semantic conventions. It represents the invocation
+	// ID of the current function invocation.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28'
+	FaaSInvocationIDKey = attribute.Key("faas.invocation_id")
+)
+
+// FaaSInvocationID returns an attribute KeyValue conforming to the
+// "faas.invocation_id" semantic conventions. It represents the invocation ID
+// of the current function invocation.
+func FaaSInvocationID(val string) attribute.KeyValue {
+	return FaaSInvocationIDKey.String(val)
+}
+
+// Semantic Convention for FaaS triggered as a response to some data source
+// operation such as a database or filesystem read/write.
+const (
+	// FaaSDocumentCollectionKey is the attribute Key conforming to the
+	// "faas.document.collection" semantic conventions. It represents the name
+	// of the source on which the triggering operation was performed. For
+	// example, in Cloud Storage or S3 corresponds to the bucket name, and in
+	// Cosmos DB to the database name.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: experimental
+	// Examples: 'myBucketName', 'myDBName'
+	FaaSDocumentCollectionKey = attribute.Key("faas.document.collection")
+
+	// FaaSDocumentNameKey is the attribute Key conforming to the
+	// "faas.document.name" semantic conventions. It represents the document
+	// name/table subjected to the operation. For example, in Cloud Storage or
+	// S3 is the name of the file, and in Cosmos DB the table name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'myFile.txt', 'myTableName'
+	FaaSDocumentNameKey = attribute.Key("faas.document.name")
+
+	// FaaSDocumentOperationKey is the attribute Key conforming to the
+	// "faas.document.operation" semantic conventions. It represents the type
+	// of the operation that was performed on the data.
+	//
+	// Type: Enum
+	// RequirementLevel: Required
+	// Stability: experimental
+	FaaSDocumentOperationKey = attribute.Key("faas.document.operation")
+
+	// FaaSDocumentTimeKey is the attribute Key conforming to the
+	// "faas.document.time" semantic conventions. It represents a string
+	// containing the time when the data was accessed in the [ISO
+	// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+	// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2020-01-23T13:47:06Z' + FaaSDocumentTimeKey = attribute.Key("faas.document.time") +) + +var ( + // When a new object is created + FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") + // When an object is modified + FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") + // When an object is deleted + FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete") +) + +// FaaSDocumentCollection returns an attribute KeyValue conforming to the +// "faas.document.collection" semantic conventions. It represents the name of +// the source on which the triggering operation was performed. For example, in +// Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the +// database name. +func FaaSDocumentCollection(val string) attribute.KeyValue { + return FaaSDocumentCollectionKey.String(val) +} + +// FaaSDocumentName returns an attribute KeyValue conforming to the +// "faas.document.name" semantic conventions. It represents the document +// name/table subjected to the operation. For example, in Cloud Storage or S3 +// is the name of the file, and in Cosmos DB the table name. +func FaaSDocumentName(val string) attribute.KeyValue { + return FaaSDocumentNameKey.String(val) +} + +// FaaSDocumentTime returns an attribute KeyValue conforming to the +// "faas.document.time" semantic conventions. It represents a string containing +// the time when the data was accessed in the [ISO +// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format +// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). +func FaaSDocumentTime(val string) attribute.KeyValue { + return FaaSDocumentTimeKey.String(val) +} + +// Semantic Convention for FaaS scheduled to be executed regularly. +const ( + // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic + // conventions. It represents a string containing the schedule period as + // [Cron + // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '0/5 * * * ? *' + FaaSCronKey = attribute.Key("faas.cron") + + // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic + // conventions. It represents a string containing the function invocation + // time in the [ISO + // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format + // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2020-01-23T13:47:06Z' + FaaSTimeKey = attribute.Key("faas.time") +) + +// FaaSCron returns an attribute KeyValue conforming to the "faas.cron" +// semantic conventions. It represents a string containing the schedule period +// as [Cron +// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). +func FaaSCron(val string) attribute.KeyValue { + return FaaSCronKey.String(val) +} + +// FaaSTime returns an attribute KeyValue conforming to the "faas.time" +// semantic conventions. It represents a string containing the function +// invocation time in the [ISO +// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format +// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). 
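+//
+// A minimal usage sketch (illustrative only; assumes an active trace.Span
+// named span, and reuses the example values documented above):
+//
+//	span.SetAttributes(
+//		semconv.FaaSCron("0/5 * * * ? *"),
+//		semconv.FaaSTime("2020-01-23T13:47:06Z"),
+//	)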
+func FaaSTime(val string) attribute.KeyValue { + return FaaSTimeKey.String(val) +} + +// Contains additional attributes for incoming FaaS spans. +const ( + // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart" + // semantic conventions. It represents a boolean that is true if the + // serverless function is executed for the first time (aka cold-start). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + FaaSColdstartKey = attribute.Key("faas.coldstart") +) + +// FaaSColdstart returns an attribute KeyValue conforming to the +// "faas.coldstart" semantic conventions. It represents a boolean that is true +// if the serverless function is executed for the first time (aka cold-start). +func FaaSColdstart(val bool) attribute.KeyValue { + return FaaSColdstartKey.Bool(val) +} + +// The `aws` conventions apply to operations using the AWS SDK. They map +// request or response parameters in AWS SDK API calls to attributes on a Span. +// The conventions have been collected over time based on feedback from AWS +// users of tracing and will continue to evolve as new interesting conventions +// are found. +// Some descriptions are also provided for populating general OpenTelemetry +// semantic conventions based on these APIs. +const ( + // AWSRequestIDKey is the attribute Key conforming to the "aws.request_id" + // semantic conventions. It represents the AWS request ID as returned in + // the response headers `x-amz-request-id` or `x-amz-requestid`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '79b9da39-b7ae-508a-a6bc-864b2829c622', 'C9ER4AJX75574TDJ' + AWSRequestIDKey = attribute.Key("aws.request_id") +) + +// AWSRequestID returns an attribute KeyValue conforming to the +// "aws.request_id" semantic conventions. It represents the AWS request ID as +// returned in the response headers `x-amz-request-id` or `x-amz-requestid`. +func AWSRequestID(val string) attribute.KeyValue { + return AWSRequestIDKey.String(val) +} + +// Attributes that exist for multiple DynamoDB request types. +const ( + // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the + // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the + // value of the `AttributesToGet` request parameter. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'lives', 'id' + AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") + + // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the + // "aws.dynamodb.consistent_read" semantic conventions. It represents the + // value of the `ConsistentRead` request parameter. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") + + // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the + // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the + // JSON-serialized value of each item in the `ConsumedCapacity` response + // field. 
+ // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { + // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" : + // { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table": + // { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number }, "TableName": "string", + // "WriteCapacityUnits": number }' + AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") + + // AWSDynamoDBIndexNameKey is the attribute Key conforming to the + // "aws.dynamodb.index_name" semantic conventions. It represents the value + // of the `IndexName` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'name_to_group' + AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") + + // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to + // the "aws.dynamodb.item_collection_metrics" semantic conventions. It + // represents the JSON-serialized value of the `ItemCollectionMetrics` + // response field. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": + // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { + // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ], + // "NULL": boolean, "S": "string", "SS": [ "string" ] } }, + // "SizeEstimateRangeGB": [ number ] } ] }' + AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") + + // AWSDynamoDBLimitKey is the attribute Key conforming to the + // "aws.dynamodb.limit" semantic conventions. It represents the value of + // the `Limit` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 10 + AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") + + // AWSDynamoDBProjectionKey is the attribute Key conforming to the + // "aws.dynamodb.projection" semantic conventions. It represents the value + // of the `ProjectionExpression` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Title', 'Title, Price, Color', 'Title, Description, + // RelatedItems, ProductReviews' + AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") + + // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to + // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It + // represents the value of the `ProvisionedThroughput.ReadCapacityUnits` + // request parameter. + // + // Type: double + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") + + // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming + // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions. + // It represents the value of the + // `ProvisionedThroughput.WriteCapacityUnits` request parameter. 
+ // + // Type: double + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") + + // AWSDynamoDBSelectKey is the attribute Key conforming to the + // "aws.dynamodb.select" semantic conventions. It represents the value of + // the `Select` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'ALL_ATTRIBUTES', 'COUNT' + AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") + + // AWSDynamoDBTableNamesKey is the attribute Key conforming to the + // "aws.dynamodb.table_names" semantic conventions. It represents the keys + // in the `RequestItems` object field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Users', 'Cats' + AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") +) + +// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to +// the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the +// value of the `AttributesToGet` request parameter. +func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue { + return AWSDynamoDBAttributesToGetKey.StringSlice(val) +} + +// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the +// "aws.dynamodb.consistent_read" semantic conventions. It represents the value +// of the `ConsistentRead` request parameter. +func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue { + return AWSDynamoDBConsistentReadKey.Bool(val) +} + +// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to +// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the +// JSON-serialized value of each item in the `ConsumedCapacity` response field. +func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue { + return AWSDynamoDBConsumedCapacityKey.StringSlice(val) +} + +// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the +// "aws.dynamodb.index_name" semantic conventions. It represents the value of +// the `IndexName` request parameter. +func AWSDynamoDBIndexName(val string) attribute.KeyValue { + return AWSDynamoDBIndexNameKey.String(val) +} + +// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming +// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It +// represents the JSON-serialized value of the `ItemCollectionMetrics` response +// field. +func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue { + return AWSDynamoDBItemCollectionMetricsKey.String(val) +} + +// AWSDynamoDBLimit returns an attribute KeyValue conforming to the +// "aws.dynamodb.limit" semantic conventions. It represents the value of the +// `Limit` request parameter. +func AWSDynamoDBLimit(val int) attribute.KeyValue { + return AWSDynamoDBLimitKey.Int(val) +} + +// AWSDynamoDBProjection returns an attribute KeyValue conforming to the +// "aws.dynamodb.projection" semantic conventions. It represents the value of +// the `ProjectionExpression` request parameter. +func AWSDynamoDBProjection(val string) attribute.KeyValue { + return AWSDynamoDBProjectionKey.String(val) +} + +// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue +// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic +// conventions. It represents the value of the +// `ProvisionedThroughput.ReadCapacityUnits` request parameter. 
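+//
+// A minimal usage sketch (illustrative only; assumes an active trace.Span
+// named span, and reuses the example values documented above):
+//
+//	span.SetAttributes(
+//		semconv.AWSDynamoDBProvisionedReadCapacity(1.0),
+//		semconv.AWSDynamoDBProvisionedWriteCapacity(2.0),
+//	)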
+func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedReadCapacityKey.Float64(val) +} + +// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue +// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic +// conventions. It represents the value of the +// `ProvisionedThroughput.WriteCapacityUnits` request parameter. +func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val) +} + +// AWSDynamoDBSelect returns an attribute KeyValue conforming to the +// "aws.dynamodb.select" semantic conventions. It represents the value of the +// `Select` request parameter. +func AWSDynamoDBSelect(val string) attribute.KeyValue { + return AWSDynamoDBSelectKey.String(val) +} + +// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the +// "aws.dynamodb.table_names" semantic conventions. It represents the keys in +// the `RequestItems` object field. +func AWSDynamoDBTableNames(val ...string) attribute.KeyValue { + return AWSDynamoDBTableNamesKey.StringSlice(val) +} + +// DynamoDB.CreateTable +const ( + // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to + // the "aws.dynamodb.global_secondary_indexes" semantic conventions. It + // represents the JSON-serialized value of each item of the + // `GlobalSecondaryIndexes` request field + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": + // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ + // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": { + // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }' + AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") + + // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to + // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It + // represents the JSON-serialized value of each item of the + // `LocalSecondaryIndexes` request field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "IndexARN": "string", "IndexName": "string", + // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ { + // "AttributeName": "string", "KeyType": "string" } ], "Projection": { + // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }' + AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") +) + +// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue +// conforming to the "aws.dynamodb.global_secondary_indexes" semantic +// conventions. It represents the JSON-serialized value of each item of the +// `GlobalSecondaryIndexes` request field +func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue { + return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val) +} + +// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming +// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It +// represents the JSON-serialized value of each item of the +// `LocalSecondaryIndexes` request field. 
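+//
+// A minimal usage sketch (illustrative only; assumes an active trace.Span
+// named span; the JSON payload is abbreviated from the example above):
+//
+//	span.SetAttributes(semconv.AWSDynamoDBLocalSecondaryIndexes(
+//		`{ "IndexName": "string", "IndexSizeBytes": number }`,
+//	))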
+func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue {
+	return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val)
+}
+
+// DynamoDB.ListTables
+const (
+	// AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the
+	// "aws.dynamodb.exclusive_start_table" semantic conventions. It represents
+	// the value of the `ExclusiveStartTableName` request parameter.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'Users', 'CatsTable'
+	AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table")
+
+	// AWSDynamoDBTableCountKey is the attribute Key conforming to the
+	// "aws.dynamodb.table_count" semantic conventions. It represents the
+	// number of items in the `TableNames` response parameter.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 20
+	AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count")
+)
+
+// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming
+// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It
+// represents the value of the `ExclusiveStartTableName` request parameter.
+func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue {
+	return AWSDynamoDBExclusiveStartTableKey.String(val)
+}
+
+// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.table_count" semantic conventions. It represents the
+// number of items in the `TableNames` response parameter.
+func AWSDynamoDBTableCount(val int) attribute.KeyValue {
+	return AWSDynamoDBTableCountKey.Int(val)
+}
+
+// DynamoDB.Query
+const (
+	// AWSDynamoDBScanForwardKey is the attribute Key conforming to the
+	// "aws.dynamodb.scan_forward" semantic conventions. It represents the
+	// value of the `ScanIndexForward` request parameter.
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: experimental
+	AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward")
+)
+
+// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the
+// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of
+// the `ScanIndexForward` request parameter.
+func AWSDynamoDBScanForward(val bool) attribute.KeyValue {
+	return AWSDynamoDBScanForwardKey.Bool(val)
+}
+
+// DynamoDB.Scan
+const (
+	// AWSDynamoDBCountKey is the attribute Key conforming to the
+	// "aws.dynamodb.count" semantic conventions. It represents the value of
+	// the `Count` response parameter.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 10
+	AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count")
+
+	// AWSDynamoDBScannedCountKey is the attribute Key conforming to the
+	// "aws.dynamodb.scanned_count" semantic conventions. It represents the
+	// value of the `ScannedCount` response parameter.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 50
+	AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count")
+
+	// AWSDynamoDBSegmentKey is the attribute Key conforming to the
+	// "aws.dynamodb.segment" semantic conventions. It represents the value of
+	// the `Segment` request parameter.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 10
+	AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment")
+
+	// AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the
+	// "aws.dynamodb.total_segments" semantic conventions. It represents the
+	// value of the `TotalSegments` request parameter.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 100
+	AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments")
+)
+
+// AWSDynamoDBCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.count" semantic conventions. It represents the value of the
+// `Count` response parameter.
+func AWSDynamoDBCount(val int) attribute.KeyValue {
+	return AWSDynamoDBCountKey.Int(val)
+}
+
+// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.scanned_count" semantic conventions. It represents the value
+// of the `ScannedCount` response parameter.
+func AWSDynamoDBScannedCount(val int) attribute.KeyValue {
+	return AWSDynamoDBScannedCountKey.Int(val)
+}
+
+// AWSDynamoDBSegment returns an attribute KeyValue conforming to the
+// "aws.dynamodb.segment" semantic conventions. It represents the value of the
+// `Segment` request parameter.
+func AWSDynamoDBSegment(val int) attribute.KeyValue {
+	return AWSDynamoDBSegmentKey.Int(val)
+}
+
+// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the
+// "aws.dynamodb.total_segments" semantic conventions. It represents the value
+// of the `TotalSegments` request parameter.
+func AWSDynamoDBTotalSegments(val int) attribute.KeyValue {
+	return AWSDynamoDBTotalSegmentsKey.Int(val)
+}
+
+// DynamoDB.UpdateTable
+const (
+	// AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to
+	// the "aws.dynamodb.attribute_definitions" semantic conventions. It
+	// represents the JSON-serialized value of each item in the
+	// `AttributeDefinitions` request field.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '{ "AttributeName": "string", "AttributeType": "string" }'
+	AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions")
+
+	// AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key
+	// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
+	// conventions. It represents the JSON-serialized value of each item in the
+	// `GlobalSecondaryIndexUpdates` request field.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ {
+	// "AttributeName": "string", "KeyType": "string" } ], "Projection": {
+	// "NonKeyAttributes": [ "string" ], "ProjectionType": "string" },
+	// "ProvisionedThroughput": { "ReadCapacityUnits": number,
+	// "WriteCapacityUnits": number } }'
+	AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates")
+)
+
+// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming
+// to the "aws.dynamodb.attribute_definitions" semantic conventions. It
+// represents the JSON-serialized value of each item in the
+// `AttributeDefinitions` request field.
+func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue {
+	return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val)
+}
+
+// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue
+// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
+// conventions. It represents the JSON-serialized value of each item in the
+// `GlobalSecondaryIndexUpdates` request field.
+func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue {
+	return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val)
+}
+
+// Attributes that exist for S3 request types.
+const (
+	// AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket"
+	// semantic conventions. It represents the S3 bucket name the request
+	// refers to. Corresponds to the `--bucket` parameter of the [S3
+	// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+	// operations.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'some-bucket-name'
+	// Note: The `bucket` attribute is applicable to all S3 operations that
+	// reference a bucket, i.e. that require the bucket name as a mandatory
+	// parameter.
+	// This applies to almost all S3 operations except `list-buckets`.
+	AWSS3BucketKey = attribute.Key("aws.s3.bucket")
+
+	// AWSS3CopySourceKey is the attribute Key conforming to the
+	// "aws.s3.copy_source" semantic conventions. It represents the source
+	// object (in the form `bucket`/`key`) for the copy operation.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'someFile.yml'
+	// Note: The `copy_source` attribute applies to S3 copy operations and
+	// corresponds to the `--copy-source` parameter
+	// of the [copy-object operation within the S3
+	// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html).
+	// This applies in particular to the following operations:
+	//
+	// -
+	// [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html)
+	// -
+	// [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
+	AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source")
+
+	// AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete"
+	// semantic conventions. It represents the delete request container that
+	// specifies the objects to be deleted.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples:
+	// 'Objects=[{Key=string,VersionID=string},{Key=string,VersionID=string}],Quiet=boolean'
+	// Note: The `delete` attribute is only applicable to the
+	// [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html)
+	// operation.
+	// The `delete` attribute corresponds to the `--delete` parameter of the
+	// [delete-objects operation within the S3
+	// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html).
+	AWSS3DeleteKey = attribute.Key("aws.s3.delete")
+
+	// AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic
+	// conventions. It represents the S3 object key the request refers to.
+	// Corresponds to the `--key` parameter of the [S3
+	// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+	// operations.
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'someFile.yml' + // Note: The `key` attribute is applicable to all object-related S3 + // operations, i.e. that require the object key as a mandatory parameter. + // This applies in particular to the following operations: + // + // - + // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) + // - + // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) + // - + // [get-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html) + // - + // [head-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html) + // - + // [put-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html) + // - + // [restore-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html) + // - + // [select-object-content](https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html) + // - + // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) + // - + // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) + // - + // [create-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html) + // - + // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) + // - + // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) + // - + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + AWSS3KeyKey = attribute.Key("aws.s3.key") + + // AWSS3PartNumberKey is the attribute Key conforming to the + // "aws.s3.part_number" semantic conventions. It represents the part number + // of the part being uploaded in a multipart-upload operation. This is a + // positive integer between 1 and 10,000. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 3456 + // Note: The `part_number` attribute is only applicable to the + // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) + // and + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + // operations. + // The `part_number` attribute corresponds to the `--part-number` parameter + // of the + // [upload-part operation within the S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html). + AWSS3PartNumberKey = attribute.Key("aws.s3.part_number") + + // AWSS3UploadIDKey is the attribute Key conforming to the + // "aws.s3.upload_id" semantic conventions. It represents the upload ID + // that identifies the multipart upload. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ' + // Note: The `upload_id` attribute applies to S3 multipart-upload + // operations and corresponds to the `--upload-id` parameter + // of the [S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) + // multipart operations. 
+ // This applies in particular to the following operations: + // + // - + // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) + // - + // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) + // - + // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) + // - + // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) + // - + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id") +) + +// AWSS3Bucket returns an attribute KeyValue conforming to the +// "aws.s3.bucket" semantic conventions. It represents the S3 bucket name the +// request refers to. Corresponds to the `--bucket` parameter of the [S3 +// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) +// operations. +func AWSS3Bucket(val string) attribute.KeyValue { + return AWSS3BucketKey.String(val) +} + +// AWSS3CopySource returns an attribute KeyValue conforming to the +// "aws.s3.copy_source" semantic conventions. It represents the source object +// (in the form `bucket`/`key`) for the copy operation. +func AWSS3CopySource(val string) attribute.KeyValue { + return AWSS3CopySourceKey.String(val) +} + +// AWSS3Delete returns an attribute KeyValue conforming to the +// "aws.s3.delete" semantic conventions. It represents the delete request +// container that specifies the objects to be deleted. +func AWSS3Delete(val string) attribute.KeyValue { + return AWSS3DeleteKey.String(val) +} + +// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key" +// semantic conventions. It represents the S3 object key the request refers to. +// Corresponds to the `--key` parameter of the [S3 +// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) +// operations. +func AWSS3Key(val string) attribute.KeyValue { + return AWSS3KeyKey.String(val) +} + +// AWSS3PartNumber returns an attribute KeyValue conforming to the +// "aws.s3.part_number" semantic conventions. It represents the part number of +// the part being uploaded in a multipart-upload operation. This is a positive +// integer between 1 and 10,000. +func AWSS3PartNumber(val int) attribute.KeyValue { + return AWSS3PartNumberKey.Int(val) +} + +// AWSS3UploadID returns an attribute KeyValue conforming to the +// "aws.s3.upload_id" semantic conventions. It represents the upload ID that +// identifies the multipart upload. +func AWSS3UploadID(val string) attribute.KeyValue { + return AWSS3UploadIDKey.String(val) +} + +// Semantic conventions to apply when instrumenting the GraphQL implementation. +// They map GraphQL operations to attributes on a Span. +const ( + // GraphqlDocumentKey is the attribute Key conforming to the + // "graphql.document" semantic conventions. It represents the GraphQL + // document being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'query findBookByID { bookByID(id: ?) { name } }' + // Note: The value may be sanitized to exclude sensitive information. + GraphqlDocumentKey = attribute.Key("graphql.document") + + // GraphqlOperationNameKey is the attribute Key conforming to the + // "graphql.operation.name" semantic conventions. It represents the name of + // the operation being executed. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'findBookByID' + GraphqlOperationNameKey = attribute.Key("graphql.operation.name") + + // GraphqlOperationTypeKey is the attribute Key conforming to the + // "graphql.operation.type" semantic conventions. It represents the type of + // the operation being executed. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'query', 'mutation', 'subscription' + GraphqlOperationTypeKey = attribute.Key("graphql.operation.type") +) + +var ( + // GraphQL query + GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query") + // GraphQL mutation + GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation") + // GraphQL subscription + GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription") +) + +// GraphqlDocument returns an attribute KeyValue conforming to the +// "graphql.document" semantic conventions. It represents the GraphQL document +// being executed. +func GraphqlDocument(val string) attribute.KeyValue { + return GraphqlDocumentKey.String(val) +} + +// GraphqlOperationName returns an attribute KeyValue conforming to the +// "graphql.operation.name" semantic conventions. It represents the name of the +// operation being executed. +func GraphqlOperationName(val string) attribute.KeyValue { + return GraphqlOperationNameKey.String(val) +} diff --git a/vendor/go.opentelemetry.io/otel/trace/provider.go b/vendor/go.opentelemetry.io/otel/trace/provider.go new file mode 100644 index 0000000000..ef85cb70c6 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/provider.go @@ -0,0 +1,59 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/trace" + +import "go.opentelemetry.io/otel/trace/embedded" + +// TracerProvider provides Tracers that are used by instrumentation code to +// trace computational workflows. +// +// A TracerProvider is the collection destination of all Spans from Tracers it +// provides, it represents a unique telemetry collection pipeline. How that +// pipeline is defined, meaning how those Spans are collected, processed, and +// where they are exported, depends on its implementation. Instrumentation +// authors do not need to define this implementation, rather just use the +// provided Tracers to instrument code. +// +// Commonly, instrumentation code will accept a TracerProvider implementation +// at runtime from its users or it can simply use the globally registered one +// (see https://pkg.go.dev/go.opentelemetry.io/otel#GetTracerProvider). +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type TracerProvider interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.TracerProvider + + // Tracer returns a unique Tracer scoped to be used by instrumentation code + // to trace computational workflows. The scope and identity of that + // instrumentation code is uniquely defined by the name and options passed. + // + // The passed name needs to uniquely identify instrumentation code. 
+ // Therefore, it is recommended that name is the Go package name of the + // library providing instrumentation (note: not the code being + // instrumented). Instrumentation libraries can have multiple versions, + // therefore, the WithInstrumentationVersion option should be used to + // distinguish these different codebases. Additionally, instrumentation + // libraries may sometimes use traces to communicate different domains of + // workflow data (i.e. using spans to communicate workflow events only). If + // this is the case, the WithScopeAttributes option should be used to + // uniquely identify Tracers that handle the different domains of workflow + // data. + // + // If the same name and options are passed multiple times, the same Tracer + // will be returned (it is up to the implementation if this will be the + // same underlying instance of that Tracer or not). It is not necessary to + // call this multiple times with the same name and options to get an + // up-to-date Tracer. All implementations will ensure any TracerProvider + // configuration changes are propagated to all provided Tracers. + // + // If name is empty, then an implementation defined default name will be + // used instead. + // + // This method is safe to call concurrently. + Tracer(name string, options ...TracerOption) Tracer +} diff --git a/vendor/go.opentelemetry.io/otel/trace/span.go b/vendor/go.opentelemetry.io/otel/trace/span.go new file mode 100644 index 0000000000..d3aa476ee1 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/span.go @@ -0,0 +1,177 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/trace" + +import ( + "context" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/trace/embedded" +) + +// Span is the individual component of a trace. It represents a single named +// and timed operation of a workflow that is traced. A Tracer is used to +// create a Span and it is then up to the operation the Span represents to +// properly end the Span when the operation itself ends. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Span interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Span + + // End completes the Span. The Span is considered complete and ready to be + // delivered through the rest of the telemetry pipeline after this method + // is called. Therefore, updates to the Span are not allowed after this + // method has been called. + End(options ...SpanEndOption) + + // AddEvent adds an event with the provided name and options. + AddEvent(name string, options ...EventOption) + + // AddLink adds a link. + // Adding links at span creation using WithLinks is preferred to calling AddLink + // later, for contexts that are available during span creation, because head + // sampling decisions can only consider information present during span creation. + AddLink(link Link) + + // IsRecording returns the recording state of the Span. It will return + // true if the Span is active and events can be recorded. + IsRecording() bool + + // RecordError will record err as an exception span event for this span. 
An
+	// additional call to SetStatus is required if the Status of the Span should
+	// be set to Error, as this method does not change the Span status. If this
+	// span is not being recorded or err is nil then this method does nothing.
+	RecordError(err error, options ...EventOption)
+
+	// SpanContext returns the SpanContext of the Span. The returned SpanContext
+	// is usable even after the End method has been called for the Span.
+	SpanContext() SpanContext
+
+	// SetStatus sets the status of the Span in the form of a code and a
+	// description, provided the status hasn't already been set to a higher
+	// value before (OK > Error > Unset). The description is only included in a
+	// status when the code is for an error.
+	SetStatus(code codes.Code, description string)
+
+	// SetName sets the Span name.
+	SetName(name string)
+
+	// SetAttributes sets kv as attributes of the Span. If a key from kv
+	// already exists for an attribute of the Span it will be overwritten with
+	// the value contained in kv.
+	SetAttributes(kv ...attribute.KeyValue)
+
+	// TracerProvider returns a TracerProvider that can be used to generate
+	// additional Spans on the same telemetry pipeline as the current Span.
+	TracerProvider() TracerProvider
+}
+
+// Link is the relationship between two Spans. The relationship can be within
+// the same Trace or across different Traces.
+//
+// For example, a Link is used in the following situations:
+//
+//  1. Batch Processing: A batch of operations may contain operations
+//     associated with one or more traces/spans. Since there can only be one
+//     parent SpanContext, a Link is used to keep reference to the
+//     SpanContext of all operations in the batch.
+//  2. Public Endpoint: A SpanContext for an incoming client request on a
+//     public endpoint should be considered untrusted. In such a case, a new
+//     trace with its own identity and sampling decision needs to be created,
+//     but this new trace needs to be related to the original trace in some
+//     form. A Link is used to keep reference to the original SpanContext and
+//     track the relationship.
+type Link struct {
+	// SpanContext of the linked Span.
+	SpanContext SpanContext
+
+	// Attributes describe the aspects of the link.
+	Attributes []attribute.KeyValue
+}
+
+// LinkFromContext returns a link encapsulating the SpanContext in the provided
+// ctx.
+func LinkFromContext(ctx context.Context, attrs ...attribute.KeyValue) Link {
+	return Link{
+		SpanContext: SpanContextFromContext(ctx),
+		Attributes:  attrs,
+	}
+}
+
+// SpanKind is the role a Span plays in a Trace.
+type SpanKind int
+
+// As a convenience, these match the proto definition, see
+// https://github.com/open-telemetry/opentelemetry-proto/blob/30d237e1ff3ab7aa50e0922b5bebdd93505090af/opentelemetry/proto/trace/v1/trace.proto#L101-L129
+//
+// The unspecified value is not a valid `SpanKind`. Use `ValidateSpanKind()`
+// to coerce a span kind to a valid value.
+const (
+	// SpanKindUnspecified is an unspecified SpanKind and is not a valid
+	// SpanKind. SpanKindUnspecified should be replaced with SpanKindInternal
+	// if it is received.
+	SpanKindUnspecified SpanKind = 0
+	// SpanKindInternal is a SpanKind for a Span that represents an internal
+	// operation within an application.
+	SpanKindInternal SpanKind = 1
+	// SpanKindServer is a SpanKind for a Span that represents the operation
+	// of handling a request from a client.
+	SpanKindServer SpanKind = 2
+	// SpanKindClient is a SpanKind for a Span that represents the operation
+	// of a client making a request to a server.
+	SpanKindClient SpanKind = 3
+	// SpanKindProducer is a SpanKind for a Span that represents the operation
+	// of a producer sending a message to a message broker. Unlike
+	// SpanKindClient and SpanKindServer, there is often no direct
+	// relationship between this kind of Span and a SpanKindConsumer kind. A
+	// SpanKindProducer Span will end once the message is accepted by the
+	// message broker which might not overlap with the processing of that
+	// message.
+	SpanKindProducer SpanKind = 4
+	// SpanKindConsumer is a SpanKind for a Span that represents the operation
+	// of a consumer receiving a message from a message broker. Like
+	// SpanKindProducer Spans, there is often no direct relationship between
+	// this Span and the Span that produced the message.
+	SpanKindConsumer SpanKind = 5
+)
+
+// ValidateSpanKind returns a valid span kind value. This will coerce
+// invalid values into the default value, SpanKindInternal.
+func ValidateSpanKind(spanKind SpanKind) SpanKind {
+	switch spanKind {
+	case SpanKindInternal,
+		SpanKindServer,
+		SpanKindClient,
+		SpanKindProducer,
+		SpanKindConsumer:
+		// valid
+		return spanKind
+	default:
+		return SpanKindInternal
+	}
+}
+
+// String returns the specified name of the SpanKind in lower-case.
+func (sk SpanKind) String() string {
+	switch sk {
+	case SpanKindInternal:
+		return "internal"
+	case SpanKindServer:
+		return "server"
+	case SpanKindClient:
+		return "client"
+	case SpanKindProducer:
+		return "producer"
+	case SpanKindConsumer:
+		return "consumer"
+	default:
+		return "unspecified"
+	}
+}
diff --git a/vendor/go.opentelemetry.io/otel/trace/trace.go b/vendor/go.opentelemetry.io/otel/trace/trace.go
index 28877d4ab4..d49adf671b 100644
--- a/vendor/go.opentelemetry.io/otel/trace/trace.go
+++ b/vendor/go.opentelemetry.io/otel/trace/trace.go
@@ -5,13 +5,8 @@ package trace // import "go.opentelemetry.io/otel/trace"
 
 import (
 	"bytes"
-	"context"
 	"encoding/hex"
 	"encoding/json"
-
-	"go.opentelemetry.io/otel/attribute"
-	"go.opentelemetry.io/otel/codes"
-	"go.opentelemetry.io/otel/trace/embedded"
 )
 
 const (
@@ -326,247 +321,3 @@ func (sc SpanContext) MarshalJSON() ([]byte, error) {
 		Remote: sc.remote,
 	})
 }
-
-// Span is the individual component of a trace. It represents a single named
-// and timed operation of a workflow that is traced. A Tracer is used to
-// create a Span and it is then up to the operation the Span represents to
-// properly end the Span when the operation itself ends.
-//
-// Warning: Methods may be added to this interface in minor releases. See
-// package documentation on API implementation for information on how to set
-// default behavior for unimplemented methods.
-type Span interface {
-	// Users of the interface can ignore this. This embedded type is only used
-	// by implementations of this interface. See the "API Implementations"
-	// section of the package documentation for more information.
-	embedded.Span
-
-	// End completes the Span. The Span is considered complete and ready to be
-	// delivered through the rest of the telemetry pipeline after this method
-	// is called. Therefore, updates to the Span are not allowed after this
-	// method has been called.
-	End(options ...SpanEndOption)
-
-	// AddEvent adds an event with the provided name and options.
-	AddEvent(name string, options ...EventOption)
-
-	// AddLink adds a link.
- // Adding links at span creation using WithLinks is preferred to calling AddLink - // later, for contexts that are available during span creation, because head - // sampling decisions can only consider information present during span creation. - AddLink(link Link) - - // IsRecording returns the recording state of the Span. It will return - // true if the Span is active and events can be recorded. - IsRecording() bool - - // RecordError will record err as an exception span event for this span. An - // additional call to SetStatus is required if the Status of the Span should - // be set to Error, as this method does not change the Span status. If this - // span is not being recorded or err is nil then this method does nothing. - RecordError(err error, options ...EventOption) - - // SpanContext returns the SpanContext of the Span. The returned SpanContext - // is usable even after the End method has been called for the Span. - SpanContext() SpanContext - - // SetStatus sets the status of the Span in the form of a code and a - // description, provided the status hasn't already been set to a higher - // value before (OK > Error > Unset). The description is only included in a - // status when the code is for an error. - SetStatus(code codes.Code, description string) - - // SetName sets the Span name. - SetName(name string) - - // SetAttributes sets kv as attributes of the Span. If a key from kv - // already exists for an attribute of the Span it will be overwritten with - // the value contained in kv. - SetAttributes(kv ...attribute.KeyValue) - - // TracerProvider returns a TracerProvider that can be used to generate - // additional Spans on the same telemetry pipeline as the current Span. - TracerProvider() TracerProvider -} - -// Link is the relationship between two Spans. The relationship can be within -// the same Trace or across different Traces. -// -// For example, a Link is used in the following situations: -// -// 1. Batch Processing: A batch of operations may contain operations -// associated with one or more traces/spans. Since there can only be one -// parent SpanContext, a Link is used to keep reference to the -// SpanContext of all operations in the batch. -// 2. Public Endpoint: A SpanContext for an in incoming client request on a -// public endpoint should be considered untrusted. In such a case, a new -// trace with its own identity and sampling decision needs to be created, -// but this new trace needs to be related to the original trace in some -// form. A Link is used to keep reference to the original SpanContext and -// track the relationship. -type Link struct { - // SpanContext of the linked Span. - SpanContext SpanContext - - // Attributes describe the aspects of the link. - Attributes []attribute.KeyValue -} - -// LinkFromContext returns a link encapsulating the SpanContext in the provided ctx. -func LinkFromContext(ctx context.Context, attrs ...attribute.KeyValue) Link { - return Link{ - SpanContext: SpanContextFromContext(ctx), - Attributes: attrs, - } -} - -// SpanKind is the role a Span plays in a Trace. -type SpanKind int - -// As a convenience, these match the proto definition, see -// https://github.com/open-telemetry/opentelemetry-proto/blob/30d237e1ff3ab7aa50e0922b5bebdd93505090af/opentelemetry/proto/trace/v1/trace.proto#L101-L129 -// -// The unspecified value is not a valid `SpanKind`. Use `ValidateSpanKind()` -// to coerce a span kind to a valid value. -const ( - // SpanKindUnspecified is an unspecified SpanKind and is not a valid - // SpanKind. 
SpanKindUnspecified should be replaced with SpanKindInternal - // if it is received. - SpanKindUnspecified SpanKind = 0 - // SpanKindInternal is a SpanKind for a Span that represents an internal - // operation within an application. - SpanKindInternal SpanKind = 1 - // SpanKindServer is a SpanKind for a Span that represents the operation - // of handling a request from a client. - SpanKindServer SpanKind = 2 - // SpanKindClient is a SpanKind for a Span that represents the operation - // of client making a request to a server. - SpanKindClient SpanKind = 3 - // SpanKindProducer is a SpanKind for a Span that represents the operation - // of a producer sending a message to a message broker. Unlike - // SpanKindClient and SpanKindServer, there is often no direct - // relationship between this kind of Span and a SpanKindConsumer kind. A - // SpanKindProducer Span will end once the message is accepted by the - // message broker which might not overlap with the processing of that - // message. - SpanKindProducer SpanKind = 4 - // SpanKindConsumer is a SpanKind for a Span that represents the operation - // of a consumer receiving a message from a message broker. Like - // SpanKindProducer Spans, there is often no direct relationship between - // this Span and the Span that produced the message. - SpanKindConsumer SpanKind = 5 -) - -// ValidateSpanKind returns a valid span kind value. This will coerce -// invalid values into the default value, SpanKindInternal. -func ValidateSpanKind(spanKind SpanKind) SpanKind { - switch spanKind { - case SpanKindInternal, - SpanKindServer, - SpanKindClient, - SpanKindProducer, - SpanKindConsumer: - // valid - return spanKind - default: - return SpanKindInternal - } -} - -// String returns the specified name of the SpanKind in lower-case. -func (sk SpanKind) String() string { - switch sk { - case SpanKindInternal: - return "internal" - case SpanKindServer: - return "server" - case SpanKindClient: - return "client" - case SpanKindProducer: - return "producer" - case SpanKindConsumer: - return "consumer" - default: - return "unspecified" - } -} - -// Tracer is the creator of Spans. -// -// Warning: Methods may be added to this interface in minor releases. See -// package documentation on API implementation for information on how to set -// default behavior for unimplemented methods. -type Tracer interface { - // Users of the interface can ignore this. This embedded type is only used - // by implementations of this interface. See the "API Implementations" - // section of the package documentation for more information. - embedded.Tracer - - // Start creates a span and a context.Context containing the newly-created span. - // - // If the context.Context provided in `ctx` contains a Span then the newly-created - // Span will be a child of that span, otherwise it will be a root span. This behavior - // can be overridden by providing `WithNewRoot()` as a SpanOption, causing the - // newly-created Span to be a root span even if `ctx` contains a Span. - // - // When creating a Span it is recommended to provide all known span attributes using - // the `WithAttributes()` SpanOption as samplers will only have access to the - // attributes provided when a Span is created. - // - // Any Span that is created MUST also be ended. This is the responsibility of the user. - // Implementations of this API may leak memory or other resources if Spans are not ended. 
- Start(ctx context.Context, spanName string, opts ...SpanStartOption) (context.Context, Span) -} - -// TracerProvider provides Tracers that are used by instrumentation code to -// trace computational workflows. -// -// A TracerProvider is the collection destination of all Spans from Tracers it -// provides, it represents a unique telemetry collection pipeline. How that -// pipeline is defined, meaning how those Spans are collected, processed, and -// where they are exported, depends on its implementation. Instrumentation -// authors do not need to define this implementation, rather just use the -// provided Tracers to instrument code. -// -// Commonly, instrumentation code will accept a TracerProvider implementation -// at runtime from its users or it can simply use the globally registered one -// (see https://pkg.go.dev/go.opentelemetry.io/otel#GetTracerProvider). -// -// Warning: Methods may be added to this interface in minor releases. See -// package documentation on API implementation for information on how to set -// default behavior for unimplemented methods. -type TracerProvider interface { - // Users of the interface can ignore this. This embedded type is only used - // by implementations of this interface. See the "API Implementations" - // section of the package documentation for more information. - embedded.TracerProvider - - // Tracer returns a unique Tracer scoped to be used by instrumentation code - // to trace computational workflows. The scope and identity of that - // instrumentation code is uniquely defined by the name and options passed. - // - // The passed name needs to uniquely identify instrumentation code. - // Therefore, it is recommended that name is the Go package name of the - // library providing instrumentation (note: not the code being - // instrumented). Instrumentation libraries can have multiple versions, - // therefore, the WithInstrumentationVersion option should be used to - // distinguish these different codebases. Additionally, instrumentation - // libraries may sometimes use traces to communicate different domains of - // workflow data (i.e. using spans to communicate workflow events only). If - // this is the case, the WithScopeAttributes option should be used to - // uniquely identify Tracers that handle the different domains of workflow - // data. - // - // If the same name and options are passed multiple times, the same Tracer - // will be returned (it is up to the implementation if this will be the - // same underlying instance of that Tracer or not). It is not necessary to - // call this multiple times with the same name and options to get an - // up-to-date Tracer. All implementations will ensure any TracerProvider - // configuration changes are propagated to all provided Tracers. - // - // If name is empty, then an implementation defined default name will be - // used instead. - // - // This method is safe to call concurrently. - Tracer(name string, options ...TracerOption) Tracer -} diff --git a/vendor/go.opentelemetry.io/otel/trace/tracer.go b/vendor/go.opentelemetry.io/otel/trace/tracer.go new file mode 100644 index 0000000000..77952d2a0b --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/tracer.go @@ -0,0 +1,37 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/trace" + +import ( + "context" + + "go.opentelemetry.io/otel/trace/embedded" +) + +// Tracer is the creator of Spans. 
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
type Tracer interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Tracer
+
+	// Start creates a span and a context.Context containing the newly-created span.
+	//
+	// If the context.Context provided in `ctx` contains a Span then the newly-created
+	// Span will be a child of that span, otherwise it will be a root span. This behavior
+	// can be overridden by providing `WithNewRoot()` as a SpanOption, causing the
+	// newly-created Span to be a root span even if `ctx` contains a Span.
+	//
+	// When creating a Span it is recommended to provide all known span attributes using
+	// the `WithAttributes()` SpanOption as samplers will only have access to the
+	// attributes provided when a Span is created.
+	//
+	// Any Span that is created MUST also be ended. This is the responsibility of the user.
+	// Implementations of this API may leak memory or other resources if Spans are not ended.
+	Start(ctx context.Context, spanName string, opts ...SpanStartOption) (context.Context, Span)
+}
diff --git a/vendor/go.opentelemetry.io/otel/trace/tracestate.go b/vendor/go.opentelemetry.io/otel/trace/tracestate.go
index 20b5cf2433..dc5e34cad0 100644
--- a/vendor/go.opentelemetry.io/otel/trace/tracestate.go
+++ b/vendor/go.opentelemetry.io/otel/trace/tracestate.go
@@ -260,6 +260,16 @@ func (ts TraceState) Get(key string) string {
 	return ""
 }
 
+// Walk walks all key value pairs in the TraceState by calling f.
+// Iteration stops if f returns false.
+func (ts TraceState) Walk(f func(key, value string) bool) {
+	for _, m := range ts.list {
+		if !f(m.Key, m.Value) {
+			break
+		}
+	}
+}
+
 // Insert adds a new list-member defined by the key/value pair to the
 // TraceState. If a list-member already exists for the given key, that
 // list-member's value is updated. The new or updated list-member is always
diff --git a/vendor/go.opentelemetry.io/otel/verify_released_changelog.sh b/vendor/go.opentelemetry.io/otel/verify_released_changelog.sh
new file mode 100644
index 0000000000..c9b7cdbbfe
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/verify_released_changelog.sh
@@ -0,0 +1,42 @@
+#!/bin/bash
+
+# Copyright The OpenTelemetry Authors
+# SPDX-License-Identifier: Apache-2.0
+
+set -euo pipefail
+
+TARGET="${1:?Must provide target ref}"
+
+FILE="CHANGELOG.md"
+TEMP_DIR=$(mktemp -d)
+echo "Temp folder: $TEMP_DIR"
+
+# Only the latest commit of the feature branch is available
+# automatically. To diff with the base branch, we need to
+# fetch that too (and we only need its latest commit).
+git fetch origin "${TARGET}" --depth=1
+
+# Checkout the previous version on the base branch of the changelog to tmpfolder
+git --work-tree="$TEMP_DIR" checkout FETCH_HEAD $FILE
+
+PREVIOUS_FILE="$TEMP_DIR/$FILE"
+CURRENT_FILE="$FILE"
+PREVIOUS_LOCKED_FILE="$TEMP_DIR/previous_locked_section.md"
+CURRENT_LOCKED_FILE="$TEMP_DIR/current_locked_section.md"
+
+# Extract released sections from the previous version
+awk '/^<!-- Released section -->/ {flag=1} /^<!-- Released section ended -->/ {flag=0} flag' "$PREVIOUS_FILE" > "$PREVIOUS_LOCKED_FILE"
+
+# Extract released sections from the current version
+awk '/^<!-- Released section -->/ {flag=1} /^<!-- Released section ended -->/ {flag=0} flag' "$CURRENT_FILE" > "$CURRENT_LOCKED_FILE"
+
+# Compare the released sections
+if ! diff -q "$PREVIOUS_LOCKED_FILE" "$CURRENT_LOCKED_FILE"; then
+    echo "Error: The released sections of the changelog file have been modified."
+    diff "$PREVIOUS_LOCKED_FILE" "$CURRENT_LOCKED_FILE"
+    rm -rf "$TEMP_DIR"
+    false
+fi
+
+rm -rf "$TEMP_DIR"
+echo "The released sections remain unchanged."
diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go
index ab28960524..f67039ed1f 100644
--- a/vendor/go.opentelemetry.io/otel/version.go
+++ b/vendor/go.opentelemetry.io/otel/version.go
@@ -5,5 +5,5 @@ package otel // import "go.opentelemetry.io/otel"
 
 // Version is the current release version of OpenTelemetry in use.
 func Version() string {
-	return "1.28.0"
+	return "1.29.0"
 }
diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml
index 241cfc82a8..3ba611d713 100644
--- a/vendor/go.opentelemetry.io/otel/versions.yaml
+++ b/vendor/go.opentelemetry.io/otel/versions.yaml
@@ -3,7 +3,7 @@
 module-sets:
   stable-v1:
-    version: v1.28.0
+    version: v1.29.0
     modules:
       - go.opentelemetry.io/otel
       - go.opentelemetry.io/otel/bridge/opencensus
@@ -29,15 +29,16 @@ module-sets:
       - go.opentelemetry.io/otel/sdk/metric
       - go.opentelemetry.io/otel/trace
   experimental-metrics:
-    version: v0.50.0
+    version: v0.51.0
     modules:
       - go.opentelemetry.io/otel/example/prometheus
      - go.opentelemetry.io/otel/exporters/prometheus
   experimental-logs:
-    version: v0.4.0
+    version: v0.5.0
     modules:
       - go.opentelemetry.io/otel/log
       - go.opentelemetry.io/otel/sdk/log
+      - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc
       - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp
       - go.opentelemetry.io/otel/exporters/stdout/stdoutlog
   experimental-schema:
@@ -46,4 +47,3 @@ module-sets:
       - go.opentelemetry.io/otel/schema
 excluded-modules:
   - go.opentelemetry.io/otel/internal/tools
-  - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc
diff --git a/vendor/golang.org/x/crypto/LICENSE b/vendor/golang.org/x/crypto/LICENSE
index 6a66aea5ea..2a7cf70da6 100644
--- a/vendor/golang.org/x/crypto/LICENSE
+++ b/vendor/golang.org/x/crypto/LICENSE
@@ -1,4 +1,4 @@
-Copyright (c) 2009 The Go Authors. All rights reserved.
+Copyright 2009 The Go Authors.
 
 Redistribution and use in source and binary forms, with or without
 modification, are permitted provided that the following conditions are
@@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer.
    copyright notice, this list of conditions and the following disclaimer
    in the documentation and/or other materials provided with the
    distribution.
-   * Neither the name of Google Inc. nor the names of its
+   * Neither the name of Google LLC nor the names of its
 contributors may be used to endorse or promote products derived from
 this software without specific prior written permission.
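For reference on the `TraceState.Walk` method added in the tracestate.go hunk above: a minimal sketch of how a caller might use it, assuming only the public `go.opentelemetry.io/otel/trace` API (the tracestate header value and the printed output are illustrative, not taken from this change):

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/trace"
)

func main() {
	// ParseTraceState parses a W3C tracestate header value into a TraceState.
	ts, err := trace.ParseTraceState("vendorA=alpha,vendorB=beta")
	if err != nil {
		panic(err)
	}

	// Walk visits each list-member in order; returning false stops the walk
	// early, so callers can scan without materializing all key/value pairs.
	ts.Walk(func(key, value string) bool {
		fmt.Printf("%s=%s\n", key, value)
		return true // keep walking
	})
}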
diff --git a/vendor/golang.org/x/mod/LICENSE b/vendor/golang.org/x/mod/LICENSE index 6a66aea5ea..2a7cf70da6 100644 --- a/vendor/golang.org/x/mod/LICENSE +++ b/vendor/golang.org/x/mod/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/vendor/golang.org/x/net/LICENSE b/vendor/golang.org/x/net/LICENSE index 6a66aea5ea..2a7cf70da6 100644 --- a/vendor/golang.org/x/net/LICENSE +++ b/vendor/golang.org/x/net/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/vendor/golang.org/x/oauth2/LICENSE b/vendor/golang.org/x/oauth2/LICENSE index 6a66aea5ea..2a7cf70da6 100644 --- a/vendor/golang.org/x/oauth2/LICENSE +++ b/vendor/golang.org/x/oauth2/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/vendor/golang.org/x/sys/LICENSE b/vendor/golang.org/x/sys/LICENSE index 6a66aea5ea..2a7cf70da6 100644 --- a/vendor/golang.org/x/sys/LICENSE +++ b/vendor/golang.org/x/sys/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. 
nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/vendor/golang.org/x/sys/cpu/cpu.go b/vendor/golang.org/x/sys/cpu/cpu.go index 8fa707aa4b..ec07aab057 100644 --- a/vendor/golang.org/x/sys/cpu/cpu.go +++ b/vendor/golang.org/x/sys/cpu/cpu.go @@ -105,6 +105,8 @@ var ARM64 struct { HasSVE bool // Scalable Vector Extensions HasSVE2 bool // Scalable Vector Extensions 2 HasASIMDFHM bool // Advanced SIMD multiplication FP16 to FP32 + HasDIT bool // Data Independent Timing support + HasI8MM bool // Advanced SIMD Int8 matrix multiplication instructions _ CacheLinePad } diff --git a/vendor/golang.org/x/sys/cpu/cpu_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_arm64.go index 0e27a21e1f..af2aa99f9f 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_arm64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_arm64.go @@ -38,6 +38,8 @@ func initOptions() { {Name: "dcpop", Feature: &ARM64.HasDCPOP}, {Name: "asimddp", Feature: &ARM64.HasASIMDDP}, {Name: "asimdfhm", Feature: &ARM64.HasASIMDFHM}, + {Name: "dit", Feature: &ARM64.HasDIT}, + {Name: "i8mm", Feature: &ARM64.HasI8MM}, } } @@ -145,6 +147,11 @@ func parseARM64SystemRegisters(isar0, isar1, pfr0 uint64) { ARM64.HasLRCPC = true } + switch extractBits(isar1, 52, 55) { + case 1: + ARM64.HasI8MM = true + } + // ID_AA64PFR0_EL1 switch extractBits(pfr0, 16, 19) { case 0: @@ -168,6 +175,11 @@ func parseARM64SystemRegisters(isar0, isar1, pfr0 uint64) { parseARM64SVERegister(getzfr0()) } + + switch extractBits(pfr0, 48, 51) { + case 1: + ARM64.HasDIT = true + } } func parseARM64SVERegister(zfr0 uint64) { diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go index 3d386d0fc2..08f35ea177 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go @@ -35,8 +35,10 @@ const ( hwcap_SHA512 = 1 << 21 hwcap_SVE = 1 << 22 hwcap_ASIMDFHM = 1 << 23 + hwcap_DIT = 1 << 24 hwcap2_SVE2 = 1 << 1 + hwcap2_I8MM = 1 << 13 ) // linuxKernelCanEmulateCPUID reports whether we're running @@ -106,9 +108,12 @@ func doinit() { ARM64.HasSHA512 = isSet(hwCap, hwcap_SHA512) ARM64.HasSVE = isSet(hwCap, hwcap_SVE) ARM64.HasASIMDFHM = isSet(hwCap, hwcap_ASIMDFHM) + ARM64.HasDIT = isSet(hwCap, hwcap_DIT) + // HWCAP2 feature bits ARM64.HasSVE2 = isSet(hwCap2, hwcap2_SVE2) + ARM64.HasI8MM = isSet(hwCap2, hwcap2_I8MM) } func isSet(hwc uint, value uint) bool { diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index 4ed2e488b6..d07dd09eb5 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -58,6 +58,7 @@ includes_Darwin=' #define _DARWIN_USE_64_BIT_INODE #define __APPLE_USE_RFC_3542 #include +#include #include #include #include diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go index 4cc7b00596..2d15200adb 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go @@ -402,6 +402,18 @@ func IoctlSetIfreqMTU(fd int, ifreq *IfreqMTU) error { return ioctlPtr(fd, SIOCSIFMTU, unsafe.Pointer(ifreq)) } +//sys renamexNp(from string, to string, flag uint32) (err error) + +func RenamexNp(from string, to string, flag uint32) (err error) { + return renamexNp(from, to, flag) +} + +//sys renameatxNp(fromfd int, from string, tofd int, to string, flag uint32) 
(err error) + +func RenameatxNp(fromfd int, from string, tofd int, to string, flag uint32) (err error) { + return renameatxNp(fromfd, from, tofd, to, flag) +} + //sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS_SYSCTL func Uname(uname *Utsname) error { diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index 5682e2628a..3f1d3d4cb2 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -2592,3 +2592,4 @@ func SchedGetAttr(pid int, flags uint) (*SchedAttr, error) { } //sys Cachestat(fd uint, crange *CachestatRange, cstat *Cachestat_t, flags uint) (err error) +//sys Mseal(b []byte, flags uint) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd.go b/vendor/golang.org/x/sys/unix/syscall_openbsd.go index b25343c71a..b86ded549c 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd.go @@ -293,6 +293,7 @@ func Uname(uname *Utsname) error { //sys Mkfifoat(dirfd int, path string, mode uint32) (err error) //sys Mknod(path string, mode uint32, dev int) (err error) //sys Mknodat(dirfd int, path string, mode uint32, dev int) (err error) +//sys Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) //sys Nanosleep(time *Timespec, leftover *Timespec) (err error) //sys Open(path string, mode int, perm uint32) (fd int, err error) //sys Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go index e40fa85245..4308ac1772 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go @@ -1169,6 +1169,11 @@ const ( PT_WRITE_D = 0x5 PT_WRITE_I = 0x4 PT_WRITE_U = 0x6 + RENAME_EXCL = 0x4 + RENAME_NOFOLLOW_ANY = 0x10 + RENAME_RESERVED1 = 0x8 + RENAME_SECLUDE = 0x1 + RENAME_SWAP = 0x2 RLIMIT_AS = 0x5 RLIMIT_CORE = 0x4 RLIMIT_CPU = 0x0 diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go index bb02aa6c05..c8068a7a16 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go @@ -1169,6 +1169,11 @@ const ( PT_WRITE_D = 0x5 PT_WRITE_I = 0x4 PT_WRITE_U = 0x6 + RENAME_EXCL = 0x4 + RENAME_NOFOLLOW_ANY = 0x10 + RENAME_RESERVED1 = 0x8 + RENAME_SECLUDE = 0x1 + RENAME_SWAP = 0x2 RLIMIT_AS = 0x5 RLIMIT_CORE = 0x4 RLIMIT_CPU = 0x0 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index 877a62b479..01a70b2463 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -457,6 +457,7 @@ const ( B600 = 0x8 B75 = 0x2 B9600 = 0xd + BCACHEFS_SUPER_MAGIC = 0xca451a4e BDEVFS_MAGIC = 0x62646576 BINDERFS_SUPER_MAGIC = 0x6c6f6f70 BINFMTFS_MAGIC = 0x42494e4d @@ -928,6 +929,7 @@ const ( EPOLL_CTL_ADD = 0x1 EPOLL_CTL_DEL = 0x2 EPOLL_CTL_MOD = 0x3 + EPOLL_IOC_TYPE = 0x8a EROFS_SUPER_MAGIC_V1 = 0xe0f5e1e2 ESP_V4_FLOW = 0xa ESP_V6_FLOW = 0xc @@ -941,9 +943,6 @@ const ( ETHTOOL_FEC_OFF = 0x4 ETHTOOL_FEC_RS = 0x8 ETHTOOL_FLAG_ALL = 0x7 - ETHTOOL_FLAG_COMPACT_BITSETS = 0x1 - ETHTOOL_FLAG_OMIT_REPLY = 0x2 - ETHTOOL_FLAG_STATS = 0x4 ETHTOOL_FLASHDEV = 0x33 ETHTOOL_FLASH_MAX_FILENAME = 0x80 ETHTOOL_FWVERS_LEN = 0x20 @@ -1705,6 +1704,7 @@ const ( 
KEXEC_ARCH_S390 = 0x160000 KEXEC_ARCH_SH = 0x2a0000 KEXEC_ARCH_X86_64 = 0x3e0000 + KEXEC_CRASH_HOTPLUG_SUPPORT = 0x8 KEXEC_FILE_DEBUG = 0x8 KEXEC_FILE_NO_INITRAMFS = 0x4 KEXEC_FILE_ON_CRASH = 0x2 @@ -1780,6 +1780,7 @@ const ( KEY_SPEC_USER_KEYRING = -0x4 KEY_SPEC_USER_SESSION_KEYRING = -0x5 LANDLOCK_ACCESS_FS_EXECUTE = 0x1 + LANDLOCK_ACCESS_FS_IOCTL_DEV = 0x8000 LANDLOCK_ACCESS_FS_MAKE_BLOCK = 0x800 LANDLOCK_ACCESS_FS_MAKE_CHAR = 0x40 LANDLOCK_ACCESS_FS_MAKE_DIR = 0x80 @@ -1861,6 +1862,19 @@ const ( MAP_FILE = 0x0 MAP_FIXED = 0x10 MAP_FIXED_NOREPLACE = 0x100000 + MAP_HUGE_16GB = 0x88000000 + MAP_HUGE_16KB = 0x38000000 + MAP_HUGE_16MB = 0x60000000 + MAP_HUGE_1GB = 0x78000000 + MAP_HUGE_1MB = 0x50000000 + MAP_HUGE_256MB = 0x70000000 + MAP_HUGE_2GB = 0x7c000000 + MAP_HUGE_2MB = 0x54000000 + MAP_HUGE_32MB = 0x64000000 + MAP_HUGE_512KB = 0x4c000000 + MAP_HUGE_512MB = 0x74000000 + MAP_HUGE_64KB = 0x40000000 + MAP_HUGE_8MB = 0x5c000000 MAP_HUGE_MASK = 0x3f MAP_HUGE_SHIFT = 0x1a MAP_PRIVATE = 0x2 @@ -2498,6 +2512,23 @@ const ( PR_PAC_GET_ENABLED_KEYS = 0x3d PR_PAC_RESET_KEYS = 0x36 PR_PAC_SET_ENABLED_KEYS = 0x3c + PR_PPC_DEXCR_CTRL_CLEAR = 0x4 + PR_PPC_DEXCR_CTRL_CLEAR_ONEXEC = 0x10 + PR_PPC_DEXCR_CTRL_EDITABLE = 0x1 + PR_PPC_DEXCR_CTRL_MASK = 0x1f + PR_PPC_DEXCR_CTRL_SET = 0x2 + PR_PPC_DEXCR_CTRL_SET_ONEXEC = 0x8 + PR_PPC_DEXCR_IBRTPD = 0x1 + PR_PPC_DEXCR_NPHIE = 0x3 + PR_PPC_DEXCR_SBHE = 0x0 + PR_PPC_DEXCR_SRAPD = 0x2 + PR_PPC_GET_DEXCR = 0x48 + PR_PPC_SET_DEXCR = 0x49 + PR_RISCV_CTX_SW_FENCEI_OFF = 0x1 + PR_RISCV_CTX_SW_FENCEI_ON = 0x0 + PR_RISCV_SCOPE_PER_PROCESS = 0x0 + PR_RISCV_SCOPE_PER_THREAD = 0x1 + PR_RISCV_SET_ICACHE_FLUSH_CTX = 0x47 PR_RISCV_V_GET_CONTROL = 0x46 PR_RISCV_V_SET_CONTROL = 0x45 PR_RISCV_V_VSTATE_CTRL_CUR_MASK = 0x3 @@ -3192,6 +3223,7 @@ const ( STATX_MTIME = 0x40 STATX_NLINK = 0x4 STATX_SIZE = 0x200 + STATX_SUBVOL = 0x8000 STATX_TYPE = 0x1 STATX_UID = 0x8 STATX__RESERVED = 0x80000000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index e4bc0bd57c..684a5168da 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -78,6 +78,8 @@ const ( ECHOPRT = 0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x800 + EPIOCGPARAMS = 0x80088a02 + EPIOCSPARAMS = 0x40088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000 FF1 = 0x8000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index 689317afdb..61d74b592d 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -78,6 +78,8 @@ const ( ECHOPRT = 0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x800 + EPIOCGPARAMS = 0x80088a02 + EPIOCSPARAMS = 0x40088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000 FF1 = 0x8000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index 5cca668ac3..a28c9e3e89 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -78,6 +78,8 @@ const ( ECHOPRT = 0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x800 + EPIOCGPARAMS = 0x80088a02 + EPIOCSPARAMS = 0x40088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000 FF1 = 0x8000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index 14270508b0..ab5d1fe8ea 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ 
b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -78,6 +78,8 @@ const ( ECHOPRT = 0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x800 + EPIOCGPARAMS = 0x80088a02 + EPIOCSPARAMS = 0x40088a01 EPOLL_CLOEXEC = 0x80000 ESR_MAGIC = 0x45535201 EXTPROC = 0x10000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go index 28e39afdcb..c523090e7c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go @@ -78,6 +78,8 @@ const ( ECHOPRT = 0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x800 + EPIOCGPARAMS = 0x80088a02 + EPIOCSPARAMS = 0x40088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000 FF1 = 0x8000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index cd66e92cb4..01e6ea7804 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -78,6 +78,8 @@ const ( ECHOPRT = 0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x80 + EPIOCGPARAMS = 0x40088a02 + EPIOCSPARAMS = 0x80088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000 FF1 = 0x8000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index c1595eba78..7aa610b1e7 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -78,6 +78,8 @@ const ( ECHOPRT = 0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x80 + EPIOCGPARAMS = 0x40088a02 + EPIOCSPARAMS = 0x80088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000 FF1 = 0x8000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index ee9456b0da..92af771b44 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -78,6 +78,8 @@ const ( ECHOPRT = 0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x80 + EPIOCGPARAMS = 0x40088a02 + EPIOCSPARAMS = 0x80088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000 FF1 = 0x8000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index 8cfca81e1b..b27ef5e6f1 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -78,6 +78,8 @@ const ( ECHOPRT = 0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x80 + EPIOCGPARAMS = 0x40088a02 + EPIOCSPARAMS = 0x80088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000 FF1 = 0x8000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go index 60b0deb3af..237a2cefb3 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go @@ -78,6 +78,8 @@ const ( ECHOPRT = 0x20 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x800 + EPIOCGPARAMS = 0x40088a02 + EPIOCSPARAMS = 0x80088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000000 FF1 = 0x4000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index f90aa7281b..4a5c555a36 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -78,6 +78,8 @@ const ( ECHOPRT = 0x20 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x800 + EPIOCGPARAMS = 0x40088a02 + EPIOCSPARAMS = 0x80088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000000 FF1 = 0x4000 diff 
--git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index ba9e015033..a02fb49a5f 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -78,6 +78,8 @@ const ( ECHOPRT = 0x20 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x800 + EPIOCGPARAMS = 0x40088a02 + EPIOCSPARAMS = 0x80088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000000 FF1 = 0x4000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index 07cdfd6e9f..e26a7c61b2 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -78,6 +78,8 @@ const ( ECHOPRT = 0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x800 + EPIOCGPARAMS = 0x80088a02 + EPIOCSPARAMS = 0x40088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000 FF1 = 0x8000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index 2f1dd214a7..c48f7c2103 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -78,6 +78,8 @@ const ( ECHOPRT = 0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x800 + EPIOCGPARAMS = 0x80088a02 + EPIOCSPARAMS = 0x40088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000 FF1 = 0x8000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index f40519d901..ad4b9aace7 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -82,6 +82,8 @@ const ( EFD_CLOEXEC = 0x400000 EFD_NONBLOCK = 0x4000 EMT_TAGOVF = 0x1 + EPIOCGPARAMS = 0x40088a02 + EPIOCSPARAMS = 0x80088a01 EPOLL_CLOEXEC = 0x400000 EXTPROC = 0x10000 FF1 = 0x8000 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go index 07642c308d..b622533ef2 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go @@ -740,6 +740,54 @@ func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func renamexNp(from string, to string, flag uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_renamex_np_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flag)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_renamex_np_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_renamex_np renamex_np "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func renameatxNp(fromfd int, from string, tofd int, to string, flag uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_renameatx_np_trampoline_addr, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), uintptr(flag), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_renameatx_np_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_renameatx_np renameatx_np 
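EPIOCGPARAMS and EPIOCSPARAMS, added for every Linux architecture above, are the epoll busy-poll parameter ioctls introduced in Linux 6.9. Their numeric values differ per architecture because the ioctl direction bits are packed differently (compare the mips/ppc/sparc values with the x86 ones). A small sanity check of the encoding, assuming amd64:

```go
//go:build linux && amd64

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// On amd64 the ioctl "read" direction bit is 0x80000000 and
	// "write" is 0x40000000, so getter and setter differ only there.
	// The remaining bits encode sizeof(struct epoll_params) (0x0008),
	// the EPOLL_IOC_TYPE magic (0x8a), and the command number.
	fmt.Printf("EPIOCGPARAMS = %#x\n", unix.EPIOCGPARAMS) // 0x80088a02
	fmt.Printf("EPIOCSPARAMS = %#x\n", unix.EPIOCSPARAMS) // 0x40088a01
	fmt.Printf("ioc type     = %#x\n", unix.EPOLL_IOC_TYPE)
}
```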
"/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { var _p0 unsafe.Pointer if len(mib) > 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s index 923e08cb79..cfe6646baf 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s @@ -223,6 +223,16 @@ TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_ioctl_trampoline_addr(SB)/8, $libc_ioctl_trampoline<>(SB) +TEXT libc_renamex_np_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_renamex_np(SB) +GLOBL ·libc_renamex_np_trampoline_addr(SB), RODATA, $8 +DATA ·libc_renamex_np_trampoline_addr(SB)/8, $libc_renamex_np_trampoline<>(SB) + +TEXT libc_renameatx_np_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_renameatx_np(SB) +GLOBL ·libc_renameatx_np_trampoline_addr(SB), RODATA, $8 +DATA ·libc_renameatx_np_trampoline_addr(SB)/8, $libc_renameatx_np_trampoline<>(SB) + TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sysctl(SB) GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go index 7d73dda647..13f624f69f 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go @@ -740,6 +740,54 @@ func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func renamexNp(from string, to string, flag uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_renamex_np_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flag)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_renamex_np_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_renamex_np renamex_np "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func renameatxNp(fromfd int, from string, tofd int, to string, flag uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_renameatx_np_trampoline_addr, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), uintptr(flag), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_renameatx_np_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_renameatx_np renameatx_np "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { var _p0 unsafe.Pointer if len(mib) > 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s index 057700111e..fe222b75df 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s @@ -223,6 +223,16 @@ TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $8 DATA 
·libc_ioctl_trampoline_addr(SB)/8, $libc_ioctl_trampoline<>(SB) +TEXT libc_renamex_np_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_renamex_np(SB) +GLOBL ·libc_renamex_np_trampoline_addr(SB), RODATA, $8 +DATA ·libc_renamex_np_trampoline_addr(SB)/8, $libc_renamex_np_trampoline<>(SB) + +TEXT libc_renameatx_np_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_renameatx_np(SB) +GLOBL ·libc_renameatx_np_trampoline_addr(SB), RODATA, $8 +DATA ·libc_renameatx_np_trampoline_addr(SB)/8, $libc_renameatx_np_trampoline<>(SB) + TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sysctl(SB) GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/vendor/golang.org/x/sys/unix/zsyscall_linux.go index 87d8612a1d..1bc1a5adb2 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux.go @@ -2229,3 +2229,19 @@ func Cachestat(fd uint, crange *CachestatRange, cstat *Cachestat_t, flags uint) } return } + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mseal(b []byte, flags uint) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MSEAL, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go index 9dc42410b7..1851df14e8 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go @@ -1493,6 +1493,30 @@ var libc_mknodat_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(fsType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(dir) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_mount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mount_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mount mount "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s index 41b5617316..0b43c69365 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s @@ -463,6 +463,11 @@ TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $4 DATA ·libc_mknodat_trampoline_addr(SB)/4, $libc_mknodat_trampoline<>(SB) +TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mount(SB) +GLOBL ·libc_mount_trampoline_addr(SB), RODATA, $4 +DATA ·libc_mount_trampoline_addr(SB)/4, $libc_mount_trampoline<>(SB) + TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_nanosleep(SB) GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $4 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go index 0d3a0751cd..e1ec0dbe4e 100644 --- 
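The generated Mseal wrapper above exposes mseal(2) from Linux 6.10: once a mapping is sealed, later attempts to unmap, move, or re-protect it are refused by the kernel. A rough sketch of the intended use (it returns ENOSYS on older kernels):

```go
//go:build linux

package main

import (
	"log"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	b, err := unix.Mmap(-1, 0, os.Getpagesize(),
		unix.PROT_READ|unix.PROT_WRITE,
		unix.MAP_PRIVATE|unix.MAP_ANONYMOUS)
	if err != nil {
		log.Fatalf("mmap: %v", err)
	}
	// Seal the mapping; the flags argument must currently be zero.
	if err := unix.Mseal(b, 0); err != nil {
		log.Fatalf("mseal: %v", err) // ENOSYS before Linux 6.10
	}
	// The kernel now refuses to tear the mapping down.
	if err := unix.Munmap(b); err != nil {
		log.Printf("munmap rejected as expected: %v", err)
	}
}
```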
a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go @@ -1493,6 +1493,30 @@ var libc_mknodat_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(fsType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(dir) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_mount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mount_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mount mount "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s index 4019a656f6..880c6d6e31 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s @@ -463,6 +463,11 @@ TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $8 DATA ·libc_mknodat_trampoline_addr(SB)/8, $libc_mknodat_trampoline<>(SB) +TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mount(SB) +GLOBL ·libc_mount_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mount_trampoline_addr(SB)/8, $libc_mount_trampoline<>(SB) + TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_nanosleep(SB) GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go index c39f7776db..7c8452a63e 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go @@ -1493,6 +1493,30 @@ var libc_mknodat_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(fsType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(dir) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_mount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mount_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mount mount "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s index ac4af24f90..b8ef95b0fa 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s @@ -463,6 +463,11 @@ TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $4 DATA ·libc_mknodat_trampoline_addr(SB)/4, 
$libc_mknodat_trampoline<>(SB) +TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mount(SB) +GLOBL ·libc_mount_trampoline_addr(SB), RODATA, $4 +DATA ·libc_mount_trampoline_addr(SB)/4, $libc_mount_trampoline<>(SB) + TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_nanosleep(SB) GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $4 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go index 57571d072f..2ffdf861f7 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go @@ -1493,6 +1493,30 @@ var libc_mknodat_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(fsType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(dir) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_mount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mount_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mount mount "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s index f77d532121..2af3b5c762 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s @@ -463,6 +463,11 @@ TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $8 DATA ·libc_mknodat_trampoline_addr(SB)/8, $libc_mknodat_trampoline<>(SB) +TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mount(SB) +GLOBL ·libc_mount_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mount_trampoline_addr(SB)/8, $libc_mount_trampoline<>(SB) + TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_nanosleep(SB) GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go index e62963e67e..1da08d5267 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go @@ -1493,6 +1493,30 @@ var libc_mknodat_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(fsType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(dir) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_mount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mount_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mount mount "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := 
syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s index fae140b62c..b7a251353b 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s @@ -463,6 +463,11 @@ TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $8 DATA ·libc_mknodat_trampoline_addr(SB)/8, $libc_mknodat_trampoline<>(SB) +TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mount(SB) +GLOBL ·libc_mount_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mount_trampoline_addr(SB)/8, $libc_mount_trampoline<>(SB) + TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_nanosleep(SB) GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go index 00831354c8..6e85b0aac9 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go @@ -1493,6 +1493,30 @@ var libc_mknodat_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(fsType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(dir) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_mount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mount_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mount mount "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s index 9d1e0ff06d..f15dadf055 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s @@ -555,6 +555,12 @@ TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $8 DATA ·libc_mknodat_trampoline_addr(SB)/8, $libc_mknodat_trampoline<>(SB) +TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_mount(SB) + RET +GLOBL ·libc_mount_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mount_trampoline_addr(SB)/8, $libc_mount_trampoline<>(SB) + TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 CALL libc_nanosleep(SB) RET diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go index 79029ed584..28b487df25 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go @@ -1493,6 +1493,30 @@ var libc_mknodat_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(fsType) + if err != nil { + return + } + var _p1 *byte + _p1, err = 
BytePtrFromString(dir) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_mount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mount_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mount mount "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s index da115f9a4b..1e7f321e43 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s @@ -463,6 +463,11 @@ TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $8 DATA ·libc_mknodat_trampoline_addr(SB)/8, $libc_mknodat_trampoline<>(SB) +TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mount(SB) +GLOBL ·libc_mount_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mount_trampoline_addr(SB)/8, $libc_mount_trampoline<>(SB) + TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_nanosleep(SB) GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go index 53aef5dc58..524b0820cb 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go @@ -457,4 +457,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go index 71d524763d..d3e38f681a 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -379,4 +379,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go index c747706131..70b35bf3b0 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go @@ -421,4 +421,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index f96e214f6d..6c778c2327 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -324,4 +324,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go index 28425346cf..37281cf51a 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go @@ -318,4 +318,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go 
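The per-architecture hunks above complete the new OpenBSD Mount wrapper, which takes the raw mount(2) shape: a filesystem type, a mount point, flags, and a pointer to a filesystem-specific argument struct. A hedged sketch of a flag-only remount; the path is illustrative, root is required, and whether a nil args pointer is accepted depends on the filesystem:

```go
//go:build openbsd

package main

import (
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	// Remount an already-mounted ffs filesystem read-only. For a
	// plain flag update we pass no type-specific argument struct.
	if err := unix.Mount("ffs", "/mnt", unix.MNT_UPDATE|unix.MNT_RDONLY, nil); err != nil {
		log.Fatalf("mount: %v", err)
	}
}
```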
index d0953018da..7e567f1eff 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go @@ -441,4 +441,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 4459 SYS_LSM_SET_SELF_ATTR = 4460 SYS_LSM_LIST_MODULES = 4461 + SYS_MSEAL = 4462 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go index 295c7f4b81..38ae55e5ef 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go @@ -371,4 +371,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 5459 SYS_LSM_SET_SELF_ATTR = 5460 SYS_LSM_LIST_MODULES = 5461 + SYS_MSEAL = 5462 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go index d1a9eaca7a..55e92e60a8 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go @@ -371,4 +371,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 5459 SYS_LSM_SET_SELF_ATTR = 5460 SYS_LSM_LIST_MODULES = 5461 + SYS_MSEAL = 5462 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go index bec157c39f..60658d6a02 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go @@ -441,4 +441,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 4459 SYS_LSM_SET_SELF_ATTR = 4460 SYS_LSM_LIST_MODULES = 4461 + SYS_MSEAL = 4462 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go index 7ee7bdc435..e203e8a7ed 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go @@ -448,4 +448,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go index fad1f25b44..5944b97d54 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go @@ -420,4 +420,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go index 7d3e16357d..c66d416dad 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go @@ -420,4 +420,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index 0ed53ad9f7..9889f6a559 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -325,4 +325,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go index 2fba04ad50..01d86825bb 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go @@ -386,4 +386,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 ) diff --git 
a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go index 621d00d741..7b703e77cd 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go @@ -399,4 +399,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 ) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index 4740b83485..7f1961b907 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -110,7 +110,8 @@ type Statx_t struct { Mnt_id uint64 Dio_mem_align uint32 Dio_offset_align uint32 - _ [12]uint64 + Subvol uint64 + _ [11]uint64 } type Fsid struct { @@ -3473,7 +3474,7 @@ const ( DEVLINK_PORT_FN_ATTR_STATE = 0x2 DEVLINK_PORT_FN_ATTR_OPSTATE = 0x3 DEVLINK_PORT_FN_ATTR_CAPS = 0x4 - DEVLINK_PORT_FUNCTION_ATTR_MAX = 0x5 + DEVLINK_PORT_FUNCTION_ATTR_MAX = 0x6 ) type FsverityDigest struct { @@ -3806,6 +3807,9 @@ const ( ETHTOOL_MSG_PSE_GET_REPLY = 0x25 ETHTOOL_MSG_RSS_GET_REPLY = 0x26 ETHTOOL_MSG_KERNEL_MAX = 0x2b + ETHTOOL_FLAG_COMPACT_BITSETS = 0x1 + ETHTOOL_FLAG_OMIT_REPLY = 0x2 + ETHTOOL_FLAG_STATS = 0x4 ETHTOOL_A_HEADER_UNSPEC = 0x0 ETHTOOL_A_HEADER_DEV_INDEX = 0x1 ETHTOOL_A_HEADER_DEV_NAME = 0x2 @@ -3975,7 +3979,7 @@ const ( ETHTOOL_A_TSINFO_TX_TYPES = 0x3 ETHTOOL_A_TSINFO_RX_FILTERS = 0x4 ETHTOOL_A_TSINFO_PHC_INDEX = 0x5 - ETHTOOL_A_TSINFO_MAX = 0x5 + ETHTOOL_A_TSINFO_MAX = 0x6 ETHTOOL_A_CABLE_TEST_UNSPEC = 0x0 ETHTOOL_A_CABLE_TEST_HEADER = 0x1 ETHTOOL_A_CABLE_TEST_MAX = 0x1 diff --git a/vendor/golang.org/x/sys/windows/security_windows.go b/vendor/golang.org/x/sys/windows/security_windows.go index 97651b5bd0..b6e1ab76f8 100644 --- a/vendor/golang.org/x/sys/windows/security_windows.go +++ b/vendor/golang.org/x/sys/windows/security_windows.go @@ -1179,7 +1179,7 @@ type OBJECTS_AND_NAME struct { //sys makeSelfRelativeSD(absoluteSD *SECURITY_DESCRIPTOR, selfRelativeSD *SECURITY_DESCRIPTOR, selfRelativeSDSize *uint32) (err error) = advapi32.MakeSelfRelativeSD //sys setEntriesInAcl(countExplicitEntries uint32, explicitEntries *EXPLICIT_ACCESS, oldACL *ACL, newACL **ACL) (ret error) = advapi32.SetEntriesInAclW -//sys GetAce(acl *ACL, aceIndex uint32, pAce **ACCESS_ALLOWED_ACE) (ret error) = advapi32.GetAce +//sys GetAce(acl *ACL, aceIndex uint32, pAce **ACCESS_ALLOWED_ACE) (err error) = advapi32.GetAce // Control returns the security descriptor control bits. 
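The Statx_t change above replaces one reserved word with a Subvol field, paired with the STATX_SUBVOL mask bit added to zerrors_linux.go. A sketch of reading it; the kernel reports via stx.Mask whether it actually filled the field in (Linux 6.10+, currently meaningful on btrfs and bcachefs):

```go
//go:build linux

package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	var stx unix.Statx_t
	if err := unix.Statx(unix.AT_FDCWD, ".", 0, unix.STATX_SUBVOL, &stx); err != nil {
		log.Fatalf("statx: %v", err)
	}
	if stx.Mask&unix.STATX_SUBVOL != 0 {
		fmt.Println("subvolume id:", stx.Subvol)
	} else {
		fmt.Println("filesystem does not report a subvolume id")
	}
}
```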
func (sd *SECURITY_DESCRIPTOR) Control() (control SECURITY_DESCRIPTOR_CONTROL, revision uint32, err error) { diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index 6525c62f3c..1fa34fd17c 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -17,8 +17,10 @@ import ( "unsafe" ) -type Handle uintptr -type HWND uintptr +type ( + Handle uintptr + HWND uintptr +) const ( InvalidHandle = ^Handle(0) @@ -211,6 +213,10 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys OpenProcess(desiredAccess uint32, inheritHandle bool, processId uint32) (handle Handle, err error) //sys ShellExecute(hwnd Handle, verb *uint16, file *uint16, args *uint16, cwd *uint16, showCmd int32) (err error) [failretval<=32] = shell32.ShellExecuteW //sys GetWindowThreadProcessId(hwnd HWND, pid *uint32) (tid uint32, err error) = user32.GetWindowThreadProcessId +//sys LoadKeyboardLayout(name *uint16, flags uint32) (hkl Handle, err error) [failretval==0] = user32.LoadKeyboardLayoutW +//sys UnloadKeyboardLayout(hkl Handle) (err error) = user32.UnloadKeyboardLayout +//sys GetKeyboardLayout(tid uint32) (hkl Handle) = user32.GetKeyboardLayout +//sys ToUnicodeEx(vkey uint32, scancode uint32, keystate *byte, pwszBuff *uint16, cchBuff int32, flags uint32, hkl Handle) (ret int32) = user32.ToUnicodeEx //sys GetShellWindow() (shellWindow HWND) = user32.GetShellWindow //sys MessageBox(hwnd HWND, text *uint16, caption *uint16, boxtype uint32) (ret int32, err error) [failretval==0] = user32.MessageBoxW //sys ExitWindowsEx(flags uint32, reason uint32) (err error) = user32.ExitWindowsEx @@ -1368,9 +1374,11 @@ func SetsockoptLinger(fd Handle, level, opt int, l *Linger) (err error) { func SetsockoptInet4Addr(fd Handle, level, opt int, value [4]byte) (err error) { return Setsockopt(fd, int32(level), int32(opt), (*byte)(unsafe.Pointer(&value[0])), 4) } + func SetsockoptIPMreq(fd Handle, level, opt int, mreq *IPMreq) (err error) { return Setsockopt(fd, int32(level), int32(opt), (*byte)(unsafe.Pointer(mreq)), int32(unsafe.Sizeof(*mreq))) } + func SetsockoptIPv6Mreq(fd Handle, level, opt int, mreq *IPv6Mreq) (err error) { return syscall.EWINDOWS } diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go index d8cb71db0a..3f03b3d57c 100644 --- a/vendor/golang.org/x/sys/windows/types_windows.go +++ b/vendor/golang.org/x/sys/windows/types_windows.go @@ -2003,7 +2003,21 @@ const ( MOVEFILE_FAIL_IF_NOT_TRACKABLE = 0x20 ) -const GAA_FLAG_INCLUDE_PREFIX = 0x00000010 +// Flags for GetAdaptersAddresses, see +// https://learn.microsoft.com/en-us/windows/win32/api/iphlpapi/nf-iphlpapi-getadaptersaddresses. 
+const ( + GAA_FLAG_SKIP_UNICAST = 0x1 + GAA_FLAG_SKIP_ANYCAST = 0x2 + GAA_FLAG_SKIP_MULTICAST = 0x4 + GAA_FLAG_SKIP_DNS_SERVER = 0x8 + GAA_FLAG_INCLUDE_PREFIX = 0x10 + GAA_FLAG_SKIP_FRIENDLY_NAME = 0x20 + GAA_FLAG_INCLUDE_WINS_INFO = 0x40 + GAA_FLAG_INCLUDE_GATEWAYS = 0x80 + GAA_FLAG_INCLUDE_ALL_INTERFACES = 0x100 + GAA_FLAG_INCLUDE_ALL_COMPARTMENTS = 0x200 + GAA_FLAG_INCLUDE_TUNNEL_BINDINGORDER = 0x400 +) const ( IF_TYPE_OTHER = 1 @@ -2017,6 +2031,50 @@ const ( IF_TYPE_IEEE1394 = 144 ) +// Enum NL_PREFIX_ORIGIN for [IpAdapterUnicastAddress], see +// https://learn.microsoft.com/en-us/windows/win32/api/nldef/ne-nldef-nl_prefix_origin +const ( + IpPrefixOriginOther = 0 + IpPrefixOriginManual = 1 + IpPrefixOriginWellKnown = 2 + IpPrefixOriginDhcp = 3 + IpPrefixOriginRouterAdvertisement = 4 + IpPrefixOriginUnchanged = 1 << 4 +) + +// Enum NL_SUFFIX_ORIGIN for [IpAdapterUnicastAddress], see +// https://learn.microsoft.com/en-us/windows/win32/api/nldef/ne-nldef-nl_suffix_origin +const ( + NlsoOther = 0 + NlsoManual = 1 + NlsoWellKnown = 2 + NlsoDhcp = 3 + NlsoLinkLayerAddress = 4 + NlsoRandom = 5 + IpSuffixOriginOther = 0 + IpSuffixOriginManual = 1 + IpSuffixOriginWellKnown = 2 + IpSuffixOriginDhcp = 3 + IpSuffixOriginLinkLayerAddress = 4 + IpSuffixOriginRandom = 5 + IpSuffixOriginUnchanged = 1 << 4 +) + +// Enum NL_DAD_STATE for [IpAdapterUnicastAddress], see +// https://learn.microsoft.com/en-us/windows/win32/api/nldef/ne-nldef-nl_dad_state +const ( + NldsInvalid = 0 + NldsTentative = 1 + NldsDuplicate = 2 + NldsDeprecated = 3 + NldsPreferred = 4 + IpDadStateInvalid = 0 + IpDadStateTentative = 1 + IpDadStateDuplicate = 2 + IpDadStateDeprecated = 3 + IpDadStatePreferred = 4 +) + type SocketAddress struct { Sockaddr *syscall.RawSockaddrAny SockaddrLength int32 @@ -3404,3 +3462,14 @@ type DCB struct { EvtChar byte wReserved1 uint16 } + +// Keyboard Layout Flags. 
+// See https://learn.microsoft.com/en-us/windows/win32/api/winuser/nf-winuser-loadkeyboardlayoutw +const ( + KLF_ACTIVATE = 0x00000001 + KLF_SUBSTITUTE_OK = 0x00000002 + KLF_REORDER = 0x00000008 + KLF_REPLACELANG = 0x00000010 + KLF_NOTELLSHELL = 0x00000080 + KLF_SETFORPROCESS = 0x00000100 +) diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index eba761018a..9bb979a3e4 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -478,12 +478,16 @@ var ( procGetDesktopWindow = moduser32.NewProc("GetDesktopWindow") procGetForegroundWindow = moduser32.NewProc("GetForegroundWindow") procGetGUIThreadInfo = moduser32.NewProc("GetGUIThreadInfo") + procGetKeyboardLayout = moduser32.NewProc("GetKeyboardLayout") procGetShellWindow = moduser32.NewProc("GetShellWindow") procGetWindowThreadProcessId = moduser32.NewProc("GetWindowThreadProcessId") procIsWindow = moduser32.NewProc("IsWindow") procIsWindowUnicode = moduser32.NewProc("IsWindowUnicode") procIsWindowVisible = moduser32.NewProc("IsWindowVisible") + procLoadKeyboardLayoutW = moduser32.NewProc("LoadKeyboardLayoutW") procMessageBoxW = moduser32.NewProc("MessageBoxW") + procToUnicodeEx = moduser32.NewProc("ToUnicodeEx") + procUnloadKeyboardLayout = moduser32.NewProc("UnloadKeyboardLayout") procCreateEnvironmentBlock = moduserenv.NewProc("CreateEnvironmentBlock") procDestroyEnvironmentBlock = moduserenv.NewProc("DestroyEnvironmentBlock") procGetUserProfileDirectoryW = moduserenv.NewProc("GetUserProfileDirectoryW") @@ -789,6 +793,14 @@ func FreeSid(sid *SID) (err error) { return } +func GetAce(acl *ACL, aceIndex uint32, pAce **ACCESS_ALLOWED_ACE) (err error) { + r1, _, e1 := syscall.Syscall(procGetAce.Addr(), 3, uintptr(unsafe.Pointer(acl)), uintptr(aceIndex), uintptr(unsafe.Pointer(pAce))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func GetLengthSid(sid *SID) (len uint32) { r0, _, _ := syscall.Syscall(procGetLengthSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) len = uint32(r0) @@ -1225,14 +1237,6 @@ func setEntriesInAcl(countExplicitEntries uint32, explicitEntries *EXPLICIT_ACCE return } -func GetAce(acl *ACL, aceIndex uint32, pAce **ACCESS_ALLOWED_ACE) (ret error) { - r0, _, _ := syscall.Syscall(procGetAce.Addr(), 3, uintptr(unsafe.Pointer(acl)), uintptr(aceIndex), uintptr(unsafe.Pointer(pAce))) - if r0 == 0 { - ret = GetLastError() - } - return -} - func SetKernelObjectSecurity(handle Handle, securityInformation SECURITY_INFORMATION, securityDescriptor *SECURITY_DESCRIPTOR) (err error) { r1, _, e1 := syscall.Syscall(procSetKernelObjectSecurity.Addr(), 3, uintptr(handle), uintptr(securityInformation), uintptr(unsafe.Pointer(securityDescriptor))) if r1 == 0 { @@ -4082,6 +4086,12 @@ func GetGUIThreadInfo(thread uint32, info *GUIThreadInfo) (err error) { return } +func GetKeyboardLayout(tid uint32) (hkl Handle) { + r0, _, _ := syscall.Syscall(procGetKeyboardLayout.Addr(), 1, uintptr(tid), 0, 0) + hkl = Handle(r0) + return +} + func GetShellWindow() (shellWindow HWND) { r0, _, _ := syscall.Syscall(procGetShellWindow.Addr(), 0, 0, 0, 0) shellWindow = HWND(r0) @@ -4115,6 +4125,15 @@ func IsWindowVisible(hwnd HWND) (isVisible bool) { return } +func LoadKeyboardLayout(name *uint16, flags uint32) (hkl Handle, err error) { + r0, _, e1 := syscall.Syscall(procLoadKeyboardLayoutW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(flags), 0) + hkl = Handle(r0) + if hkl == 0 { + err = errnoErr(e1) + } + return +} + 
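This bump also adds user32 keyboard-layout bindings: LoadKeyboardLayout, UnloadKeyboardLayout, GetKeyboardLayout, and ToUnicodeEx, plus the KLF_* flags below. A sketch that loads the US-English layout and translates the 'A' virtual key through it; the KLID string and key code are illustrative:

```go
//go:build windows

package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/windows"
)

func main() {
	// "00000409" is the KLID string for US English.
	name, err := windows.UTF16PtrFromString("00000409")
	if err != nil {
		log.Fatal(err)
	}
	hkl, err := windows.LoadKeyboardLayout(name, windows.KLF_ACTIVATE)
	if err != nil {
		log.Fatalf("LoadKeyboardLayout: %v", err)
	}
	defer windows.UnloadKeyboardLayout(hkl)

	// Translate virtual-key 0x41 ('A') with no modifiers held.
	var state [256]byte
	buf := make([]uint16, 8)
	n := windows.ToUnicodeEx(0x41, 0, &state[0], &buf[0], int32(len(buf)), 0, hkl)
	if n > 0 {
		fmt.Println(windows.UTF16ToString(buf[:n])) // "a"
	}
}
```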
func MessageBox(hwnd HWND, text *uint16, caption *uint16, boxtype uint32) (ret int32, err error) { r0, _, e1 := syscall.Syscall6(procMessageBoxW.Addr(), 4, uintptr(hwnd), uintptr(unsafe.Pointer(text)), uintptr(unsafe.Pointer(caption)), uintptr(boxtype), 0, 0) ret = int32(r0) @@ -4124,6 +4143,20 @@ func MessageBox(hwnd HWND, text *uint16, caption *uint16, boxtype uint32) (ret i return } +func ToUnicodeEx(vkey uint32, scancode uint32, keystate *byte, pwszBuff *uint16, cchBuff int32, flags uint32, hkl Handle) (ret int32) { + r0, _, _ := syscall.Syscall9(procToUnicodeEx.Addr(), 7, uintptr(vkey), uintptr(scancode), uintptr(unsafe.Pointer(keystate)), uintptr(unsafe.Pointer(pwszBuff)), uintptr(cchBuff), uintptr(flags), uintptr(hkl), 0, 0) + ret = int32(r0) + return +} + +func UnloadKeyboardLayout(hkl Handle) (err error) { + r1, _, e1 := syscall.Syscall(procUnloadKeyboardLayout.Addr(), 1, uintptr(hkl), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func CreateEnvironmentBlock(block **uint16, token Token, inheritExisting bool) (err error) { var _p0 uint32 if inheritExisting { diff --git a/vendor/golang.org/x/text/LICENSE b/vendor/golang.org/x/text/LICENSE index 6a66aea5ea..2a7cf70da6 100644 --- a/vendor/golang.org/x/text/LICENSE +++ b/vendor/golang.org/x/text/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/vendor/golang.org/x/time/LICENSE b/vendor/golang.org/x/time/LICENSE index 6a66aea5ea..2a7cf70da6 100644 --- a/vendor/golang.org/x/time/LICENSE +++ b/vendor/golang.org/x/time/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/vendor/golang.org/x/tools/LICENSE b/vendor/golang.org/x/tools/LICENSE index 6a66aea5ea..2a7cf70da6 100644 --- a/vendor/golang.org/x/tools/LICENSE +++ b/vendor/golang.org/x/tools/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. 
copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go b/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go deleted file mode 100644 index c6e7c0d442..0000000000 --- a/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package packagesdriver fetches type sizes for go/packages and go/analysis. -package packagesdriver - -import ( - "context" - "fmt" - "strings" - - "golang.org/x/tools/internal/gocommand" -) - -// TODO(adonovan): move back into go/packages. -func GetSizesForArgsGolist(ctx context.Context, inv gocommand.Invocation, gocmdRunner *gocommand.Runner) (string, string, error) { - inv.Verb = "list" - inv.Args = []string{"-f", "{{context.GOARCH}} {{context.Compiler}}", "--", "unsafe"} - stdout, stderr, friendlyErr, rawErr := gocmdRunner.RunRaw(ctx, inv) - var goarch, compiler string - if rawErr != nil { - rawErrMsg := rawErr.Error() - if strings.Contains(rawErrMsg, "cannot find main module") || - strings.Contains(rawErrMsg, "go.mod file not found") { - // User's running outside of a module. - // All bets are off. Get GOARCH and guess compiler is gc. - // TODO(matloob): Is this a problem in practice? - inv.Verb = "env" - inv.Args = []string{"GOARCH"} - envout, enverr := gocmdRunner.Run(ctx, inv) - if enverr != nil { - return "", "", enverr - } - goarch = strings.TrimSpace(envout.String()) - compiler = "gc" - } else if friendlyErr != nil { - return "", "", friendlyErr - } else { - // This should be unreachable, but be defensive - // in case RunRaw's error results are inconsistent. 
- return "", "", rawErr - } - } else { - fields := strings.Fields(stdout.String()) - if len(fields) < 2 { - return "", "", fmt.Errorf("could not parse GOARCH and Go compiler in format \" \":\nstdout: <<%s>>\nstderr: <<%s>>", - stdout.String(), stderr.String()) - } - goarch = fields[0] - compiler = fields[1] - } - return compiler, goarch, nil -} diff --git a/vendor/golang.org/x/tools/go/packages/golist.go b/vendor/golang.org/x/tools/go/packages/golist.go index d9be410aa1..1a3a5b44f5 100644 --- a/vendor/golang.org/x/tools/go/packages/golist.go +++ b/vendor/golang.org/x/tools/go/packages/golist.go @@ -21,7 +21,6 @@ import ( "sync" "unicode" - "golang.org/x/tools/go/internal/packagesdriver" "golang.org/x/tools/internal/gocommand" "golang.org/x/tools/internal/packagesinternal" ) @@ -149,7 +148,7 @@ func goListDriver(cfg *Config, patterns ...string) (_ *DriverResponse, err error if cfg.Mode&NeedTypesSizes != 0 || cfg.Mode&NeedTypes != 0 { errCh := make(chan error) go func() { - compiler, arch, err := packagesdriver.GetSizesForArgsGolist(ctx, state.cfgInvocation(), cfg.gocmdRunner) + compiler, arch, err := getSizesForArgs(ctx, state.cfgInvocation(), cfg.gocmdRunner) response.dr.Compiler = compiler response.dr.Arch = arch errCh <- err @@ -1024,3 +1023,44 @@ func cmdDebugStr(cmd *exec.Cmd) string { } return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v GOPROXY=%v PWD=%v %v", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["GOPROXY"], env["PWD"], strings.Join(args, " ")) } + +// getSizesForArgs queries 'go list' for the appropriate +// Compiler and GOARCH arguments to pass to [types.SizesFor]. +func getSizesForArgs(ctx context.Context, inv gocommand.Invocation, gocmdRunner *gocommand.Runner) (string, string, error) { + inv.Verb = "list" + inv.Args = []string{"-f", "{{context.GOARCH}} {{context.Compiler}}", "--", "unsafe"} + stdout, stderr, friendlyErr, rawErr := gocmdRunner.RunRaw(ctx, inv) + var goarch, compiler string + if rawErr != nil { + rawErrMsg := rawErr.Error() + if strings.Contains(rawErrMsg, "cannot find main module") || + strings.Contains(rawErrMsg, "go.mod file not found") { + // User's running outside of a module. + // All bets are off. Get GOARCH and guess compiler is gc. + // TODO(matloob): Is this a problem in practice? + inv.Verb = "env" + inv.Args = []string{"GOARCH"} + envout, enverr := gocmdRunner.Run(ctx, inv) + if enverr != nil { + return "", "", enverr + } + goarch = strings.TrimSpace(envout.String()) + compiler = "gc" + } else if friendlyErr != nil { + return "", "", friendlyErr + } else { + // This should be unreachable, but be defensive + // in case RunRaw's error results are inconsistent. 
+ return "", "", rawErr + } + } else { + fields := strings.Fields(stdout.String()) + if len(fields) < 2 { + return "", "", fmt.Errorf("could not parse GOARCH and Go compiler in format \" \":\nstdout: <<%s>>\nstderr: <<%s>>", + stdout.String(), stderr.String()) + } + goarch = fields[0] + compiler = fields[1] + } + return compiler, goarch, nil +} diff --git a/vendor/golang.org/x/tools/go/packages/packages.go b/vendor/golang.org/x/tools/go/packages/packages.go index 34306ddd39..0b6bfaff80 100644 --- a/vendor/golang.org/x/tools/go/packages/packages.go +++ b/vendor/golang.org/x/tools/go/packages/packages.go @@ -46,7 +46,6 @@ import ( // // Unfortunately there are a number of open bugs related to // interactions among the LoadMode bits: -// - https://github.com/golang/go/issues/48226 // - https://github.com/golang/go/issues/56633 // - https://github.com/golang/go/issues/56677 // - https://github.com/golang/go/issues/58726 @@ -76,7 +75,7 @@ const ( // NeedTypes adds Types, Fset, and IllTyped. NeedTypes - // NeedSyntax adds Syntax. + // NeedSyntax adds Syntax and Fset. NeedSyntax // NeedTypesInfo adds TypesInfo. @@ -961,12 +960,14 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) { } if ld.requestedMode&NeedTypes == 0 { ld.pkgs[i].Types = nil - ld.pkgs[i].Fset = nil ld.pkgs[i].IllTyped = false } if ld.requestedMode&NeedSyntax == 0 { ld.pkgs[i].Syntax = nil } + if ld.requestedMode&NeedTypes == 0 && ld.requestedMode&NeedSyntax == 0 { + ld.pkgs[i].Fset = nil + } if ld.requestedMode&NeedTypesInfo == 0 { ld.pkgs[i].TypesInfo = nil } @@ -1499,6 +1500,10 @@ func impliedLoadMode(loadMode LoadMode) LoadMode { // All these things require knowing the import graph. loadMode |= NeedImports } + if loadMode&NeedTypes != 0 { + // Types require the GoVersion from Module. + loadMode |= NeedModule + } return loadMode } diff --git a/vendor/golang.org/x/tools/go/packages/visit.go b/vendor/golang.org/x/tools/go/packages/visit.go index a1dcc40b72..df14ffd94d 100644 --- a/vendor/golang.org/x/tools/go/packages/visit.go +++ b/vendor/golang.org/x/tools/go/packages/visit.go @@ -49,11 +49,20 @@ func Visit(pkgs []*Package, pre func(*Package) bool, post func(*Package)) { // PrintErrors returns the number of errors printed. func PrintErrors(pkgs []*Package) int { var n int + errModules := make(map[*Module]bool) Visit(pkgs, nil, func(pkg *Package) { for _, err := range pkg.Errors { fmt.Fprintln(os.Stderr, err) n++ } + + // Print pkg.Module.Error once if present. + mod := pkg.Module + if mod != nil && mod.Error != nil && !errModules[mod] { + errModules[mod] = true + fmt.Fprintln(os.Stderr, mod.Error.Err) + n++ + } }) return n } diff --git a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go index a2386c347a..9ada177758 100644 --- a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go +++ b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go @@ -51,7 +51,7 @@ type Path string // // PO package->object Package.Scope.Lookup // OT object->type Object.Type -// TT type->type Type.{Elem,Key,Params,Results,Underlying} [EKPRU] +// TT type->type Type.{Elem,Key,{,{,Recv}Type}Params,Results,Underlying,Rhs} [EKPRUTrCa] // TO type->object Type.{At,Field,Method,Obj} [AFMO] // // All valid paths start with a package and end at an object @@ -63,8 +63,8 @@ type Path string // - The only PO operator is Package.Scope.Lookup, which requires an identifier. // - The only OT operator is Object.Type, // which we encode as '.' 
because dot cannot appear in an identifier. -// - The TT operators are encoded as [EKPRUTC]; -// one of these (TypeParam) requires an integer operand, +// - The TT operators are encoded as [EKPRUTrCa]; +// two of these ({,Recv}TypeParams) require an integer operand, // which is encoded as a string of decimal digits. // - The TO operators are encoded as [AFMO]; // three of these (At,Field,Method) require an integer operand, @@ -98,19 +98,21 @@ const ( opType = '.' // .Type() (Object) // type->type operators - opElem = 'E' // .Elem() (Pointer, Slice, Array, Chan, Map) - opKey = 'K' // .Key() (Map) - opParams = 'P' // .Params() (Signature) - opResults = 'R' // .Results() (Signature) - opUnderlying = 'U' // .Underlying() (Named) - opTypeParam = 'T' // .TypeParams.At(i) (Named, Signature) - opConstraint = 'C' // .Constraint() (TypeParam) + opElem = 'E' // .Elem() (Pointer, Slice, Array, Chan, Map) + opKey = 'K' // .Key() (Map) + opParams = 'P' // .Params() (Signature) + opResults = 'R' // .Results() (Signature) + opUnderlying = 'U' // .Underlying() (Named) + opTypeParam = 'T' // .TypeParams.At(i) (Named, Signature) + opRecvTypeParam = 'r' // .RecvTypeParams.At(i) (Signature) + opConstraint = 'C' // .Constraint() (TypeParam) + opRhs = 'a' // .Rhs() (Alias) // type->object operators - opAt = 'A' // .At(i) (Tuple) - opField = 'F' // .Field(i) (Struct) - opMethod = 'M' // .Method(i) (Named or Interface; not Struct: "promoted" names are ignored) - opObj = 'O' // .Obj() (Named, TypeParam) + opAt = 'A' // .At(i) (Tuple) + opField = 'F' // .Field(i) (Struct) + opMethod = 'M' // .Method(i) (Named or Interface; not Struct: "promoted" names are ignored) + opObj = 'O' // .Obj() (Named, TypeParam) ) // For is equivalent to new(Encoder).For(obj). @@ -278,21 +280,26 @@ func (enc *Encoder) For(obj types.Object) (Path, error) { path = append(path, opType) T := o.Type() + if alias, ok := T.(*aliases.Alias); ok { + if r := findTypeParam(obj, aliases.TypeParams(alias), path, opTypeParam, nil); r != nil { + return Path(r), nil + } + if r := find(obj, aliases.Rhs(alias), append(path, opRhs), nil); r != nil { + return Path(r), nil + } - if tname.IsAlias() { - // type alias + } else if tname.IsAlias() { + // legacy alias if r := find(obj, T, path, nil); r != nil { return Path(r), nil } - } else { - if named, _ := T.(*types.Named); named != nil { - if r := findTypeParam(obj, named.TypeParams(), path, nil); r != nil { - // generic named type - return Path(r), nil - } - } + + } else if named, ok := T.(*types.Named); ok { // defined (named) type - if r := find(obj, T.Underlying(), append(path, opUnderlying), nil); r != nil { + if r := findTypeParam(obj, named.TypeParams(), path, opTypeParam, nil); r != nil { + return Path(r), nil + } + if r := find(obj, named.Underlying(), append(path, opUnderlying), nil); r != nil { return Path(r), nil } } @@ -462,7 +469,10 @@ func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName] } return find(obj, T.Elem(), append(path, opElem), seen) case *types.Signature: - if r := findTypeParam(obj, T.TypeParams(), path, seen); r != nil { + if r := findTypeParam(obj, T.RecvTypeParams(), path, opRecvTypeParam, nil); r != nil { + return r + } + if r := findTypeParam(obj, T.TypeParams(), path, opTypeParam, seen); r != nil { return r } if r := find(obj, T.Params(), append(path, opParams), seen); r != nil { @@ -525,10 +535,10 @@ func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName] panic(T) } -func findTypeParam(obj types.Object, list 
*types.TypeParamList, path []byte, seen map[*types.TypeName]bool) []byte { +func findTypeParam(obj types.Object, list *types.TypeParamList, path []byte, op byte, seen map[*types.TypeName]bool) []byte { for i := 0; i < list.Len(); i++ { tparam := list.At(i) - path2 := appendOpArg(path, opTypeParam, i) + path2 := appendOpArg(path, op, i) if r := find(obj, tparam, path2, seen); r != nil { return r } @@ -580,10 +590,10 @@ func Object(pkg *types.Package, p Path) (types.Object, error) { code := suffix[0] suffix = suffix[1:] - // Codes [AFM] have an integer operand. + // Codes [AFMTr] have an integer operand. var index int switch code { - case opAt, opField, opMethod, opTypeParam: + case opAt, opField, opMethod, opTypeParam, opRecvTypeParam: rest := strings.TrimLeft(suffix, "0123456789") numerals := suffix[:len(suffix)-len(rest)] suffix = rest @@ -653,6 +663,16 @@ func Object(pkg *types.Package, p Path) (types.Object, error) { } t = named.Underlying() + case opRhs: + if alias, ok := t.(*aliases.Alias); ok { + t = aliases.Rhs(alias) + } else if false && aliases.Enabled() { + // The Enabled check is too expensive, so for now we + // simply assume that aliases are not enabled. + // TODO(adonovan): replace with "if true {" when go1.24 is assured. + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want alias)", code, t, t) + } + case opTypeParam: hasTypeParams, ok := t.(hasTypeParams) // Named, Signature if !ok { @@ -664,6 +684,17 @@ func Object(pkg *types.Package, p Path) (types.Object, error) { } t = tparams.At(index) + case opRecvTypeParam: + sig, ok := t.(*types.Signature) // Signature + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want signature)", code, t, t) + } + rtparams := sig.RecvTypeParams() + if n := rtparams.Len(); index >= n { + return nil, fmt.Errorf("tuple index %d out of range [0-%d)", index, n) + } + t = rtparams.At(index) + case opConstraint: tparam, ok := t.(*types.TypeParam) if !ok { @@ -725,6 +756,10 @@ func Object(pkg *types.Package, p Path) (types.Object, error) { } } + if obj == nil { + panic(p) // path does not end in an object-valued operator + } + if obj.Pkg() != pkg { return nil, fmt.Errorf("path denotes %s, which belongs to a different package", obj) } diff --git a/vendor/golang.org/x/tools/internal/aliases/aliases_go121.go b/vendor/golang.org/x/tools/internal/aliases/aliases_go121.go index c027b9f315..6652f7db0f 100644 --- a/vendor/golang.org/x/tools/internal/aliases/aliases_go121.go +++ b/vendor/golang.org/x/tools/internal/aliases/aliases_go121.go @@ -15,10 +15,14 @@ import ( // It will never be created by go/types. type Alias struct{} -func (*Alias) String() string { panic("unreachable") } -func (*Alias) Underlying() types.Type { panic("unreachable") } -func (*Alias) Obj() *types.TypeName { panic("unreachable") } -func Rhs(alias *Alias) types.Type { panic("unreachable") } +func (*Alias) String() string { panic("unreachable") } +func (*Alias) Underlying() types.Type { panic("unreachable") } +func (*Alias) Obj() *types.TypeName { panic("unreachable") } +func Rhs(alias *Alias) types.Type { panic("unreachable") } +func TypeParams(alias *Alias) *types.TypeParamList { panic("unreachable") } +func SetTypeParams(alias *Alias, tparams []*types.TypeParam) { panic("unreachable") } +func TypeArgs(alias *Alias) *types.TypeList { panic("unreachable") } +func Origin(alias *Alias) *Alias { panic("unreachable") } // Unalias returns the type t for go <=1.21. 
func Unalias(t types.Type) types.Type { return t } diff --git a/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go b/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go index b329954841..3ef1afeb40 100644 --- a/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go +++ b/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go @@ -28,6 +28,42 @@ func Rhs(alias *Alias) types.Type { return Unalias(alias) } +// TypeParams returns the type parameter list of the alias. +func TypeParams(alias *Alias) *types.TypeParamList { + if alias, ok := any(alias).(interface{ TypeParams() *types.TypeParamList }); ok { + return alias.TypeParams() // go1.23+ + } + return nil +} + +// SetTypeParams sets the type parameters of the alias type. +func SetTypeParams(alias *Alias, tparams []*types.TypeParam) { + if alias, ok := any(alias).(interface { + SetTypeParams(tparams []*types.TypeParam) + }); ok { + alias.SetTypeParams(tparams) // go1.23+ + } else if len(tparams) > 0 { + panic("cannot set type parameters of an Alias type in go1.22") + } +} + +// TypeArgs returns the type arguments used to instantiate the Alias type. +func TypeArgs(alias *Alias) *types.TypeList { + if alias, ok := any(alias).(interface{ TypeArgs() *types.TypeList }); ok { + return alias.TypeArgs() // go1.23+ + } + return nil // empty (go1.22) +} + +// Origin returns the generic Alias type of which alias is an instance. +// If alias is not an instance of a generic alias, Origin returns alias. +func Origin(alias *Alias) *Alias { + if alias, ok := any(alias).(interface{ Origin() *types.Alias }); ok { + return alias.Origin() // go1.23+ + } + return alias // not an instance of a generic alias (go1.22) +} + // Unalias is a wrapper of types.Unalias. func Unalias(t types.Type) types.Type { return types.Unalias(t) } diff --git a/vendor/golang.org/x/tools/internal/gocommand/invoke.go b/vendor/golang.org/x/tools/internal/gocommand/invoke.go index af0ee6c614..2e59ff8558 100644 --- a/vendor/golang.org/x/tools/internal/gocommand/invoke.go +++ b/vendor/golang.org/x/tools/internal/gocommand/invoke.go @@ -200,12 +200,14 @@ func (i *Invocation) runWithFriendlyError(ctx context.Context, stdout, stderr io return } -func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error { - log := i.Logf - if log == nil { - log = func(string, ...interface{}) {} +// logf logs if i.Logf is non-nil. +func (i *Invocation) logf(format string, args ...any) { + if i.Logf != nil { + i.Logf(format, args...) } +} +func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error { goArgs := []string{i.Verb} appendModFile := func() { @@ -277,7 +279,12 @@ func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error { cmd.Dir = i.WorkingDir } - defer func(start time.Time) { log("%s for %v", time.Since(start), cmdDebugStr(cmd)) }(time.Now()) + debugStr := cmdDebugStr(cmd) + i.logf("starting %v", debugStr) + start := time.Now() + defer func() { + i.logf("%s for %v", time.Since(start), debugStr) + }() return runCmdContext(ctx, cmd) } @@ -514,7 +521,7 @@ func WriteOverlays(overlay map[string][]byte) (filename string, cleanup func(), for k, v := range overlay { // Use a unique basename for each file (001-foo.go), // to avoid creating nested directories. 
- base := fmt.Sprintf("%d-%s.go", 1+len(overlays), filepath.Base(k)) + base := fmt.Sprintf("%d-%s", 1+len(overlays), filepath.Base(k)) filename := filepath.Join(dir, base) err := os.WriteFile(filename, v, 0666) if err != nil { diff --git a/vendor/golang.org/x/tools/internal/pkgbits/decoder.go b/vendor/golang.org/x/tools/internal/pkgbits/decoder.go index 2acd85851e..b92e8e6eb3 100644 --- a/vendor/golang.org/x/tools/internal/pkgbits/decoder.go +++ b/vendor/golang.org/x/tools/internal/pkgbits/decoder.go @@ -23,9 +23,6 @@ type PkgDecoder struct { // version is the file format version. version uint32 - // aliases determines whether types.Aliases should be created - aliases bool - // sync indicates whether the file uses sync markers. sync bool @@ -76,7 +73,6 @@ func (pr *PkgDecoder) SyncMarkers() bool { return pr.sync } func NewPkgDecoder(pkgPath, input string) PkgDecoder { pr := PkgDecoder{ pkgPath: pkgPath, - //aliases: aliases.Enabled(), } // TODO(mdempsky): Implement direct indexing of input string to diff --git a/vendor/golang.org/x/tools/internal/stdlib/manifest.go b/vendor/golang.org/x/tools/internal/stdlib/manifest.go index fd6892075e..a928acf29f 100644 --- a/vendor/golang.org/x/tools/internal/stdlib/manifest.go +++ b/vendor/golang.org/x/tools/internal/stdlib/manifest.go @@ -23,6 +23,7 @@ var PackageSymbols = map[string][]Symbol{ {"ErrWriteAfterClose", Var, 0}, {"ErrWriteTooLong", Var, 0}, {"FileInfoHeader", Func, 1}, + {"FileInfoNames", Type, 23}, {"Format", Type, 10}, {"FormatGNU", Const, 10}, {"FormatPAX", Const, 10}, @@ -820,6 +821,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*ConnectionState).ExportKeyingMaterial", Method, 11}, {"(*Dialer).Dial", Method, 15}, {"(*Dialer).DialContext", Method, 15}, + {"(*ECHRejectionError).Error", Method, 23}, {"(*QUICConn).Close", Method, 21}, {"(*QUICConn).ConnectionState", Method, 21}, {"(*QUICConn).HandleData", Method, 21}, @@ -827,6 +829,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*QUICConn).SendSessionTicket", Method, 21}, {"(*QUICConn).SetTransportParameters", Method, 21}, {"(*QUICConn).Start", Method, 21}, + {"(*QUICConn).StoreSession", Method, 23}, {"(*SessionState).Bytes", Method, 21}, {"(AlertError).Error", Method, 21}, {"(ClientAuthType).String", Method, 15}, @@ -877,6 +880,8 @@ var PackageSymbols = map[string][]Symbol{ {"Config.ClientSessionCache", Field, 3}, {"Config.CurvePreferences", Field, 3}, {"Config.DynamicRecordSizingDisabled", Field, 7}, + {"Config.EncryptedClientHelloConfigList", Field, 23}, + {"Config.EncryptedClientHelloRejectionVerify", Field, 23}, {"Config.GetCertificate", Field, 4}, {"Config.GetClientCertificate", Field, 8}, {"Config.GetConfigForClient", Field, 8}, @@ -902,6 +907,7 @@ var PackageSymbols = map[string][]Symbol{ {"ConnectionState", Type, 0}, {"ConnectionState.CipherSuite", Field, 0}, {"ConnectionState.DidResume", Field, 1}, + {"ConnectionState.ECHAccepted", Field, 23}, {"ConnectionState.HandshakeComplete", Field, 0}, {"ConnectionState.NegotiatedProtocol", Field, 0}, {"ConnectionState.NegotiatedProtocolIsMutual", Field, 0}, @@ -925,6 +931,8 @@ var PackageSymbols = map[string][]Symbol{ {"ECDSAWithP384AndSHA384", Const, 8}, {"ECDSAWithP521AndSHA512", Const, 8}, {"ECDSAWithSHA1", Const, 10}, + {"ECHRejectionError", Type, 23}, + {"ECHRejectionError.RetryConfigList", Field, 23}, {"Ed25519", Const, 13}, {"InsecureCipherSuites", Func, 14}, {"Listen", Func, 0}, @@ -943,6 +951,7 @@ var PackageSymbols = map[string][]Symbol{ {"ParseSessionState", Func, 21}, {"QUICClient", Func, 21}, {"QUICConfig", Type, 21}, + 
{"QUICConfig.EnableStoreSessionEvent", Field, 23}, {"QUICConfig.TLSConfig", Field, 21}, {"QUICConn", Type, 21}, {"QUICEncryptionLevel", Type, 21}, @@ -954,16 +963,20 @@ var PackageSymbols = map[string][]Symbol{ {"QUICEvent.Data", Field, 21}, {"QUICEvent.Kind", Field, 21}, {"QUICEvent.Level", Field, 21}, + {"QUICEvent.SessionState", Field, 23}, {"QUICEvent.Suite", Field, 21}, {"QUICEventKind", Type, 21}, {"QUICHandshakeDone", Const, 21}, {"QUICNoEvent", Const, 21}, {"QUICRejectedEarlyData", Const, 21}, + {"QUICResumeSession", Const, 23}, {"QUICServer", Func, 21}, {"QUICSessionTicketOptions", Type, 21}, {"QUICSessionTicketOptions.EarlyData", Field, 21}, + {"QUICSessionTicketOptions.Extra", Field, 23}, {"QUICSetReadSecret", Const, 21}, {"QUICSetWriteSecret", Const, 21}, + {"QUICStoreSession", Const, 23}, {"QUICTransportParameters", Const, 21}, {"QUICTransportParametersRequired", Const, 21}, {"QUICWriteData", Const, 21}, @@ -1036,6 +1049,8 @@ var PackageSymbols = map[string][]Symbol{ {"(*Certificate).Verify", Method, 0}, {"(*Certificate).VerifyHostname", Method, 0}, {"(*CertificateRequest).CheckSignature", Method, 5}, + {"(*OID).UnmarshalBinary", Method, 23}, + {"(*OID).UnmarshalText", Method, 23}, {"(*RevocationList).CheckSignatureFrom", Method, 19}, {"(CertificateInvalidError).Error", Method, 0}, {"(ConstraintViolationError).Error", Method, 0}, @@ -1043,6 +1058,8 @@ var PackageSymbols = map[string][]Symbol{ {"(InsecureAlgorithmError).Error", Method, 6}, {"(OID).Equal", Method, 22}, {"(OID).EqualASN1OID", Method, 22}, + {"(OID).MarshalBinary", Method, 23}, + {"(OID).MarshalText", Method, 23}, {"(OID).String", Method, 22}, {"(PublicKeyAlgorithm).String", Method, 10}, {"(SignatureAlgorithm).String", Method, 6}, @@ -1196,6 +1213,7 @@ var PackageSymbols = map[string][]Symbol{ {"ParseCertificates", Func, 0}, {"ParseDERCRL", Func, 0}, {"ParseECPrivateKey", Func, 1}, + {"ParseOID", Func, 23}, {"ParsePKCS1PrivateKey", Func, 0}, {"ParsePKCS1PublicKey", Func, 10}, {"ParsePKCS8PrivateKey", Func, 0}, @@ -2541,6 +2559,7 @@ var PackageSymbols = map[string][]Symbol{ {"PT_NOTE", Const, 0}, {"PT_NULL", Const, 0}, {"PT_OPENBSD_BOOTDATA", Const, 16}, + {"PT_OPENBSD_NOBTCFI", Const, 23}, {"PT_OPENBSD_RANDOMIZE", Const, 16}, {"PT_OPENBSD_WXNEEDED", Const, 16}, {"PT_PAX_FLAGS", Const, 16}, @@ -3620,13 +3639,16 @@ var PackageSymbols = map[string][]Symbol{ {"STT_COMMON", Const, 0}, {"STT_FILE", Const, 0}, {"STT_FUNC", Const, 0}, + {"STT_GNU_IFUNC", Const, 23}, {"STT_HIOS", Const, 0}, {"STT_HIPROC", Const, 0}, {"STT_LOOS", Const, 0}, {"STT_LOPROC", Const, 0}, {"STT_NOTYPE", Const, 0}, {"STT_OBJECT", Const, 0}, + {"STT_RELC", Const, 23}, {"STT_SECTION", Const, 0}, + {"STT_SRELC", Const, 23}, {"STT_TLS", Const, 0}, {"STV_DEFAULT", Const, 0}, {"STV_HIDDEN", Const, 0}, @@ -4544,11 +4566,14 @@ var PackageSymbols = map[string][]Symbol{ {"URLEncoding", Var, 0}, }, "encoding/binary": { + {"Append", Func, 23}, {"AppendByteOrder", Type, 19}, {"AppendUvarint", Func, 19}, {"AppendVarint", Func, 19}, {"BigEndian", Var, 0}, {"ByteOrder", Type, 0}, + {"Decode", Func, 23}, + {"Encode", Func, 23}, {"LittleEndian", Var, 0}, {"MaxVarintLen16", Const, 0}, {"MaxVarintLen32", Const, 0}, @@ -5308,6 +5333,7 @@ var PackageSymbols = map[string][]Symbol{ {"ParenExpr.Rparen", Field, 0}, {"ParenExpr.X", Field, 0}, {"Pkg", Const, 0}, + {"Preorder", Func, 23}, {"Print", Func, 0}, {"RECV", Const, 0}, {"RangeStmt", Type, 0}, @@ -5898,7 +5924,12 @@ var PackageSymbols = map[string][]Symbol{ }, "go/types": { {"(*Alias).Obj", Method, 22}, + 
{"(*Alias).Origin", Method, 23}, + {"(*Alias).Rhs", Method, 23}, + {"(*Alias).SetTypeParams", Method, 23}, {"(*Alias).String", Method, 22}, + {"(*Alias).TypeArgs", Method, 23}, + {"(*Alias).TypeParams", Method, 23}, {"(*Alias).Underlying", Method, 22}, {"(*ArgumentError).Error", Method, 18}, {"(*ArgumentError).Unwrap", Method, 18}, @@ -5943,6 +5974,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*Func).Pkg", Method, 5}, {"(*Func).Pos", Method, 5}, {"(*Func).Scope", Method, 5}, + {"(*Func).Signature", Method, 23}, {"(*Func).String", Method, 5}, {"(*Func).Type", Method, 5}, {"(*Info).ObjectOf", Method, 5}, @@ -6992,6 +7024,12 @@ var PackageSymbols = map[string][]Symbol{ {"TempFile", Func, 0}, {"WriteFile", Func, 0}, }, + "iter": { + {"Pull", Func, 23}, + {"Pull2", Func, 23}, + {"Seq", Type, 23}, + {"Seq2", Type, 23}, + }, "log": { {"(*Logger).Fatal", Method, 0}, {"(*Logger).Fatalf", Method, 0}, @@ -7222,11 +7260,16 @@ var PackageSymbols = map[string][]Symbol{ {"Writer", Type, 0}, }, "maps": { + {"All", Func, 23}, {"Clone", Func, 21}, + {"Collect", Func, 23}, {"Copy", Func, 21}, {"DeleteFunc", Func, 21}, {"Equal", Func, 21}, {"EqualFunc", Func, 21}, + {"Insert", Func, 23}, + {"Keys", Func, 23}, + {"Values", Func, 23}, }, "math": { {"Abs", Func, 0}, @@ -7617,6 +7660,7 @@ var PackageSymbols = map[string][]Symbol{ }, "math/rand/v2": { {"(*ChaCha8).MarshalBinary", Method, 22}, + {"(*ChaCha8).Read", Method, 23}, {"(*ChaCha8).Seed", Method, 22}, {"(*ChaCha8).Uint64", Method, 22}, {"(*ChaCha8).UnmarshalBinary", Method, 22}, @@ -7636,6 +7680,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*Rand).NormFloat64", Method, 22}, {"(*Rand).Perm", Method, 22}, {"(*Rand).Shuffle", Method, 22}, + {"(*Rand).Uint", Method, 23}, {"(*Rand).Uint32", Method, 22}, {"(*Rand).Uint32N", Method, 22}, {"(*Rand).Uint64", Method, 22}, @@ -7663,6 +7708,7 @@ var PackageSymbols = map[string][]Symbol{ {"Rand", Type, 22}, {"Shuffle", Func, 22}, {"Source", Type, 22}, + {"Uint", Func, 23}, {"Uint32", Func, 22}, {"Uint32N", Func, 22}, {"Uint64", Func, 22}, @@ -7743,6 +7789,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*DNSError).Error", Method, 0}, {"(*DNSError).Temporary", Method, 0}, {"(*DNSError).Timeout", Method, 0}, + {"(*DNSError).Unwrap", Method, 23}, {"(*Dialer).Dial", Method, 1}, {"(*Dialer).DialContext", Method, 7}, {"(*Dialer).MultipathTCP", Method, 21}, @@ -7809,6 +7856,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*TCPConn).RemoteAddr", Method, 0}, {"(*TCPConn).SetDeadline", Method, 0}, {"(*TCPConn).SetKeepAlive", Method, 0}, + {"(*TCPConn).SetKeepAliveConfig", Method, 23}, {"(*TCPConn).SetKeepAlivePeriod", Method, 2}, {"(*TCPConn).SetLinger", Method, 0}, {"(*TCPConn).SetNoDelay", Method, 0}, @@ -7922,6 +7970,7 @@ var PackageSymbols = map[string][]Symbol{ {"DNSError.IsTimeout", Field, 0}, {"DNSError.Name", Field, 0}, {"DNSError.Server", Field, 0}, + {"DNSError.UnwrapErr", Field, 23}, {"DefaultResolver", Var, 8}, {"Dial", Func, 0}, {"DialIP", Func, 0}, @@ -7937,6 +7986,7 @@ var PackageSymbols = map[string][]Symbol{ {"Dialer.DualStack", Field, 2}, {"Dialer.FallbackDelay", Field, 5}, {"Dialer.KeepAlive", Field, 3}, + {"Dialer.KeepAliveConfig", Field, 23}, {"Dialer.LocalAddr", Field, 1}, {"Dialer.Resolver", Field, 8}, {"Dialer.Timeout", Field, 1}, @@ -7989,10 +8039,16 @@ var PackageSymbols = map[string][]Symbol{ {"Interfaces", Func, 0}, {"InvalidAddrError", Type, 0}, {"JoinHostPort", Func, 0}, + {"KeepAliveConfig", Type, 23}, + {"KeepAliveConfig.Count", Field, 23}, + {"KeepAliveConfig.Enable", Field, 23}, + 
{"KeepAliveConfig.Idle", Field, 23}, + {"KeepAliveConfig.Interval", Field, 23}, {"Listen", Func, 0}, {"ListenConfig", Type, 11}, {"ListenConfig.Control", Field, 11}, {"ListenConfig.KeepAlive", Field, 13}, + {"ListenConfig.KeepAliveConfig", Field, 23}, {"ListenIP", Func, 0}, {"ListenMulticastUDP", Func, 0}, {"ListenPacket", Func, 0}, @@ -8081,6 +8137,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*Request).Context", Method, 7}, {"(*Request).Cookie", Method, 0}, {"(*Request).Cookies", Method, 0}, + {"(*Request).CookiesNamed", Method, 23}, {"(*Request).FormFile", Method, 0}, {"(*Request).FormValue", Method, 0}, {"(*Request).MultipartReader", Method, 0}, @@ -8148,7 +8205,9 @@ var PackageSymbols = map[string][]Symbol{ {"Cookie.HttpOnly", Field, 0}, {"Cookie.MaxAge", Field, 0}, {"Cookie.Name", Field, 0}, + {"Cookie.Partitioned", Field, 23}, {"Cookie.Path", Field, 0}, + {"Cookie.Quoted", Field, 23}, {"Cookie.Raw", Field, 0}, {"Cookie.RawExpires", Field, 0}, {"Cookie.SameSite", Field, 11}, @@ -8225,7 +8284,9 @@ var PackageSymbols = map[string][]Symbol{ {"NoBody", Var, 8}, {"NotFound", Func, 0}, {"NotFoundHandler", Func, 0}, + {"ParseCookie", Func, 23}, {"ParseHTTPVersion", Func, 0}, + {"ParseSetCookie", Func, 23}, {"ParseTime", Func, 1}, {"Post", Func, 0}, {"PostForm", Func, 0}, @@ -8252,6 +8313,7 @@ var PackageSymbols = map[string][]Symbol{ {"Request.Host", Field, 0}, {"Request.Method", Field, 0}, {"Request.MultipartForm", Field, 0}, + {"Request.Pattern", Field, 23}, {"Request.PostForm", Field, 1}, {"Request.Proto", Field, 0}, {"Request.ProtoMajor", Field, 0}, @@ -8453,6 +8515,7 @@ var PackageSymbols = map[string][]Symbol{ {"DefaultRemoteAddr", Const, 0}, {"NewRecorder", Func, 0}, {"NewRequest", Func, 7}, + {"NewRequestWithContext", Func, 23}, {"NewServer", Func, 0}, {"NewTLSServer", Func, 0}, {"NewUnstartedServer", Func, 0}, @@ -8917,6 +8980,7 @@ var PackageSymbols = map[string][]Symbol{ {"Chown", Func, 0}, {"Chtimes", Func, 0}, {"Clearenv", Func, 0}, + {"CopyFS", Func, 23}, {"Create", Func, 0}, {"CreateTemp", Func, 16}, {"DevNull", Const, 0}, @@ -9150,6 +9214,7 @@ var PackageSymbols = map[string][]Symbol{ {"IsLocal", Func, 20}, {"Join", Func, 0}, {"ListSeparator", Const, 0}, + {"Localize", Func, 23}, {"Match", Func, 0}, {"Rel", Func, 0}, {"Separator", Const, 0}, @@ -9232,6 +9297,8 @@ var PackageSymbols = map[string][]Symbol{ {"(Value).Pointer", Method, 0}, {"(Value).Recv", Method, 0}, {"(Value).Send", Method, 0}, + {"(Value).Seq", Method, 23}, + {"(Value).Seq2", Method, 23}, {"(Value).Set", Method, 0}, {"(Value).SetBool", Method, 0}, {"(Value).SetBytes", Method, 0}, @@ -9314,6 +9381,7 @@ var PackageSymbols = map[string][]Symbol{ {"SelectSend", Const, 1}, {"SendDir", Const, 0}, {"Slice", Const, 0}, + {"SliceAt", Func, 23}, {"SliceHeader", Type, 0}, {"SliceHeader.Cap", Field, 0}, {"SliceHeader.Data", Field, 0}, @@ -9655,6 +9723,7 @@ var PackageSymbols = map[string][]Symbol{ {"BuildSetting", Type, 18}, {"BuildSetting.Key", Field, 18}, {"BuildSetting.Value", Field, 18}, + {"CrashOptions", Type, 23}, {"FreeOSMemory", Func, 1}, {"GCStats", Type, 1}, {"GCStats.LastGC", Field, 1}, @@ -9672,6 +9741,7 @@ var PackageSymbols = map[string][]Symbol{ {"PrintStack", Func, 0}, {"ReadBuildInfo", Func, 12}, {"ReadGCStats", Func, 1}, + {"SetCrashOutput", Func, 23}, {"SetGCPercent", Func, 1}, {"SetMaxStack", Func, 2}, {"SetMaxThreads", Func, 2}, @@ -9742,10 +9812,15 @@ var PackageSymbols = map[string][]Symbol{ {"WithRegion", Func, 11}, }, "slices": { + {"All", Func, 23}, + {"AppendSeq", Func, 23}, + {"Backward", 
Func, 23}, {"BinarySearch", Func, 21}, {"BinarySearchFunc", Func, 21}, + {"Chunk", Func, 23}, {"Clip", Func, 21}, {"Clone", Func, 21}, + {"Collect", Func, 23}, {"Compact", Func, 21}, {"CompactFunc", Func, 21}, {"Compare", Func, 21}, @@ -9767,11 +9842,16 @@ var PackageSymbols = map[string][]Symbol{ {"MaxFunc", Func, 21}, {"Min", Func, 21}, {"MinFunc", Func, 21}, + {"Repeat", Func, 23}, {"Replace", Func, 21}, {"Reverse", Func, 21}, {"Sort", Func, 21}, {"SortFunc", Func, 21}, {"SortStableFunc", Func, 21}, + {"Sorted", Func, 23}, + {"SortedFunc", Func, 23}, + {"SortedStableFunc", Func, 23}, + {"Values", Func, 23}, }, "sort": { {"(Float64Slice).Len", Method, 0}, @@ -9936,10 +10016,14 @@ var PackageSymbols = map[string][]Symbol{ {"TrimSpace", Func, 0}, {"TrimSuffix", Func, 1}, }, + "structs": { + {"HostLayout", Type, 23}, + }, "sync": { {"(*Cond).Broadcast", Method, 0}, {"(*Cond).Signal", Method, 0}, {"(*Cond).Wait", Method, 0}, + {"(*Map).Clear", Method, 23}, {"(*Map).CompareAndDelete", Method, 20}, {"(*Map).CompareAndSwap", Method, 20}, {"(*Map).Delete", Method, 9}, @@ -9986,13 +10070,17 @@ var PackageSymbols = map[string][]Symbol{ {"(*Bool).Store", Method, 19}, {"(*Bool).Swap", Method, 19}, {"(*Int32).Add", Method, 19}, + {"(*Int32).And", Method, 23}, {"(*Int32).CompareAndSwap", Method, 19}, {"(*Int32).Load", Method, 19}, + {"(*Int32).Or", Method, 23}, {"(*Int32).Store", Method, 19}, {"(*Int32).Swap", Method, 19}, {"(*Int64).Add", Method, 19}, + {"(*Int64).And", Method, 23}, {"(*Int64).CompareAndSwap", Method, 19}, {"(*Int64).Load", Method, 19}, + {"(*Int64).Or", Method, 23}, {"(*Int64).Store", Method, 19}, {"(*Int64).Swap", Method, 19}, {"(*Pointer).CompareAndSwap", Method, 19}, @@ -10000,18 +10088,24 @@ var PackageSymbols = map[string][]Symbol{ {"(*Pointer).Store", Method, 19}, {"(*Pointer).Swap", Method, 19}, {"(*Uint32).Add", Method, 19}, + {"(*Uint32).And", Method, 23}, {"(*Uint32).CompareAndSwap", Method, 19}, {"(*Uint32).Load", Method, 19}, + {"(*Uint32).Or", Method, 23}, {"(*Uint32).Store", Method, 19}, {"(*Uint32).Swap", Method, 19}, {"(*Uint64).Add", Method, 19}, + {"(*Uint64).And", Method, 23}, {"(*Uint64).CompareAndSwap", Method, 19}, {"(*Uint64).Load", Method, 19}, + {"(*Uint64).Or", Method, 23}, {"(*Uint64).Store", Method, 19}, {"(*Uint64).Swap", Method, 19}, {"(*Uintptr).Add", Method, 19}, + {"(*Uintptr).And", Method, 23}, {"(*Uintptr).CompareAndSwap", Method, 19}, {"(*Uintptr).Load", Method, 19}, + {"(*Uintptr).Or", Method, 23}, {"(*Uintptr).Store", Method, 19}, {"(*Uintptr).Swap", Method, 19}, {"(*Value).CompareAndSwap", Method, 17}, @@ -10023,6 +10117,11 @@ var PackageSymbols = map[string][]Symbol{ {"AddUint32", Func, 0}, {"AddUint64", Func, 0}, {"AddUintptr", Func, 0}, + {"AndInt32", Func, 23}, + {"AndInt64", Func, 23}, + {"AndUint32", Func, 23}, + {"AndUint64", Func, 23}, + {"AndUintptr", Func, 23}, {"Bool", Type, 19}, {"CompareAndSwapInt32", Func, 0}, {"CompareAndSwapInt64", Func, 0}, @@ -10038,6 +10137,11 @@ var PackageSymbols = map[string][]Symbol{ {"LoadUint32", Func, 0}, {"LoadUint64", Func, 0}, {"LoadUintptr", Func, 0}, + {"OrInt32", Func, 23}, + {"OrInt64", Func, 23}, + {"OrUint32", Func, 23}, + {"OrUint64", Func, 23}, + {"OrUintptr", Func, 23}, {"Pointer", Type, 19}, {"StoreInt32", Func, 0}, {"StoreInt64", Func, 0}, @@ -16200,6 +16304,7 @@ var PackageSymbols = map[string][]Symbol{ {"WSAEACCES", Const, 2}, {"WSAECONNABORTED", Const, 9}, {"WSAECONNRESET", Const, 3}, + {"WSAENOPROTOOPT", Const, 23}, {"WSAEnumProtocols", Func, 2}, {"WSAID_CONNECTEX", Var, 1}, 
{"WSAIoctl", Func, 0}, @@ -17284,6 +17389,7 @@ var PackageSymbols = map[string][]Symbol{ {"Encode", Func, 0}, {"EncodeRune", Func, 0}, {"IsSurrogate", Func, 0}, + {"RuneLen", Func, 23}, }, "unicode/utf8": { {"AppendRune", Func, 18}, @@ -17306,6 +17412,11 @@ var PackageSymbols = map[string][]Symbol{ {"ValidRune", Func, 1}, {"ValidString", Func, 0}, }, + "unique": { + {"(Handle).Value", Method, 23}, + {"Handle", Type, 23}, + {"Make", Func, 23}, + }, "unsafe": { {"Add", Func, 0}, {"Alignof", Func, 0}, diff --git a/vendor/golang.org/x/tools/internal/typesinternal/types.go b/vendor/golang.org/x/tools/internal/typesinternal/types.go index 7c77c2fbc0..8392328612 100644 --- a/vendor/golang.org/x/tools/internal/typesinternal/types.go +++ b/vendor/golang.org/x/tools/internal/typesinternal/types.go @@ -48,3 +48,18 @@ func ReadGo116ErrorData(err types.Error) (code ErrorCode, start, end token.Pos, } return ErrorCode(data[0]), token.Pos(data[1]), token.Pos(data[2]), true } + +// NameRelativeTo returns a types.Qualifier that qualifies members of +// all packages other than pkg, using only the package name. +// (By contrast, [types.RelativeTo] uses the complete package path, +// which is often excessive.) +// +// If pkg is nil, it is equivalent to [*types.Package.Name]. +func NameRelativeTo(pkg *types.Package) types.Qualifier { + return func(other *types.Package) string { + if pkg != nil && pkg == other { + return "" // same package; unqualified + } + return other.Name() + } +} diff --git a/vendor/golang.org/x/tools/internal/versions/constraint.go b/vendor/golang.org/x/tools/internal/versions/constraint.go new file mode 100644 index 0000000000..179063d484 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/versions/constraint.go @@ -0,0 +1,13 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package versions + +import "go/build/constraint" + +// ConstraintGoVersion is constraint.GoVersion (if built with go1.21+). +// Otherwise nil. +// +// Deprecate once x/tools is after go1.21. +var ConstraintGoVersion func(x constraint.Expr) string diff --git a/vendor/golang.org/x/tools/internal/versions/constraint_go121.go b/vendor/golang.org/x/tools/internal/versions/constraint_go121.go new file mode 100644 index 0000000000..38011407d5 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/versions/constraint_go121.go @@ -0,0 +1,14 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build go1.21 +// +build go1.21 + +package versions + +import "go/build/constraint" + +func init() { + ConstraintGoVersion = constraint.GoVersion +} diff --git a/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-gen.go b/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-gen.go index 70b57dea6a..f0c6948458 100644 --- a/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-gen.go +++ b/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-gen.go @@ -217,9 +217,9 @@ type GenerateAccessTokenRequest struct { NullFields []string `json:"-"` } -func (s *GenerateAccessTokenRequest) MarshalJSON() ([]byte, error) { +func (s GenerateAccessTokenRequest) MarshalJSON() ([]byte, error) { type NoMethod GenerateAccessTokenRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type GenerateAccessTokenResponse struct { @@ -243,9 +243,9 @@ type GenerateAccessTokenResponse struct { NullFields []string `json:"-"` } -func (s *GenerateAccessTokenResponse) MarshalJSON() ([]byte, error) { +func (s GenerateAccessTokenResponse) MarshalJSON() ([]byte, error) { type NoMethod GenerateAccessTokenResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type GenerateIdTokenRequest struct { @@ -277,9 +277,9 @@ type GenerateIdTokenRequest struct { NullFields []string `json:"-"` } -func (s *GenerateIdTokenRequest) MarshalJSON() ([]byte, error) { +func (s GenerateIdTokenRequest) MarshalJSON() ([]byte, error) { type NoMethod GenerateIdTokenRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type GenerateIdTokenResponse struct { @@ -301,9 +301,9 @@ type GenerateIdTokenResponse struct { NullFields []string `json:"-"` } -func (s *GenerateIdTokenResponse) MarshalJSON() ([]byte, error) { +func (s GenerateIdTokenResponse) MarshalJSON() ([]byte, error) { type NoMethod GenerateIdTokenResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type SignBlobRequest struct { @@ -331,9 +331,9 @@ type SignBlobRequest struct { NullFields []string `json:"-"` } -func (s *SignBlobRequest) MarshalJSON() ([]byte, error) { +func (s SignBlobRequest) MarshalJSON() ([]byte, error) { type NoMethod SignBlobRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type SignBlobResponse struct { @@ -368,9 +368,9 @@ type SignBlobResponse struct { NullFields []string `json:"-"` } -func (s *SignBlobResponse) MarshalJSON() ([]byte, error) { +func (s SignBlobResponse) MarshalJSON() ([]byte, error) { type NoMethod SignBlobResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type SignJwtRequest struct { @@ -402,9 +402,9 @@ type SignJwtRequest struct { NullFields []string `json:"-"` } -func (s *SignJwtRequest) MarshalJSON() ([]byte, error) { +func (s SignJwtRequest) MarshalJSON() ([]byte, error) { type NoMethod SignJwtRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), 
s.ForceSendFields, s.NullFields) } type SignJwtResponse struct { @@ -441,9 +441,9 @@ type SignJwtResponse struct { NullFields []string `json:"-"` } -func (s *SignJwtResponse) MarshalJSON() ([]byte, error) { +func (s SignJwtResponse) MarshalJSON() ([]byte, error) { type NoMethod SignJwtResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type ProjectsServiceAccountsGenerateAccessTokenCall struct { diff --git a/vendor/google.golang.org/api/internal/creds.go b/vendor/google.golang.org/api/internal/creds.go index b6dbace4c9..5ea555ed01 100644 --- a/vendor/google.golang.org/api/internal/creds.go +++ b/vendor/google.golang.org/api/internal/creds.go @@ -42,6 +42,26 @@ func Creds(ctx context.Context, ds *DialSettings) (*google.Credentials, error) { return creds, nil } +// GetOAuth2Configuration determines configurations for the OAuth2 transport, which is separate from the API transport. +// The OAuth2 transport and endpoint will be configured for mTLS if applicable. +func GetOAuth2Configuration(ctx context.Context, settings *DialSettings) (string, *http.Client, error) { + clientCertSource, err := getClientCertificateSource(settings) + if err != nil { + return "", nil, err + } + tokenURL := oAuth2Endpoint(clientCertSource) + var oauth2Client *http.Client + if clientCertSource != nil { + tlsConfig := &tls.Config{ + GetClientCertificate: clientCertSource, + } + oauth2Client = customHTTPClient(tlsConfig) + } else { + oauth2Client = oauth2.NewClient(ctx, nil) + } + return tokenURL, oauth2Client, nil +} + func credsNewAuth(ctx context.Context, settings *DialSettings) (*google.Credentials, error) { // Preserve old options behavior if settings.InternalCredentials != nil { @@ -80,13 +100,18 @@ func credsNewAuth(ctx context.Context, settings *DialSettings) (*google.Credenti aud = settings.DefaultAudience } + tokenURL, oauth2Client, err := GetOAuth2Configuration(ctx, settings) + if err != nil { + return nil, err + } creds, err := credentials.DetectDefault(&credentials.DetectOptions{ Scopes: scopes, Audience: aud, CredentialsFile: settings.CredentialsFile, CredentialsJSON: settings.CredentialsJSON, UseSelfSignedJWT: useSelfSignedJWT, - Client: oauth2.NewClient(ctx, nil), + TokenURL: tokenURL, + Client: oauth2Client, }) if err != nil { return nil, err @@ -102,7 +127,7 @@ func baseCreds(ctx context.Context, ds *DialSettings) (*google.Credentials, erro if ds.Credentials != nil { return ds.Credentials, nil } - if ds.CredentialsJSON != nil { + if len(ds.CredentialsJSON) > 0 { return credentialsFromJSON(ctx, ds.CredentialsJSON, ds) } if ds.CredentialsFile != "" { @@ -147,19 +172,12 @@ func credentialsFromJSON(ctx context.Context, data []byte, ds *DialSettings) (*g var params google.CredentialsParams params.Scopes = ds.GetScopes() - // Determine configurations for the OAuth2 transport, which is separate from the API transport. - // The OAuth2 transport and endpoint will be configured for mTLS if applicable. 
- clientCertSource, err := getClientCertificateSource(ds) + tokenURL, oauth2Client, err := GetOAuth2Configuration(ctx, ds) if err != nil { return nil, err } - params.TokenURL = oAuth2Endpoint(clientCertSource) - if clientCertSource != nil { - tlsConfig := &tls.Config{ - GetClientCertificate: clientCertSource, - } - ctx = context.WithValue(ctx, oauth2.HTTPClient, customHTTPClient(tlsConfig)) - } + params.TokenURL = tokenURL + ctx = context.WithValue(ctx, oauth2.HTTPClient, oauth2Client) // By default, a standard OAuth 2.0 token source is created cred, err := google.CredentialsFromJSONWithParams(ctx, data, params) diff --git a/vendor/google.golang.org/api/internal/gensupport/resumable.go b/vendor/google.golang.org/api/internal/gensupport/resumable.go index 08e7aacefb..f828ddb60e 100644 --- a/vendor/google.golang.org/api/internal/gensupport/resumable.go +++ b/vendor/google.golang.org/api/internal/gensupport/resumable.go @@ -171,6 +171,10 @@ func (rx *ResumableUpload) Upload(ctx context.Context) (resp *http.Response, err if resp != nil && resp.Body != nil { resp.Body.Close() } + // If there were retries, indicate this in the error message and wrap the final error. + if rx.attempts > 1 { + return nil, fmt.Errorf("chunk upload failed after %d attempts; final error: %w", rx.attempts, err) + } return nil, err } // This case is very unlikely but possible only if rx.ChunkRetryDeadline is diff --git a/vendor/google.golang.org/api/internal/gensupport/retry.go b/vendor/google.golang.org/api/internal/gensupport/retry.go index 20b57d925f..089ee3189b 100644 --- a/vendor/google.golang.org/api/internal/gensupport/retry.go +++ b/vendor/google.golang.org/api/internal/gensupport/retry.go @@ -8,6 +8,7 @@ import ( "errors" "io" "net" + "net/url" "strings" "time" @@ -29,8 +30,6 @@ var ( backoff = func() Backoff { return &gax.Backoff{Initial: 100 * time.Millisecond} } - // syscallRetryable is a platform-specific hook, specified in retryable_linux.go - syscallRetryable func(error) bool = func(err error) bool { return false } ) const ( @@ -56,30 +55,33 @@ func shouldRetry(status int, err error) bool { if status == statusTooManyRequests || status == statusRequestTimeout { return true } - if err == io.ErrUnexpectedEOF { + if errors.Is(err, io.ErrUnexpectedEOF) { return true } - // Transient network errors should be retried. - if syscallRetryable(err) { + if errors.Is(err, net.ErrClosed) { return true } - if err, ok := err.(interface{ Temporary() bool }); ok { - if err.Temporary() { - return true + switch e := err.(type) { + case *net.OpError, *url.Error: + // Retry socket-level errors ECONNREFUSED and ECONNRESET (from syscall). + // Unfortunately the error type is unexported, so we resort to string + // matching. + retriable := []string{"connection refused", "connection reset", "broken pipe"} + for _, s := range retriable { + if strings.Contains(e.Error(), s) { + return true + } } - } - var opErr *net.OpError - if errors.As(err, &opErr) { - if strings.Contains(opErr.Error(), "use of closed network connection") { - // TODO: check against net.ErrClosed (go 1.16+) instead of string + case interface{ Temporary() bool }: + if e.Temporary() { return true } } - // If Go 1.13 error unwrapping is available, use this to examine wrapped + // If error unwrapping is available, use this to examine wrapped // errors. 
- if err, ok := err.(interface{ Unwrap() error }); ok { - return shouldRetry(status, err.Unwrap()) + if e, ok := err.(interface{ Unwrap() error }); ok { + return shouldRetry(status, e.Unwrap()) } return false } diff --git a/vendor/google.golang.org/api/internal/gensupport/retryable_linux.go b/vendor/google.golang.org/api/internal/gensupport/retryable_linux.go deleted file mode 100644 index a916c3da29..0000000000 --- a/vendor/google.golang.org/api/internal/gensupport/retryable_linux.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2020 Google LLC. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build linux -// +build linux - -package gensupport - -import "syscall" - -func init() { - // Initialize syscallRetryable to return true on transient socket-level - // errors. These errors are specific to Linux. - syscallRetryable = func(err error) bool { return err == syscall.ECONNRESET || err == syscall.ECONNREFUSED } -} diff --git a/vendor/google.golang.org/api/internal/gensupport/send.go b/vendor/google.golang.org/api/internal/gensupport/send.go index f39dd00d99..f6716134eb 100644 --- a/vendor/google.golang.org/api/internal/gensupport/send.go +++ b/vendor/google.golang.org/api/internal/gensupport/send.go @@ -48,8 +48,24 @@ func SendRequest(ctx context.Context, client *http.Client, req *http.Request) (* if ctx != nil { headers := callctx.HeadersFromContext(ctx) for k, vals := range headers { - for _, v := range vals { - req.Header.Add(k, v) + if k == "x-goog-api-client" { + // Merge all values into a single "x-goog-api-client" header. + var mergedVal strings.Builder + baseXGoogHeader := req.Header.Get("X-Goog-Api-Client") + if baseXGoogHeader != "" { + mergedVal.WriteString(baseXGoogHeader) + mergedVal.WriteRune(' ') + } + for _, v := range vals { + mergedVal.WriteString(v) + mergedVal.WriteRune(' ') + } + // Remove the last space and replace the header on the request. + req.Header.Set(k, mergedVal.String()[:mergedVal.Len()-1]) + } else { + for _, v := range vals { + req.Header.Add(k, v) + } } } } @@ -118,7 +134,9 @@ func sendAndRetry(ctx context.Context, client *http.Client, req *http.Request, r var err error attempts := 1 invocationID := uuid.New().String() - baseXGoogHeader := req.Header.Get("X-Goog-Api-Client") + + xGoogHeaderVals := req.Header.Values("X-Goog-Api-Client") + baseXGoogHeader := strings.Join(xGoogHeaderVals, " ") // Loop to retry the request, up to the context deadline. var pause time.Duration diff --git a/vendor/google.golang.org/api/internal/settings.go b/vendor/google.golang.org/api/internal/settings.go index 6d0c18e5a8..edba49af49 100644 --- a/vendor/google.golang.org/api/internal/settings.go +++ b/vendor/google.golang.org/api/internal/settings.go @@ -126,7 +126,7 @@ func (ds *DialSettings) Validate() error { if ds.Credentials != nil { nCreds++ } - if ds.CredentialsJSON != nil { + if len(ds.CredentialsJSON) > 0 { nCreds++ } if ds.CredentialsFile != "" { diff --git a/vendor/google.golang.org/api/internal/version.go b/vendor/google.golang.org/api/internal/version.go index caf8441363..e2edf688d4 100644 --- a/vendor/google.golang.org/api/internal/version.go +++ b/vendor/google.golang.org/api/internal/version.go @@ -5,4 +5,4 @@ package internal // Version is the current tagged release of the library. 
-const Version = "0.183.0" +const Version = "0.188.0" diff --git a/vendor/google.golang.org/api/storage/v1/storage-api.json b/vendor/google.golang.org/api/storage/v1/storage-api.json index 160800c2bb..9c06c5ede4 100644 --- a/vendor/google.golang.org/api/storage/v1/storage-api.json +++ b/vendor/google.golang.org/api/storage/v1/storage-api.json @@ -27,13 +27,23 @@ "discoveryVersion": "v1", "documentationLink": "https://developers.google.com/storage/docs/json_api/", "endpoints": [ + { + "description": "Regional Endpoint", + "endpointUrl": "https://storage.europe-west3.rep.googleapis.com/", + "location": "europe-west3" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://storage.europe-west9.rep.googleapis.com/", + "location": "europe-west9" + }, { "description": "Regional Endpoint", "endpointUrl": "https://storage.me-central2.rep.googleapis.com/", "location": "me-central2" } ], - "etag": "\"3132383134303835313436343635393933303731\"", + "etag": "\"39393931363036383932333134343736343437\"", "icons": { "x16": "https://www.google.com/images/icons/product/cloud_storage-16.png", "x32": "https://www.google.com/images/icons/product/cloud_storage-32.png" @@ -4075,7 +4085,7 @@ } } }, - "revision": "20240524", + "revision": "20240625", "rootUrl": "https://storage.googleapis.com/", "schemas": { "AnywhereCache": { @@ -5007,6 +5017,11 @@ "description": "The response message for storage.buckets.operations.list.", "id": "GoogleLongrunningListOperationsResponse", "properties": { + "kind": { + "default": "storage#operations", + "description": "The kind of item this is. For lists of operations, this is always storage#operations.", + "type": "string" + }, "nextPageToken": { "description": "The continuation token, used to page through large result sets. Provide this value in a subsequent request to return the next page of results.", "type": "string" @@ -5033,6 +5048,11 @@ "$ref": "GoogleRpcStatus", "description": "The error result of the operation in case of failure or cancellation." }, + "kind": { + "default": "storage#operation", + "description": "The kind of item this is. For operations, this is always storage#operation.", + "type": "string" + }, "metadata": { "additionalProperties": { "description": "Properties of the object. Contains field @type with type URL.", @@ -5052,6 +5072,10 @@ }, "description": "The normal response of the operation in case of success. If the original method returns no data on success, such as \"Delete\", the response is google.protobuf.Empty. If the original method is standard Get/Create/Update, the response should be the resource. For other methods, the response should have the type \"XxxResponse\", where \"Xxx\" is the original method name. 
For example, if the original method name is \"TakeSnapshot()\", the inferred response type is \"TakeSnapshotResponse\".", "type": "object" + }, + "selfLink": { + "description": "The link to this long running operation.", + "type": "string" } }, "type": "object" diff --git a/vendor/google.golang.org/api/storage/v1/storage-gen.go b/vendor/google.golang.org/api/storage/v1/storage-gen.go index b4d425e59f..cfa73e2e38 100644 --- a/vendor/google.golang.org/api/storage/v1/storage-gen.go +++ b/vendor/google.golang.org/api/storage/v1/storage-gen.go @@ -388,9 +388,9 @@ type AnywhereCache struct { NullFields []string `json:"-"` } -func (s *AnywhereCache) MarshalJSON() ([]byte, error) { +func (s AnywhereCache) MarshalJSON() ([]byte, error) { type NoMethod AnywhereCache - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AnywhereCaches: A list of Anywhere Caches. @@ -420,9 +420,9 @@ type AnywhereCaches struct { NullFields []string `json:"-"` } -func (s *AnywhereCaches) MarshalJSON() ([]byte, error) { +func (s AnywhereCaches) MarshalJSON() ([]byte, error) { type NoMethod AnywhereCaches - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Bucket: A bucket. @@ -548,9 +548,9 @@ type Bucket struct { NullFields []string `json:"-"` } -func (s *Bucket) MarshalJSON() ([]byte, error) { +func (s Bucket) MarshalJSON() ([]byte, error) { type NoMethod Bucket - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BucketAutoclass: The bucket's Autoclass configuration. @@ -580,9 +580,9 @@ type BucketAutoclass struct { NullFields []string `json:"-"` } -func (s *BucketAutoclass) MarshalJSON() ([]byte, error) { +func (s BucketAutoclass) MarshalJSON() ([]byte, error) { type NoMethod BucketAutoclass - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BucketBilling: The bucket's billing configuration. 
@@ -602,9 +602,9 @@ type BucketBilling struct { NullFields []string `json:"-"` } -func (s *BucketBilling) MarshalJSON() ([]byte, error) { +func (s BucketBilling) MarshalJSON() ([]byte, error) { type NoMethod BucketBilling - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type BucketCors struct { @@ -634,9 +634,9 @@ type BucketCors struct { NullFields []string `json:"-"` } -func (s *BucketCors) MarshalJSON() ([]byte, error) { +func (s BucketCors) MarshalJSON() ([]byte, error) { type NoMethod BucketCors - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BucketCustomPlacementConfig: The bucket's custom placement configuration for @@ -657,9 +657,9 @@ type BucketCustomPlacementConfig struct { NullFields []string `json:"-"` } -func (s *BucketCustomPlacementConfig) MarshalJSON() ([]byte, error) { +func (s BucketCustomPlacementConfig) MarshalJSON() ([]byte, error) { type NoMethod BucketCustomPlacementConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BucketEncryption: Encryption configuration for a bucket. @@ -680,9 +680,9 @@ type BucketEncryption struct { NullFields []string `json:"-"` } -func (s *BucketEncryption) MarshalJSON() ([]byte, error) { +func (s BucketEncryption) MarshalJSON() ([]byte, error) { type NoMethod BucketEncryption - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BucketHierarchicalNamespace: The bucket's hierarchical namespace @@ -704,9 +704,9 @@ type BucketHierarchicalNamespace struct { NullFields []string `json:"-"` } -func (s *BucketHierarchicalNamespace) MarshalJSON() ([]byte, error) { +func (s BucketHierarchicalNamespace) MarshalJSON() ([]byte, error) { type NoMethod BucketHierarchicalNamespace - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BucketIamConfiguration: The bucket's IAM configuration. 
@@ -736,9 +736,9 @@ type BucketIamConfiguration struct { NullFields []string `json:"-"` } -func (s *BucketIamConfiguration) MarshalJSON() ([]byte, error) { +func (s BucketIamConfiguration) MarshalJSON() ([]byte, error) { type NoMethod BucketIamConfiguration - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BucketIamConfigurationBucketPolicyOnly: The bucket's uniform bucket-level @@ -768,9 +768,9 @@ type BucketIamConfigurationBucketPolicyOnly struct { NullFields []string `json:"-"` } -func (s *BucketIamConfigurationBucketPolicyOnly) MarshalJSON() ([]byte, error) { +func (s BucketIamConfigurationBucketPolicyOnly) MarshalJSON() ([]byte, error) { type NoMethod BucketIamConfigurationBucketPolicyOnly - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BucketIamConfigurationUniformBucketLevelAccess: The bucket's uniform @@ -798,9 +798,9 @@ type BucketIamConfigurationUniformBucketLevelAccess struct { NullFields []string `json:"-"` } -func (s *BucketIamConfigurationUniformBucketLevelAccess) MarshalJSON() ([]byte, error) { +func (s BucketIamConfigurationUniformBucketLevelAccess) MarshalJSON() ([]byte, error) { type NoMethod BucketIamConfigurationUniformBucketLevelAccess - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BucketLifecycle: The bucket's lifecycle configuration. See lifecycle @@ -822,9 +822,9 @@ type BucketLifecycle struct { NullFields []string `json:"-"` } -func (s *BucketLifecycle) MarshalJSON() ([]byte, error) { +func (s BucketLifecycle) MarshalJSON() ([]byte, error) { type NoMethod BucketLifecycle - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type BucketLifecycleRule struct { @@ -845,9 +845,9 @@ type BucketLifecycleRule struct { NullFields []string `json:"-"` } -func (s *BucketLifecycleRule) MarshalJSON() ([]byte, error) { +func (s BucketLifecycleRule) MarshalJSON() ([]byte, error) { type NoMethod BucketLifecycleRule - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BucketLifecycleRuleAction: The action to take. 
@@ -871,9 +871,9 @@ type BucketLifecycleRuleAction struct { NullFields []string `json:"-"` } -func (s *BucketLifecycleRuleAction) MarshalJSON() ([]byte, error) { +func (s BucketLifecycleRuleAction) MarshalJSON() ([]byte, error) { type NoMethod BucketLifecycleRuleAction - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BucketLifecycleRuleCondition: The condition(s) under which the action will @@ -947,9 +947,9 @@ type BucketLifecycleRuleCondition struct { NullFields []string `json:"-"` } -func (s *BucketLifecycleRuleCondition) MarshalJSON() ([]byte, error) { +func (s BucketLifecycleRuleCondition) MarshalJSON() ([]byte, error) { type NoMethod BucketLifecycleRuleCondition - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BucketLogging: The bucket's logging configuration, which defines the @@ -973,9 +973,9 @@ type BucketLogging struct { NullFields []string `json:"-"` } -func (s *BucketLogging) MarshalJSON() ([]byte, error) { +func (s BucketLogging) MarshalJSON() ([]byte, error) { type NoMethod BucketLogging - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BucketObjectRetention: The bucket's object retention config. @@ -995,9 +995,9 @@ type BucketObjectRetention struct { NullFields []string `json:"-"` } -func (s *BucketObjectRetention) MarshalJSON() ([]byte, error) { +func (s BucketObjectRetention) MarshalJSON() ([]byte, error) { type NoMethod BucketObjectRetention - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BucketOwner: The owner of the bucket. This is always the project team's @@ -1020,9 +1020,9 @@ type BucketOwner struct { NullFields []string `json:"-"` } -func (s *BucketOwner) MarshalJSON() ([]byte, error) { +func (s BucketOwner) MarshalJSON() ([]byte, error) { type NoMethod BucketOwner - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BucketRetentionPolicy: The bucket's retention policy. The retention policy @@ -1058,9 +1058,9 @@ type BucketRetentionPolicy struct { NullFields []string `json:"-"` } -func (s *BucketRetentionPolicy) MarshalJSON() ([]byte, error) { +func (s BucketRetentionPolicy) MarshalJSON() ([]byte, error) { type NoMethod BucketRetentionPolicy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BucketSoftDeletePolicy: The bucket's soft delete policy, which defines the @@ -1087,9 +1087,9 @@ type BucketSoftDeletePolicy struct { NullFields []string `json:"-"` } -func (s *BucketSoftDeletePolicy) MarshalJSON() ([]byte, error) { +func (s BucketSoftDeletePolicy) MarshalJSON() ([]byte, error) { type NoMethod BucketSoftDeletePolicy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BucketVersioning: The bucket's versioning configuration. 
@@ -1109,9 +1109,9 @@ type BucketVersioning struct { NullFields []string `json:"-"` } -func (s *BucketVersioning) MarshalJSON() ([]byte, error) { +func (s BucketVersioning) MarshalJSON() ([]byte, error) { type NoMethod BucketVersioning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BucketWebsite: The bucket's website configuration, controlling how the @@ -1140,9 +1140,9 @@ type BucketWebsite struct { NullFields []string `json:"-"` } -func (s *BucketWebsite) MarshalJSON() ([]byte, error) { +func (s BucketWebsite) MarshalJSON() ([]byte, error) { type NoMethod BucketWebsite - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BucketAccessControl: An access-control entry. @@ -1199,9 +1199,9 @@ type BucketAccessControl struct { NullFields []string `json:"-"` } -func (s *BucketAccessControl) MarshalJSON() ([]byte, error) { +func (s BucketAccessControl) MarshalJSON() ([]byte, error) { type NoMethod BucketAccessControl - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BucketAccessControlProjectTeam: The project team associated with the entity, @@ -1224,9 +1224,9 @@ type BucketAccessControlProjectTeam struct { NullFields []string `json:"-"` } -func (s *BucketAccessControlProjectTeam) MarshalJSON() ([]byte, error) { +func (s BucketAccessControlProjectTeam) MarshalJSON() ([]byte, error) { type NoMethod BucketAccessControlProjectTeam - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BucketAccessControls: An access-control list. @@ -1252,9 +1252,9 @@ type BucketAccessControls struct { NullFields []string `json:"-"` } -func (s *BucketAccessControls) MarshalJSON() ([]byte, error) { +func (s BucketAccessControls) MarshalJSON() ([]byte, error) { type NoMethod BucketAccessControls - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BucketStorageLayout: The storage layout configuration of a bucket. 
@@ -1289,9 +1289,9 @@ type BucketStorageLayout struct { NullFields []string `json:"-"` } -func (s *BucketStorageLayout) MarshalJSON() ([]byte, error) { +func (s BucketStorageLayout) MarshalJSON() ([]byte, error) { type NoMethod BucketStorageLayout - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BucketStorageLayoutCustomPlacementConfig: The bucket's custom placement @@ -1312,9 +1312,9 @@ type BucketStorageLayoutCustomPlacementConfig struct { NullFields []string `json:"-"` } -func (s *BucketStorageLayoutCustomPlacementConfig) MarshalJSON() ([]byte, error) { +func (s BucketStorageLayoutCustomPlacementConfig) MarshalJSON() ([]byte, error) { type NoMethod BucketStorageLayoutCustomPlacementConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BucketStorageLayoutHierarchicalNamespace: The bucket's hierarchical @@ -1336,9 +1336,9 @@ type BucketStorageLayoutHierarchicalNamespace struct { NullFields []string `json:"-"` } -func (s *BucketStorageLayoutHierarchicalNamespace) MarshalJSON() ([]byte, error) { +func (s BucketStorageLayoutHierarchicalNamespace) MarshalJSON() ([]byte, error) { type NoMethod BucketStorageLayoutHierarchicalNamespace - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Buckets: A list of buckets. @@ -1368,9 +1368,9 @@ type Buckets struct { NullFields []string `json:"-"` } -func (s *Buckets) MarshalJSON() ([]byte, error) { +func (s Buckets) MarshalJSON() ([]byte, error) { type NoMethod Buckets - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BulkRestoreObjectsRequest: A bulk restore objects request. @@ -1409,9 +1409,9 @@ type BulkRestoreObjectsRequest struct { NullFields []string `json:"-"` } -func (s *BulkRestoreObjectsRequest) MarshalJSON() ([]byte, error) { +func (s BulkRestoreObjectsRequest) MarshalJSON() ([]byte, error) { type NoMethod BulkRestoreObjectsRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Channel: A notification channel used to watch for resource changes. @@ -1457,9 +1457,9 @@ type Channel struct { NullFields []string `json:"-"` } -func (s *Channel) MarshalJSON() ([]byte, error) { +func (s Channel) MarshalJSON() ([]byte, error) { type NoMethod Channel - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ComposeRequest: A Compose request. 
@@ -1484,9 +1484,9 @@ type ComposeRequest struct { NullFields []string `json:"-"` } -func (s *ComposeRequest) MarshalJSON() ([]byte, error) { +func (s ComposeRequest) MarshalJSON() ([]byte, error) { type NoMethod ComposeRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type ComposeRequestSourceObjects struct { @@ -1511,9 +1511,9 @@ type ComposeRequestSourceObjects struct { NullFields []string `json:"-"` } -func (s *ComposeRequestSourceObjects) MarshalJSON() ([]byte, error) { +func (s ComposeRequestSourceObjects) MarshalJSON() ([]byte, error) { type NoMethod ComposeRequestSourceObjects - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ComposeRequestSourceObjectsObjectPreconditions: Conditions that must be met @@ -1537,9 +1537,9 @@ type ComposeRequestSourceObjectsObjectPreconditions struct { NullFields []string `json:"-"` } -func (s *ComposeRequestSourceObjectsObjectPreconditions) MarshalJSON() ([]byte, error) { +func (s ComposeRequestSourceObjectsObjectPreconditions) MarshalJSON() ([]byte, error) { type NoMethod ComposeRequestSourceObjectsObjectPreconditions - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Expr: Represents an expression text. Example: title: "User account presence" @@ -1573,9 +1573,9 @@ type Expr struct { NullFields []string `json:"-"` } -func (s *Expr) MarshalJSON() ([]byte, error) { +func (s Expr) MarshalJSON() ([]byte, error) { type NoMethod Expr - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Folder: A folder. Only available in buckets with hierarchical namespace @@ -1618,9 +1618,9 @@ type Folder struct { NullFields []string `json:"-"` } -func (s *Folder) MarshalJSON() ([]byte, error) { +func (s Folder) MarshalJSON() ([]byte, error) { type NoMethod Folder - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // FolderPendingRenameInfo: Only present if the folder is part of an ongoing @@ -1642,9 +1642,9 @@ type FolderPendingRenameInfo struct { NullFields []string `json:"-"` } -func (s *FolderPendingRenameInfo) MarshalJSON() ([]byte, error) { +func (s FolderPendingRenameInfo) MarshalJSON() ([]byte, error) { type NoMethod FolderPendingRenameInfo - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Folders: A list of folders. @@ -1674,14 +1674,17 @@ type Folders struct { NullFields []string `json:"-"` } -func (s *Folders) MarshalJSON() ([]byte, error) { +func (s Folders) MarshalJSON() ([]byte, error) { type NoMethod Folders - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleLongrunningListOperationsResponse: The response message for // storage.buckets.operations.list. type GoogleLongrunningListOperationsResponse struct { + // Kind: The kind of item this is. For lists of operations, this is always + // storage#operations. 
+ Kind string `json:"kind,omitempty"` // NextPageToken: The continuation token, used to page through large result // sets. Provide this value in a subsequent request to return the next page of // results. @@ -1692,22 +1695,22 @@ type GoogleLongrunningListOperationsResponse struct { // ServerResponse contains the HTTP response code and headers from the server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "NextPageToken") to - // unconditionally include in API requests. By default, fields with empty or - // default values are omitted from API requests. See + // ForceSendFields is a list of field names (e.g. "Kind") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "NextPageToken") to include in API - // requests with the JSON null value. By default, fields with empty values are - // omitted from API requests. See + // NullFields is a list of field names (e.g. "Kind") to include in API requests + // with the JSON null value. By default, fields with empty values are omitted + // from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *GoogleLongrunningListOperationsResponse) MarshalJSON() ([]byte, error) { +func (s GoogleLongrunningListOperationsResponse) MarshalJSON() ([]byte, error) { type NoMethod GoogleLongrunningListOperationsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleLongrunningOperation: This resource represents a long-running @@ -1719,6 +1722,9 @@ type GoogleLongrunningOperation struct { Done bool `json:"done,omitempty"` // Error: The error result of the operation in case of failure or cancellation. Error *GoogleRpcStatus `json:"error,omitempty"` + // Kind: The kind of item this is. For operations, this is always + // storage#operation. + Kind string `json:"kind,omitempty"` // Metadata: Service-specific metadata associated with the operation. It // typically contains progress information and common metadata such as create // time. Some services might not provide such metadata. Any method that returns @@ -1736,6 +1742,8 @@ type GoogleLongrunningOperation struct { // method name. For example, if the original method name is "TakeSnapshot()", // the inferred response type is "TakeSnapshotResponse". Response googleapi.RawMessage `json:"response,omitempty"` + // SelfLink: The link to this long running operation. + SelfLink string `json:"selfLink,omitempty"` // ServerResponse contains the HTTP response code and headers from the server. 
googleapi.ServerResponse `json:"-"` @@ -1752,9 +1760,9 @@ type GoogleLongrunningOperation struct { NullFields []string `json:"-"` } -func (s *GoogleLongrunningOperation) MarshalJSON() ([]byte, error) { +func (s GoogleLongrunningOperation) MarshalJSON() ([]byte, error) { type NoMethod GoogleLongrunningOperation - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleRpcStatus: The "Status" type defines a logical error model that is @@ -1784,9 +1792,9 @@ type GoogleRpcStatus struct { NullFields []string `json:"-"` } -func (s *GoogleRpcStatus) MarshalJSON() ([]byte, error) { +func (s GoogleRpcStatus) MarshalJSON() ([]byte, error) { type NoMethod GoogleRpcStatus - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // HmacKey: JSON template to produce a JSON-style HMAC Key resource for Create @@ -1815,9 +1823,9 @@ type HmacKey struct { NullFields []string `json:"-"` } -func (s *HmacKey) MarshalJSON() ([]byte, error) { +func (s HmacKey) MarshalJSON() ([]byte, error) { type NoMethod HmacKey - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // HmacKeyMetadata: JSON template to produce a JSON-style HMAC Key metadata @@ -1863,9 +1871,9 @@ type HmacKeyMetadata struct { NullFields []string `json:"-"` } -func (s *HmacKeyMetadata) MarshalJSON() ([]byte, error) { +func (s HmacKeyMetadata) MarshalJSON() ([]byte, error) { type NoMethod HmacKeyMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // HmacKeysMetadata: A list of hmacKeys. @@ -1895,9 +1903,9 @@ type HmacKeysMetadata struct { NullFields []string `json:"-"` } -func (s *HmacKeysMetadata) MarshalJSON() ([]byte, error) { +func (s HmacKeysMetadata) MarshalJSON() ([]byte, error) { type NoMethod HmacKeysMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ManagedFolder: A managed folder. @@ -1939,9 +1947,9 @@ type ManagedFolder struct { NullFields []string `json:"-"` } -func (s *ManagedFolder) MarshalJSON() ([]byte, error) { +func (s ManagedFolder) MarshalJSON() ([]byte, error) { type NoMethod ManagedFolder - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ManagedFolders: A list of managed folders. @@ -1971,9 +1979,9 @@ type ManagedFolders struct { NullFields []string `json:"-"` } -func (s *ManagedFolders) MarshalJSON() ([]byte, error) { +func (s ManagedFolders) MarshalJSON() ([]byte, error) { type NoMethod ManagedFolders - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Notification: A subscription to receive Google PubSub notifications. 
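The ForceSendFields and NullFields doc comments carried through these hunks describe a contract the gensupport helper implements: zero-valued fields are normally dropped by omitempty, but naming a field in ForceSendFields sends it anyway. A rough approximation of that contract in plain encoding/json — not the real gensupport implementation, and with HmacKeyMetadata trimmed to one field for illustration:

package main

import (
	"encoding/json"
	"fmt"
)

type HmacKeyMetadata struct {
	State           string   `json:"state,omitempty"`
	ForceSendFields []string `json:"-"`
}

func (s HmacKeyMetadata) MarshalJSON() ([]byte, error) {
	// First marshal through the method-free alias; omitempty drops the
	// zero-valued State field here.
	type NoMethod HmacKeyMetadata
	raw, err := json.Marshal(NoMethod(s))
	if err != nil {
		return nil, err
	}
	m := map[string]interface{}{}
	if err := json.Unmarshal(raw, &m); err != nil {
		return nil, err
	}
	// Then force named fields back in, even when empty. Only State is
	// handled in this trimmed sketch.
	for _, f := range s.ForceSendFields {
		if f == "State" {
			m["state"] = s.State
		}
	}
	return json.Marshal(m)
}

func main() {
	b, _ := json.Marshal(HmacKeyMetadata{ForceSendFields: []string{"State"}})
	fmt.Println(string(b)) // {"state":""}
}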
@@ -2018,9 +2026,9 @@ type Notification struct { NullFields []string `json:"-"` } -func (s *Notification) MarshalJSON() ([]byte, error) { +func (s Notification) MarshalJSON() ([]byte, error) { type NoMethod Notification - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Notifications: A list of notification subscriptions. @@ -2046,9 +2054,9 @@ type Notifications struct { NullFields []string `json:"-"` } -func (s *Notifications) MarshalJSON() ([]byte, error) { +func (s Notifications) MarshalJSON() ([]byte, error) { type NoMethod Notifications - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Object: An object. @@ -2188,9 +2196,9 @@ type Object struct { NullFields []string `json:"-"` } -func (s *Object) MarshalJSON() ([]byte, error) { +func (s Object) MarshalJSON() ([]byte, error) { type NoMethod Object - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ObjectCustomerEncryption: Metadata of customer-supplied encryption key, if @@ -2213,9 +2221,9 @@ type ObjectCustomerEncryption struct { NullFields []string `json:"-"` } -func (s *ObjectCustomerEncryption) MarshalJSON() ([]byte, error) { +func (s ObjectCustomerEncryption) MarshalJSON() ([]byte, error) { type NoMethod ObjectCustomerEncryption - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ObjectOwner: The owner of the object. This will always be the uploader of @@ -2238,9 +2246,9 @@ type ObjectOwner struct { NullFields []string `json:"-"` } -func (s *ObjectOwner) MarshalJSON() ([]byte, error) { +func (s ObjectOwner) MarshalJSON() ([]byte, error) { type NoMethod ObjectOwner - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ObjectRetention: A collection of object level retention parameters. @@ -2263,9 +2271,9 @@ type ObjectRetention struct { NullFields []string `json:"-"` } -func (s *ObjectRetention) MarshalJSON() ([]byte, error) { +func (s ObjectRetention) MarshalJSON() ([]byte, error) { type NoMethod ObjectRetention - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ObjectAccessControl: An access-control entry. 
@@ -2326,9 +2334,9 @@ type ObjectAccessControl struct { NullFields []string `json:"-"` } -func (s *ObjectAccessControl) MarshalJSON() ([]byte, error) { +func (s ObjectAccessControl) MarshalJSON() ([]byte, error) { type NoMethod ObjectAccessControl - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ObjectAccessControlProjectTeam: The project team associated with the entity, @@ -2351,9 +2359,9 @@ type ObjectAccessControlProjectTeam struct { NullFields []string `json:"-"` } -func (s *ObjectAccessControlProjectTeam) MarshalJSON() ([]byte, error) { +func (s ObjectAccessControlProjectTeam) MarshalJSON() ([]byte, error) { type NoMethod ObjectAccessControlProjectTeam - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ObjectAccessControls: An access-control list. @@ -2379,9 +2387,9 @@ type ObjectAccessControls struct { NullFields []string `json:"-"` } -func (s *ObjectAccessControls) MarshalJSON() ([]byte, error) { +func (s ObjectAccessControls) MarshalJSON() ([]byte, error) { type NoMethod ObjectAccessControls - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Objects: A list of objects. @@ -2414,9 +2422,9 @@ type Objects struct { NullFields []string `json:"-"` } -func (s *Objects) MarshalJSON() ([]byte, error) { +func (s Objects) MarshalJSON() ([]byte, error) { type NoMethod Objects - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Policy: A bucket/object/managedFolder IAM policy. @@ -2455,9 +2463,9 @@ type Policy struct { NullFields []string `json:"-"` } -func (s *Policy) MarshalJSON() ([]byte, error) { +func (s Policy) MarshalJSON() ([]byte, error) { type NoMethod Policy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type PolicyBindings struct { @@ -2527,9 +2535,9 @@ type PolicyBindings struct { NullFields []string `json:"-"` } -func (s *PolicyBindings) MarshalJSON() ([]byte, error) { +func (s PolicyBindings) MarshalJSON() ([]byte, error) { type NoMethod PolicyBindings - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // RewriteResponse: A rewrite response. @@ -2569,9 +2577,9 @@ type RewriteResponse struct { NullFields []string `json:"-"` } -func (s *RewriteResponse) MarshalJSON() ([]byte, error) { +func (s RewriteResponse) MarshalJSON() ([]byte, error) { type NoMethod RewriteResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ServiceAccount: A subscription to receive Google PubSub notifications. 
@@ -2597,9 +2605,9 @@ type ServiceAccount struct { NullFields []string `json:"-"` } -func (s *ServiceAccount) MarshalJSON() ([]byte, error) { +func (s ServiceAccount) MarshalJSON() ([]byte, error) { type NoMethod ServiceAccount - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TestIamPermissionsResponse: A @@ -2648,9 +2656,9 @@ type TestIamPermissionsResponse struct { NullFields []string `json:"-"` } -func (s *TestIamPermissionsResponse) MarshalJSON() ([]byte, error) { +func (s TestIamPermissionsResponse) MarshalJSON() ([]byte, error) { type NoMethod TestIamPermissionsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type AnywhereCachesDisableCall struct { diff --git a/vendor/google.golang.org/api/transport/grpc/dial.go b/vendor/google.golang.org/api/transport/grpc/dial.go index 2e66d02b37..12ebb0a11e 100644 --- a/vendor/google.golang.org/api/transport/grpc/dial.go +++ b/vendor/google.golang.org/api/transport/grpc/dial.go @@ -218,6 +218,11 @@ func dialPoolNewAuth(ctx context.Context, secure bool, poolSize int, ds *interna defaultEndpointTemplate = ds.DefaultEndpoint } + tokenURL, oauth2Client, err := internal.GetOAuth2Configuration(ctx, ds) + if err != nil { + return nil, err + } + pool, err := grpctransport.Dial(ctx, secure, &grpctransport.Options{ DisableTelemetry: ds.TelemetryDisabled, DisableAuthentication: ds.NoAuth, @@ -226,12 +231,14 @@ func dialPoolNewAuth(ctx context.Context, secure bool, poolSize int, ds *interna GRPCDialOpts: ds.GRPCDialOpts, PoolSize: poolSize, Credentials: creds, + APIKey: ds.APIKey, DetectOpts: &credentials.DetectOptions{ Scopes: ds.Scopes, Audience: aud, CredentialsFile: ds.CredentialsFile, CredentialsJSON: ds.CredentialsJSON, - Client: oauth2.NewClient(ctx, nil), + TokenURL: tokenURL, + Client: oauth2Client, }, InternalOptions: &grpctransport.InternalOptions{ EnableNonDefaultSAForDirectPath: ds.AllowNonDefaultServiceAccount, diff --git a/vendor/google.golang.org/api/transport/http/dial.go b/vendor/google.golang.org/api/transport/http/dial.go index d1cd83b62d..a36e24315b 100644 --- a/vendor/google.golang.org/api/transport/http/dial.go +++ b/vendor/google.golang.org/api/transport/http/dial.go @@ -107,6 +107,10 @@ func newClientNewAuth(ctx context.Context, base http.RoundTripper, ds *internal. if ds.RequestReason != "" { headers.Set("X-goog-request-reason", ds.RequestReason) } + tokenURL, oauth2Client, err := internal.GetOAuth2Configuration(ctx, ds) + if err != nil { + return nil, err + } client, err := httptransport.NewClient(&httptransport.Options{ DisableTelemetry: ds.TelemetryDisabled, DisableAuthentication: ds.NoAuth, @@ -121,7 +125,8 @@ func newClientNewAuth(ctx context.Context, base http.RoundTripper, ds *internal. 
Audience: aud, CredentialsFile: ds.CredentialsFile, CredentialsJSON: ds.CredentialsJSON, - Client: oauth2.NewClient(ctx, nil), + TokenURL: tokenURL, + Client: oauth2Client, }, InternalOptions: &httptransport.InternalOptions{ EnableJWTWithScope: ds.EnableJwtWithScope, diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go index 636edb460a..fe19e8f97a 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go @@ -1024,6 +1024,13 @@ type MethodSettings struct { // The fully qualified name of the method, for which the options below apply. // This is used to find the method to apply the options. + // + // Example: + // + // publishing: + // method_settings: + // - selector: google.storage.control.v2.StorageControl.CreateFolder + // # method settings for CreateFolder... Selector string `protobuf:"bytes,1,opt,name=selector,proto3" json:"selector,omitempty"` // Describes settings to use for long-running operations when generating // API methods for RPCs. Complements RPCs that use the annotations in @@ -1033,15 +1040,12 @@ type MethodSettings struct { // // publishing: // method_settings: - // - selector: google.cloud.speech.v2.Speech.BatchRecognize - // long_running: - // initial_poll_delay: - // seconds: 60 # 1 minute - // poll_delay_multiplier: 1.5 - // max_poll_delay: - // seconds: 360 # 6 minutes - // total_poll_timeout: - // seconds: 54000 # 90 minutes + // - selector: google.cloud.speech.v2.Speech.BatchRecognize + // long_running: + // initial_poll_delay: 60s # 1 minute + // poll_delay_multiplier: 1.5 + // max_poll_delay: 360s # 6 minutes + // total_poll_timeout: 54000s # 90 minutes LongRunning *MethodSettings_LongRunning `protobuf:"bytes,2,opt,name=long_running,json=longRunning,proto3" json:"long_running,omitempty"` // List of top-level fields of the request message, that should be // automatically populated by the client libraries based on their @@ -1051,9 +1055,9 @@ type MethodSettings struct { // // publishing: // method_settings: - // - selector: google.example.v1.ExampleService.CreateExample - // auto_populated_fields: - // - request_id + // - selector: google.example.v1.ExampleService.CreateExample + // auto_populated_fields: + // - request_id AutoPopulatedFields []string `protobuf:"bytes,3,rep,name=auto_populated_fields,json=autoPopulatedFields,proto3" json:"auto_populated_fields,omitempty"` } diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go index d339dfb02a..a462e7d013 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go @@ -121,6 +121,11 @@ type FieldInfo struct { // any API consumer, just documents the API's format for the field it is // applied to. Format FieldInfo_Format `protobuf:"varint,1,opt,name=format,proto3,enum=google.api.FieldInfo_Format" json:"format,omitempty"` + // The type(s) that the annotated, generic field may represent. + // + // Currently, this must only be used on fields of type `google.protobuf.Any`. + // Supporting other generic types may be considered in the future. 
+ ReferencedTypes []*TypeReference `protobuf:"bytes,2,rep,name=referenced_types,json=referencedTypes,proto3" json:"referenced_types,omitempty"` } func (x *FieldInfo) Reset() { @@ -162,6 +167,70 @@ func (x *FieldInfo) GetFormat() FieldInfo_Format { return FieldInfo_FORMAT_UNSPECIFIED } +func (x *FieldInfo) GetReferencedTypes() []*TypeReference { + if x != nil { + return x.ReferencedTypes + } + return nil +} + +// A reference to a message type, for use in [FieldInfo][google.api.FieldInfo]. +type TypeReference struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the type that the annotated, generic field may represent. + // If the type is in the same protobuf package, the value can be the simple + // message name e.g., `"MyMessage"`. Otherwise, the value must be the + // fully-qualified message name e.g., `"google.library.v1.Book"`. + // + // If the type(s) are unknown to the service (e.g. the field accepts generic + // user input), use the wildcard `"*"` to denote this behavior. + // + // See [AIP-202](https://google.aip.dev/202#type-references) for more details. + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` +} + +func (x *TypeReference) Reset() { + *x = TypeReference{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_field_info_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TypeReference) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TypeReference) ProtoMessage() {} + +func (x *TypeReference) ProtoReflect() protoreflect.Message { + mi := &file_google_api_field_info_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TypeReference.ProtoReflect.Descriptor instead. 
+func (*TypeReference) Descriptor() ([]byte, []int) { + return file_google_api_field_info_proto_rawDescGZIP(), []int{1} +} + +func (x *TypeReference) GetTypeName() string { + if x != nil { + return x.TypeName + } + return "" +} + var file_google_api_field_info_proto_extTypes = []protoimpl.ExtensionInfo{ { ExtendedType: (*descriptorpb.FieldOptions)(nil), @@ -185,6 +254,13 @@ var ( // string actual_ip_address = 4 [ // (google.api.field_info).format = IPV4_OR_IPV6 // ]; + // google.protobuf.Any generic_field = 5 [ + // (google.api.field_info).referenced_types = {type_name: "ActualType"}, + // (google.api.field_info).referenced_types = {type_name: "OtherType"}, + // ]; + // google.protobuf.Any generic_user_input = 5 [ + // (google.api.field_info).referenced_types = {type_name: "*"}, + // ]; // // optional google.api.FieldInfo field_info = 291403980; E_FieldInfo = &file_google_api_field_info_proto_extTypes[0] @@ -197,30 +273,37 @@ var file_google_api_field_info_proto_rawDesc = []byte{ 0x6c, 0x64, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x94, 0x01, 0x0a, 0x09, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xda, 0x01, 0x0a, 0x09, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x34, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, - 0x2e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, - 0x51, 0x0a, 0x06, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x16, 0x0a, 0x12, 0x46, 0x4f, 0x52, - 0x4d, 0x41, 0x54, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, - 0x00, 0x12, 0x09, 0x0a, 0x05, 0x55, 0x55, 0x49, 0x44, 0x34, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, - 0x49, 0x50, 0x56, 0x34, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x49, 0x50, 0x56, 0x36, 0x10, 0x03, - 0x12, 0x10, 0x0a, 0x0c, 0x49, 0x50, 0x56, 0x34, 0x5f, 0x4f, 0x52, 0x5f, 0x49, 0x50, 0x56, 0x36, - 0x10, 0x04, 0x3a, 0x57, 0x0a, 0x0a, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x69, 0x6e, 0x66, 0x6f, - 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, - 0xcc, 0xf1, 0xf9, 0x8a, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, - 0x52, 0x09, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x42, 0x6c, 0x0a, 0x0e, 0x63, - 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x0e, 0x46, - 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, - 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, - 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0xa2, 
0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, + 0x2e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, + 0x44, 0x0a, 0x10, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x64, 0x5f, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x65, 0x66, 0x65, 0x72, + 0x65, 0x6e, 0x63, 0x65, 0x52, 0x0f, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x64, + 0x54, 0x79, 0x70, 0x65, 0x73, 0x22, 0x51, 0x0a, 0x06, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, + 0x16, 0x0a, 0x12, 0x46, 0x4f, 0x52, 0x4d, 0x41, 0x54, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, + 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x55, 0x55, 0x49, 0x44, 0x34, + 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x49, 0x50, 0x56, 0x34, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, + 0x49, 0x50, 0x56, 0x36, 0x10, 0x03, 0x12, 0x10, 0x0a, 0x0c, 0x49, 0x50, 0x56, 0x34, 0x5f, 0x4f, + 0x52, 0x5f, 0x49, 0x50, 0x56, 0x36, 0x10, 0x04, 0x22, 0x2c, 0x0a, 0x0d, 0x54, 0x79, 0x70, 0x65, + 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, + 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, + 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x3a, 0x57, 0x0a, 0x0a, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, + 0x69, 0x6e, 0x66, 0x6f, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0xcc, 0xf1, 0xf9, 0x8a, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x09, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x42, + 0x6c, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, + 0x69, 0x42, 0x0e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, + 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -236,21 +319,23 @@ func file_google_api_field_info_proto_rawDescGZIP() []byte { } var file_google_api_field_info_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_google_api_field_info_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_google_api_field_info_proto_msgTypes = make([]protoimpl.MessageInfo, 2) var file_google_api_field_info_proto_goTypes = []interface{}{ (FieldInfo_Format)(0), // 0: google.api.FieldInfo.Format (*FieldInfo)(nil), // 1: google.api.FieldInfo - (*descriptorpb.FieldOptions)(nil), // 2: google.protobuf.FieldOptions + (*TypeReference)(nil), // 2: google.api.TypeReference + (*descriptorpb.FieldOptions)(nil), // 3: google.protobuf.FieldOptions } var file_google_api_field_info_proto_depIdxs = []int32{ 0, // 0: google.api.FieldInfo.format:type_name -> google.api.FieldInfo.Format - 2, // 1: google.api.field_info:extendee -> 
google.protobuf.FieldOptions - 1, // 2: google.api.field_info:type_name -> google.api.FieldInfo - 3, // [3:3] is the sub-list for method output_type - 3, // [3:3] is the sub-list for method input_type - 2, // [2:3] is the sub-list for extension type_name - 1, // [1:2] is the sub-list for extension extendee - 0, // [0:1] is the sub-list for field type_name + 2, // 1: google.api.FieldInfo.referenced_types:type_name -> google.api.TypeReference + 3, // 2: google.api.field_info:extendee -> google.protobuf.FieldOptions + 1, // 3: google.api.field_info:type_name -> google.api.FieldInfo + 4, // [4:4] is the sub-list for method output_type + 4, // [4:4] is the sub-list for method input_type + 3, // [3:4] is the sub-list for extension type_name + 2, // [2:3] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name } func init() { file_google_api_field_info_proto_init() } @@ -271,6 +356,18 @@ func file_google_api_field_info_proto_init() { return nil } } + file_google_api_field_info_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TypeReference); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } } type x struct{} out := protoimpl.TypeBuilder{ @@ -278,7 +375,7 @@ func file_google_api_field_info_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_api_field_info_proto_rawDesc, NumEnums: 1, - NumMessages: 1, + NumMessages: 2, NumExtensions: 1, NumServices: 0, }, diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go index 76ea76df33..ffb5838cb1 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go @@ -102,7 +102,7 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { return false } -// # gRPC Transcoding +// gRPC Transcoding // // gRPC Transcoding is a feature for mapping between a gRPC method and one or // more HTTP REST endpoints. It allows developers to build a single API service @@ -143,9 +143,8 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { // // This enables an HTTP REST to gRPC mapping as below: // -// HTTP | gRPC -// -----|----- -// `GET /v1/messages/123456` | `GetMessage(name: "messages/123456")` +// - HTTP: `GET /v1/messages/123456` +// - gRPC: `GetMessage(name: "messages/123456")` // // Any fields in the request message which are not bound by the path template // automatically become HTTP query parameters if there is no HTTP request body. @@ -169,11 +168,9 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { // // This enables a HTTP JSON to RPC mapping as below: // -// HTTP | gRPC -// -----|----- -// `GET /v1/messages/123456?revision=2&sub.subfield=foo` | -// `GetMessage(message_id: "123456" revision: 2 sub: SubMessage(subfield: -// "foo"))` +// - HTTP: `GET /v1/messages/123456?revision=2&sub.subfield=foo` +// - gRPC: `GetMessage(message_id: "123456" revision: 2 sub: +// SubMessage(subfield: "foo"))` // // Note that fields which are mapped to URL query parameters must have a // primitive type or a repeated primitive type or a non-repeated message type. 
@@ -203,10 +200,8 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { // representation of the JSON in the request body is determined by // protos JSON encoding: // -// HTTP | gRPC -// -----|----- -// `PATCH /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: -// "123456" message { text: "Hi!" })` +// - HTTP: `PATCH /v1/messages/123456 { "text": "Hi!" }` +// - gRPC: `UpdateMessage(message_id: "123456" message { text: "Hi!" })` // // The special name `*` can be used in the body mapping to define that // every field not bound by the path template should be mapped to the @@ -228,10 +223,8 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { // // The following HTTP JSON to RPC mapping is enabled: // -// HTTP | gRPC -// -----|----- -// `PATCH /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: -// "123456" text: "Hi!")` +// - HTTP: `PATCH /v1/messages/123456 { "text": "Hi!" }` +// - gRPC: `UpdateMessage(message_id: "123456" text: "Hi!")` // // Note that when using `*` in the body mapping, it is not possible to // have HTTP parameters, as all fields not bound by the path end in @@ -259,13 +252,13 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { // // This enables the following two alternative HTTP JSON to RPC mappings: // -// HTTP | gRPC -// -----|----- -// `GET /v1/messages/123456` | `GetMessage(message_id: "123456")` -// `GET /v1/users/me/messages/123456` | `GetMessage(user_id: "me" message_id: -// "123456")` +// - HTTP: `GET /v1/messages/123456` +// - gRPC: `GetMessage(message_id: "123456")` // -// ## Rules for HTTP mapping +// - HTTP: `GET /v1/users/me/messages/123456` +// - gRPC: `GetMessage(user_id: "me" message_id: "123456")` +// +// # Rules for HTTP mapping // // 1. Leaf request fields (recursive expansion nested messages in the request // message) are classified into three categories: @@ -284,7 +277,7 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { // request body, all // fields are passed via URL path and URL query parameters. // -// ### Path template syntax +// Path template syntax // // Template = "/" Segments [ Verb ] ; // Segments = Segment { "/" Segment } ; @@ -323,7 +316,7 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { // Document](https://developers.google.com/discovery/v1/reference/apis) as // `{+var}`. // -// ## Using gRPC API Service Configuration +// # Using gRPC API Service Configuration // // gRPC API Service Configuration (service config) is a configuration language // for configuring a gRPC service to become a user-facing product. The @@ -338,15 +331,14 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { // specified in the service config will override any matching transcoding // configuration in the proto. // -// Example: +// The following example selects a gRPC method and applies an `HttpRule` to it: // // http: // rules: -// # Selects a gRPC method and applies HttpRule to it. 
// - selector: example.v1.Messaging.GetMessage // get: /v1/messages/{message_id}/{sub.subfield} // -// ## Special notes +// # Special notes // // When gRPC Transcoding is used to map a gRPC to JSON REST endpoints, the // proto to JSON conversion must follow the [proto3 diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go index 7a3fd93fcd..b5db279aeb 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go @@ -253,8 +253,13 @@ type ResourceDescriptor struct { History ResourceDescriptor_History `protobuf:"varint,4,opt,name=history,proto3,enum=google.api.ResourceDescriptor_History" json:"history,omitempty"` // The plural name used in the resource name and permission names, such as // 'projects' for the resource name of 'projects/{project}' and the permission - // name of 'cloudresourcemanager.googleapis.com/projects.get'. It is the same - // concept of the `plural` field in k8s CRD spec + // name of 'cloudresourcemanager.googleapis.com/projects.get'. One exception + // to this is for Nested Collections that have stuttering names, as defined + // in [AIP-122](https://google.aip.dev/122#nested-collections), where the + // collection ID in the resource name pattern does not necessarily directly + // match the `plural` value. + // + // It is the same concept of the `plural` field in k8s CRD spec // https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/ // // Note: The plural form is required even for singleton resources. See diff --git a/vendor/modules.txt b/vendor/modules.txt index d068066d28..d7da0ad720 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,10 +1,10 @@ -# cloud.google.com/go v0.114.0 +# cloud.google.com/go v0.115.0 ## explicit; go 1.20 cloud.google.com/go/internal cloud.google.com/go/internal/optional cloud.google.com/go/internal/trace cloud.google.com/go/internal/version -# cloud.google.com/go/auth v0.5.1 +# cloud.google.com/go/auth v0.7.0 ## explicit; go 1.20 cloud.google.com/go/auth cloud.google.com/go/auth/credentials @@ -23,20 +23,20 @@ cloud.google.com/go/auth/internal/transport/cert # cloud.google.com/go/auth/oauth2adapt v0.2.2 ## explicit; go 1.19 cloud.google.com/go/auth/oauth2adapt -# cloud.google.com/go/compute/metadata v0.3.0 -## explicit; go 1.19 +# cloud.google.com/go/compute/metadata v0.4.0 +## explicit; go 1.20 cloud.google.com/go/compute/metadata -# cloud.google.com/go/iam v1.1.8 -## explicit; go 1.19 +# cloud.google.com/go/iam v1.1.10 +## explicit; go 1.20 cloud.google.com/go/iam cloud.google.com/go/iam/apiv1/iampb -# cloud.google.com/go/storage v1.40.0 +# cloud.google.com/go/storage v1.41.0 ## explicit; go 1.19 cloud.google.com/go/storage cloud.google.com/go/storage/internal cloud.google.com/go/storage/internal/apiv2 cloud.google.com/go/storage/internal/apiv2/storagepb -# github.com/Azure/azure-sdk-for-go/sdk/azcore v1.12.0 +# github.com/Azure/azure-sdk-for-go/sdk/azcore v1.13.0 ## explicit; go 1.18 github.com/Azure/azure-sdk-for-go/sdk/azcore github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource @@ -62,7 +62,7 @@ github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing ## explicit; go 1.18 github.com/Azure/azure-sdk-for-go/sdk/azidentity github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal -# github.com/Azure/azure-sdk-for-go/sdk/internal v1.9.0 +# 
github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 ## explicit; go 1.18 github.com/Azure/azure-sdk-for-go/sdk/internal/diag github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo @@ -119,7 +119,7 @@ github.com/VictoriaMetrics/fastcache ## explicit github.com/alecthomas/template github.com/alecthomas/template/parse -# github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 +# github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30 ## explicit; go 1.15 github.com/alecthomas/units # github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a @@ -341,7 +341,7 @@ github.com/facette/natsort # github.com/fatih/color v1.16.0 ## explicit; go 1.17 github.com/fatih/color -# github.com/felixge/fgprof v0.9.4 +# github.com/felixge/fgprof v0.9.5 ## explicit; go 1.14 github.com/felixge/fgprof # github.com/felixge/httpsnoop v1.0.4 @@ -489,8 +489,8 @@ github.com/google/go-cmp/cmp/internal/diff github.com/google/go-cmp/cmp/internal/flags github.com/google/go-cmp/cmp/internal/function github.com/google/go-cmp/cmp/internal/value -# github.com/google/pprof v0.0.0-20240528025155-186aa0362fba -## explicit; go 1.19 +# github.com/google/pprof v0.0.0-20240827171923-fa2c70bbbfe5 +## explicit; go 1.22 github.com/google/pprof/profile # github.com/google/s2a-go v0.1.7 ## explicit; go 1.19 @@ -522,8 +522,8 @@ github.com/google/uuid ## explicit; go 1.19 github.com/googleapis/enterprise-certificate-proxy/client github.com/googleapis/enterprise-certificate-proxy/client/util -# github.com/googleapis/gax-go/v2 v2.12.4 -## explicit; go 1.19 +# github.com/googleapis/gax-go/v2 v2.12.5 +## explicit; go 1.20 github.com/googleapis/gax-go/v2 github.com/googleapis/gax-go/v2/apierror github.com/googleapis/gax-go/v2/apierror/internal/proto @@ -542,16 +542,12 @@ github.com/grpc-ecosystem/go-grpc-middleware # github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0 ## explicit; go 1.19 github.com/grpc-ecosystem/go-grpc-middleware/v2 -github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors -github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/tags -github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/tracing -github.com/grpc-ecosystem/go-grpc-middleware/v2/util/metautils -# github.com/grpc-ecosystem/grpc-gateway/v2 v2.15.2 -## explicit; go 1.17 +# github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 +## explicit; go 1.21 github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule github.com/grpc-ecosystem/grpc-gateway/v2/runtime github.com/grpc-ecosystem/grpc-gateway/v2/utilities -# github.com/hashicorp/consul/api v1.29.2 +# github.com/hashicorp/consul/api v1.29.4 ## explicit; go 1.19 github.com/hashicorp/consul/api # github.com/hashicorp/errwrap v1.1.0 @@ -663,15 +659,16 @@ github.com/matttproud/golang_protobuf_extensions/pbutil # github.com/metalmatze/signal v0.0.0-20210307161603-1c9aa721a97a ## explicit; go 1.14 github.com/metalmatze/signal/server/signalhttp -# github.com/miekg/dns v1.1.59 +# github.com/miekg/dns v1.1.62 ## explicit; go 1.19 github.com/miekg/dns # github.com/minio/md5-simd v1.1.2 ## explicit; go 1.14 github.com/minio/md5-simd -# github.com/minio/minio-go/v7 v7.0.74 +# github.com/minio/minio-go/v7 v7.0.75 ## explicit; go 1.21 github.com/minio/minio-go/v7 +github.com/minio/minio-go/v7/pkg/cors github.com/minio/minio-go/v7/pkg/credentials github.com/minio/minio-go/v7/pkg/encrypt github.com/minio/minio-go/v7/pkg/lifecycle @@ -793,10 +790,12 @@ github.com/prometheus/alertmanager/template github.com/prometheus/alertmanager/timeinterval github.com/prometheus/alertmanager/types 
github.com/prometheus/alertmanager/ui -# github.com/prometheus/client_golang v1.19.1 +# github.com/prometheus/client_golang v1.20.2 ## explicit; go 1.20 github.com/prometheus/client_golang/api github.com/prometheus/client_golang/api/prometheus/v1 +github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil +github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header github.com/prometheus/client_golang/prometheus github.com/prometheus/client_golang/prometheus/collectors github.com/prometheus/client_golang/prometheus/collectors/version @@ -810,7 +809,7 @@ github.com/prometheus/client_golang/prometheus/testutil/promlint/validations # github.com/prometheus/client_model v0.6.1 ## explicit; go 1.19 github.com/prometheus/client_model/go -# github.com/prometheus/common v0.55.0 +# github.com/prometheus/common v0.58.0 ## explicit; go 1.20 github.com/prometheus/common/config github.com/prometheus/common/expfmt @@ -830,7 +829,7 @@ github.com/prometheus/exporter-toolkit/web github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs github.com/prometheus/procfs/internal/util -# github.com/prometheus/prometheus v0.53.2-0.20240718123124-e9dec5fc537b +# github.com/prometheus/prometheus v0.54.0-rc.0 ## explicit; go 1.21.0 github.com/prometheus/prometheus/config github.com/prometheus/prometheus/discovery @@ -971,7 +970,7 @@ github.com/thanos-io/promql-engine/query github.com/thanos-io/promql-engine/ringbuffer github.com/thanos-io/promql-engine/storage github.com/thanos-io/promql-engine/storage/prometheus -# github.com/thanos-io/thanos v0.35.2-0.20240722172812-990a60b72647 +# github.com/thanos-io/thanos v0.35.2-0.20240904173945-09db52562de0 ## explicit; go 1.22.0 github.com/thanos-io/thanos/pkg/api/query/querypb github.com/thanos-io/thanos/pkg/block @@ -1133,7 +1132,7 @@ go.opencensus.io/trace go.opencensus.io/trace/internal go.opencensus.io/trace/propagation go.opencensus.io/trace/tracestate -# go.opentelemetry.io/collector/pdata v1.12.0 +# go.opentelemetry.io/collector/pdata v1.13.0 ## explicit; go 1.21.0 go.opentelemetry.io/collector/pdata/internal go.opentelemetry.io/collector/pdata/internal/data @@ -1152,34 +1151,34 @@ go.opentelemetry.io/collector/pdata/internal/otlp go.opentelemetry.io/collector/pdata/pcommon go.opentelemetry.io/collector/pdata/pmetric go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp -# go.opentelemetry.io/collector/semconv v0.104.0 +# go.opentelemetry.io/collector/semconv v0.105.0 ## explicit; go 1.21.0 go.opentelemetry.io/collector/semconv/v1.6.1 # go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 ## explicit; go 1.20 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal -# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 +# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 ## explicit; go 1.21 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil -# go.opentelemetry.io/contrib/propagators/autoprop v0.53.0 +# go.opentelemetry.io/contrib/propagators/autoprop v0.54.0 ## explicit; go 1.21 go.opentelemetry.io/contrib/propagators/autoprop -# go.opentelemetry.io/contrib/propagators/aws v1.28.0 +# go.opentelemetry.io/contrib/propagators/aws v1.29.0 ## explicit; go 1.21 
go.opentelemetry.io/contrib/propagators/aws/xray -# go.opentelemetry.io/contrib/propagators/b3 v1.28.0 +# go.opentelemetry.io/contrib/propagators/b3 v1.29.0 ## explicit; go 1.21 go.opentelemetry.io/contrib/propagators/b3 -# go.opentelemetry.io/contrib/propagators/jaeger v1.28.0 +# go.opentelemetry.io/contrib/propagators/jaeger v1.29.0 ## explicit; go 1.21 go.opentelemetry.io/contrib/propagators/jaeger -# go.opentelemetry.io/contrib/propagators/ot v1.28.0 +# go.opentelemetry.io/contrib/propagators/ot v1.29.0 ## explicit; go 1.21 go.opentelemetry.io/contrib/propagators/ot -# go.opentelemetry.io/otel v1.28.0 +# go.opentelemetry.io/otel v1.29.0 ## explicit; go 1.21 go.opentelemetry.io/otel go.opentelemetry.io/otel/attribute @@ -1195,28 +1194,29 @@ go.opentelemetry.io/otel/semconv/v1.17.0 go.opentelemetry.io/otel/semconv/v1.17.0/httpconv go.opentelemetry.io/otel/semconv/v1.20.0 go.opentelemetry.io/otel/semconv/v1.21.0 +go.opentelemetry.io/otel/semconv/v1.24.0 go.opentelemetry.io/otel/semconv/v1.26.0 -# go.opentelemetry.io/otel/bridge/opentracing v1.28.0 +# go.opentelemetry.io/otel/bridge/opentracing v1.29.0 ## explicit; go 1.21 go.opentelemetry.io/otel/bridge/opentracing go.opentelemetry.io/otel/bridge/opentracing/migration -# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 +# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.29.0 ## explicit; go 1.21 go.opentelemetry.io/otel/exporters/otlp/otlptrace go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform -# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 +# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.29.0 ## explicit; go 1.21 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry -# go.opentelemetry.io/otel/metric v1.28.0 +# go.opentelemetry.io/otel/metric v1.29.0 ## explicit; go 1.21 go.opentelemetry.io/otel/metric go.opentelemetry.io/otel/metric/embedded go.opentelemetry.io/otel/metric/noop -# go.opentelemetry.io/otel/sdk v1.28.0 +# go.opentelemetry.io/otel/sdk v1.29.0 ## explicit; go 1.21 go.opentelemetry.io/otel/sdk go.opentelemetry.io/otel/sdk/instrumentation @@ -1225,7 +1225,7 @@ go.opentelemetry.io/otel/sdk/internal/x go.opentelemetry.io/otel/sdk/resource go.opentelemetry.io/otel/sdk/trace go.opentelemetry.io/otel/sdk/trace/tracetest -# go.opentelemetry.io/otel/trace v1.28.0 +# go.opentelemetry.io/otel/trace v1.29.0 ## explicit; go 1.21 go.opentelemetry.io/otel/trace go.opentelemetry.io/otel/trace/embedded @@ -1261,7 +1261,7 @@ go4.org/intern # go4.org/unsafe/assume-no-moving-gc v0.0.0-20230525183740-e7c30c78aeb2 ## explicit; go 1.11 go4.org/unsafe/assume-no-moving-gc -# golang.org/x/crypto v0.25.0 +# golang.org/x/crypto v0.26.0 ## explicit; go 1.20 golang.org/x/crypto/argon2 golang.org/x/crypto/bcrypt @@ -1281,10 +1281,10 @@ golang.org/x/crypto/pkcs12/internal/rc2 golang.org/x/exp/constraints golang.org/x/exp/maps golang.org/x/exp/slices -# golang.org/x/mod v0.18.0 +# golang.org/x/mod v0.20.0 ## explicit; go 1.18 golang.org/x/mod/semver -# golang.org/x/net v0.27.0 +# golang.org/x/net v0.28.0 ## explicit; go 1.18 golang.org/x/net/bpf golang.org/x/net/context @@ -1303,7 +1303,7 @@ golang.org/x/net/ipv6 golang.org/x/net/netutil 
golang.org/x/net/publicsuffix golang.org/x/net/trace -# golang.org/x/oauth2 v0.21.0 +# golang.org/x/oauth2 v0.22.0 ## explicit; go 1.18 golang.org/x/oauth2 golang.org/x/oauth2/authhandler @@ -1320,13 +1320,13 @@ golang.org/x/oauth2/jwt ## explicit; go 1.18 golang.org/x/sync/errgroup golang.org/x/sync/semaphore -# golang.org/x/sys v0.22.0 +# golang.org/x/sys v0.24.0 ## explicit; go 1.18 golang.org/x/sys/cpu golang.org/x/sys/unix golang.org/x/sys/windows golang.org/x/sys/windows/registry -# golang.org/x/text v0.16.0 +# golang.org/x/text v0.17.0 ## explicit; go 1.18 golang.org/x/text/cases golang.org/x/text/internal @@ -1339,13 +1339,12 @@ golang.org/x/text/secure/bidirule golang.org/x/text/transform golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm -# golang.org/x/time v0.5.0 +# golang.org/x/time v0.6.0 ## explicit; go 1.18 golang.org/x/time/rate -# golang.org/x/tools v0.22.0 +# golang.org/x/tools v0.24.0 ## explicit; go 1.19 golang.org/x/tools/go/gcexportdata -golang.org/x/tools/go/internal/packagesdriver golang.org/x/tools/go/packages golang.org/x/tools/go/types/objectpath golang.org/x/tools/internal/aliases @@ -1366,7 +1365,7 @@ golang.org/x/tools/internal/versions gonum.org/v1/gonum/floats gonum.org/v1/gonum/floats/scalar gonum.org/v1/gonum/internal/asm/f64 -# google.golang.org/api v0.183.0 +# google.golang.org/api v0.188.0 ## explicit; go 1.20 google.golang.org/api/googleapi google.golang.org/api/googleapi/transport @@ -1384,17 +1383,17 @@ google.golang.org/api/transport google.golang.org/api/transport/grpc google.golang.org/api/transport/http google.golang.org/api/transport/http/internal/propagation -# google.golang.org/genproto v0.0.0-20240528184218-531527333157 +# google.golang.org/genproto v0.0.0-20240708141625-4ad9e859172b ## explicit; go 1.20 google.golang.org/genproto/googleapis/type/date google.golang.org/genproto/googleapis/type/expr -# google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 -## explicit; go 1.20 +# google.golang.org/genproto/googleapis/api v0.0.0-20240822170219-fc7c04adadcd +## explicit; go 1.21 google.golang.org/genproto/googleapis/api google.golang.org/genproto/googleapis/api/annotations google.golang.org/genproto/googleapis/api/httpbody -# google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 -## explicit; go 1.20 +# google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd +## explicit; go 1.21 google.golang.org/genproto/googleapis/rpc/code google.golang.org/genproto/googleapis/rpc/errdetails google.golang.org/genproto/googleapis/rpc/status
<tr>
  <th>User</th>
  <th>Loaded Blocks</th>
  <th># Series</th>
  <th># Active Series</th>
  <th>Total Ingest Rate</th>
</tr>
<tr>
  <td>{{ .UserID }}</td>
  <td>{{ .UserStats.LoadBlocks }}</td>
  <td>{{ .UserStats.NumSeries }}</td>
  <td>{{ .UserStats.ActiveSeries }}</td>
  <td>{{ printf "%.2f" .UserStats.IngestionRate }}</td>
</tr>
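The two table rows above belong to a Go html/template page; rendering one row with sample values shows how the printf pipeline formats the ingest rate. The struct shapes below are illustrative stand-ins for the Cortex types the real template is executed against:

package main

import (
	"html/template"
	"os"
)

// Hypothetical shapes matching the fields the row template references;
// the real types live in the Cortex codebase.
type UserStats struct {
	LoadBlocks    int
	NumSeries     uint64
	ActiveSeries  uint64
	IngestionRate float64
}

type row struct {
	UserID    string
	UserStats UserStats
}

const rowTmpl = `<tr><td>{{ .UserID }}</td><td>{{ .UserStats.LoadBlocks }}</td>` +
	`<td>{{ .UserStats.NumSeries }}</td><td>{{ .UserStats.ActiveSeries }}</td>` +
	`<td>{{ printf "%.2f" .UserStats.IngestionRate }}</td></tr>`

func main() {
	t := template.Must(template.New("row").Parse(rowTmpl))
	// Prints: <tr><td>user-1</td><td>3</td><td>1200</td><td>800</td><td>42.50</td></tr>
	_ = t.Execute(os.Stdout, row{
		UserID:    "user-1",
		UserStats: UserStats{LoadBlocks: 3, NumSeries: 1200, ActiveSeries: 800, IngestionRate: 42.5},
	})
}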